# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_serialization import jsonutils
import six
from nova import context
from nova import exception
from nova import objects
from nova.objects import base as base_obj
from nova.pci import stats
from nova import test
from nova.virt import hardware as hw
class InstanceInfoTests(test.NoDBTestCase):
    """Tests for the hw.InstanceInfo value object."""

    def test_instance_info_default(self):
        """A bare InstanceInfo has None state/id and zeroed counters."""
        ii = hw.InstanceInfo()
        self.assertIsNone(ii.state)
        self.assertIsNone(ii.id)
        self.assertEqual(0, ii.max_mem_kb)
        self.assertEqual(0, ii.mem_kb)
        self.assertEqual(0, ii.num_cpu)
        self.assertEqual(0, ii.cpu_time_ns)

    def test_instance_info(self):
        """Constructor keyword arguments are stored verbatim."""
        ii = hw.InstanceInfo(state='fake-state',
                             max_mem_kb=1,
                             mem_kb=2,
                             num_cpu=3,
                             cpu_time_ns=4,
                             id='fake-id')
        self.assertEqual('fake-state', ii.state)
        self.assertEqual('fake-id', ii.id)
        self.assertEqual(1, ii.max_mem_kb)
        self.assertEqual(2, ii.mem_kb)
        self.assertEqual(3, ii.num_cpu)
        self.assertEqual(4, ii.cpu_time_ns)

    def test_instance_info_equals(self):
        """Equality is field-wise: same fields equal, different fields not."""
        # NOTE: renamed from test_instance_infoi_equals (typo); unittest
        # discovers tests by the "test_" prefix, so this stays collected.
        ii1 = hw.InstanceInfo(state='fake-state',
                              max_mem_kb=1,
                              mem_kb=2,
                              num_cpu=3,
                              cpu_time_ns=4,
                              id='fake-id')
        ii2 = hw.InstanceInfo(state='fake-state',
                              max_mem_kb=1,
                              mem_kb=2,
                              num_cpu=3,
                              cpu_time_ns=4,
                              id='fake-id')
        ii3 = hw.InstanceInfo(state='fake-estat',
                              max_mem_kb=11,
                              mem_kb=22,
                              num_cpu=33,
                              cpu_time_ns=44,
                              id='fake-di')
        self.assertEqual(ii1, ii2)
        self.assertNotEqual(ii1, ii3)
class CpuSetTestCase(test.NoDBTestCase):
    """Tests for vcpu_pin_set handling and CPU spec parse/format helpers."""

    def test_get_vcpu_pin_set(self):
        """The vcpu_pin_set config option parses into a set of CPU ids."""
        self.flags(vcpu_pin_set="1-3,5,^2")
        self.assertEqual(set([1, 3, 5]), hw.get_vcpu_pin_set())

    def test_parse_cpu_spec_none_returns_none(self):
        """An unset vcpu_pin_set option yields None rather than a set."""
        self.flags(vcpu_pin_set=None)
        self.assertIsNone(hw.get_vcpu_pin_set())

    def test_parse_cpu_spec_valid_syntax_works(self):
        """Valid spec strings (ranges, exclusions, stray whitespace and
        commas) parse to the expected CPU id sets.
        """
        valid_cases = [
            ("1", set([1])),
            ("1,2", set([1, 2])),
            (", , 1 , ,, 2, ,", set([1, 2])),
            ("1-1", set([1])),
            (" 1 - 1, 1 - 2 , 1 -3", set([1, 2, 3])),
            ("1,^2", set([1])),
            ("1-2, ^1", set([2])),
            ("1-3,5,^2", set([1, 3, 5])),
            (" 1 - 3 , ^2, 5", set([1, 3, 5])),
            (" 1,1, ^1", set([])),
        ]
        for spec, expected in valid_cases:
            self.assertEqual(expected, hw.parse_cpu_spec(spec))

    def test_parse_cpu_spec_invalid_syntax_raises(self):
        """Malformed spec strings raise exception.Invalid."""
        invalid_specs = [
            " -1-3,5,^2",
            "1-3-,5,^2",
            "-3,5,^2",
            "1-,5,^2",
            "1-3,5,^2^",
            "1-3,5,^2-",
            "--13,^^5,^2",
            "a-3,5,^2",
            "1-a,5,^2",
            "1-3,b,^2",
            "1-3,5,^c",
            "3 - 1, 5 , ^ 2 ",
        ]
        for spec in invalid_specs:
            self.assertRaises(exception.Invalid, hw.parse_cpu_spec, spec)

    def test_format_cpu_spec(self):
        """CPU id collections format to spec strings, with range folding
        by default and flat comma lists when allow_ranges=False.
        """
        ranged_cases = [
            ([], ""),
            ([1, 3], "1,3"),
            ([1, 2, 4, 6], "1-2,4,6"),
            ([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48],
             "10-11,13-16,19-20,40,42,48"),
        ]
        for cpus, expected in ranged_cases:
            # Both set and list inputs must produce identical output.
            self.assertEqual(expected, hw.format_cpu_spec(set(cpus)))
            self.assertEqual(expected, hw.format_cpu_spec(cpus))
        flat_cases = [
            ([1, 2, 4, 6], "1,2,4,6"),
            ([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48],
             "10,11,13,14,15,16,19,20,40,42,48"),
        ]
        for cpus, expected in flat_cases:
            self.assertEqual(
                expected, hw.format_cpu_spec(set(cpus), allow_ranges=False))
            self.assertEqual(
                expected, hw.format_cpu_spec(cpus, allow_ranges=False))
class VCPUTopologyTest(test.NoDBTestCase):
    """Tests for guest CPU topology constraint/possibility calculations."""

    def test_validate_config(self):
        """Flavor and image topology settings combine into a
        (preferred, maximum) constraint pair, or raise when the image
        attempts to exceed the flavor's limits.
        """
        testdata = [
            {  # Flavor sets preferred topology only
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1",
                                         }),
                "image": {
                    "properties": {}
                },
                "expect": (
                    8, 2, 1, 65536, 65536, 65536
                )
            },
            {  # Image topology overrides flavor
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1",
                                             "hw:cpu_max_threads": "2",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "4",
                        "hw_cpu_cores": "2",
                        "hw_cpu_threads": "2",
                    }
                },
                "expect": (
                    4, 2, 2, 65536, 65536, 2,
                )
            },
            {  # Partial image topology overrides flavor
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "2",
                    }
                },
                "expect": (
                    2, -1, -1, 65536, 65536, 65536,
                )
            },
            {  # Restrict use of threads
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_threads": "2",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_max_threads": "1",
                    }
                },
                "expect": (
                    -1, -1, -1, 65536, 65536, 1,
                )
            },
            {  # Force use of at least two sockets
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_cores": "8",
                                             "hw:cpu_max_threads": "1",
                                         }),
                "image": {
                    "properties": {}
                },
                "expect": (
                    -1, -1, -1, 65536, 8, 1
                )
            },
            {  # Image limits reduce flavor
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_cores": "8",
                                             "hw:cpu_max_threads": "1",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": "4",
                    }
                },
                "expect": (
                    -1, -1, -1, 65536, 4, 1
                )
            },
            {  # Image limits kill flavor preferred
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "2",
                                             "hw:cpu_cores": "8",
                                             "hw:cpu_threads": "1",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": "4",
                    }
                },
                "expect": (
                    -1, -1, -1, 65536, 4, 65536
                )
            },
            {  # Image limits cannot exceed flavor
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_cores": "8",
                                             "hw:cpu_max_threads": "1",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": "16",
                    }
                },
                "expect": exception.ImageVCPULimitsRangeExceeded,
            },
            {  # Image preferred cannot exceed flavor
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_cores": "8",
                                             "hw:cpu_max_threads": "1",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_cores": "16",
                    }
                },
                "expect": exception.ImageVCPUTopologyRangeExceeded,
            },
        ]
        for topo_test in testdata:
            image_meta = objects.ImageMeta.from_dict(topo_test["image"])
            # isinstance() rather than type() == tuple: idiomatic and
            # tolerant of tuple subclasses.
            if isinstance(topo_test["expect"], tuple):
                (preferred,
                 maximum) = hw._get_cpu_topology_constraints(
                     topo_test["flavor"], image_meta)
                self.assertEqual(topo_test["expect"][0], preferred.sockets)
                self.assertEqual(topo_test["expect"][1], preferred.cores)
                self.assertEqual(topo_test["expect"][2], preferred.threads)
                self.assertEqual(topo_test["expect"][3], maximum.sockets)
                self.assertEqual(topo_test["expect"][4], maximum.cores)
                self.assertEqual(topo_test["expect"][5], maximum.threads)
            else:
                self.assertRaises(topo_test["expect"],
                                  hw._get_cpu_topology_constraints,
                                  topo_test["flavor"],
                                  image_meta)

    def test_possible_topologies(self):
        """All (sockets, cores, threads) factorizations of the vcpu count
        within the maxima are generated, or an error is raised when the
        maxima cannot accommodate the vcpu count at all.
        """
        testdata = [
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 8,
                "maxcores": 8,
                "maxthreads": 2,
                "expect": [
                    [8, 1, 1],
                    [4, 2, 1],
                    [2, 4, 1],
                    [1, 8, 1],
                    [4, 1, 2],
                    [2, 2, 2],
                    [1, 4, 2],
                ]
            },
            {
                "allow_threads": False,
                "vcpus": 8,
                "maxsockets": 8,
                "maxcores": 8,
                "maxthreads": 2,
                "expect": [
                    [8, 1, 1],
                    [4, 2, 1],
                    [2, 4, 1],
                    [1, 8, 1],
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 1024,
                "maxcores": 1024,
                "maxthreads": 2,
                "expect": [
                    [8, 1, 1],
                    [4, 2, 1],
                    [2, 4, 1],
                    [1, 8, 1],
                    [4, 1, 2],
                    [2, 2, 2],
                    [1, 4, 2],
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 1024,
                "maxcores": 1,
                "maxthreads": 2,
                "expect": [
                    [8, 1, 1],
                    [4, 1, 2],
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 7,
                "maxsockets": 8,
                "maxcores": 8,
                "maxthreads": 2,
                "expect": [
                    [7, 1, 1],
                    [1, 7, 1],
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 2,
                "maxcores": 1,
                "maxthreads": 1,
                "expect": exception.ImageVCPULimitsRangeImpossible,
            },
            {
                "allow_threads": False,
                "vcpus": 8,
                "maxsockets": 2,
                "maxcores": 1,
                "maxthreads": 4,
                "expect": exception.ImageVCPULimitsRangeImpossible,
            },
        ]
        for topo_test in testdata:
            # isinstance() rather than type() == list: idiomatic check.
            if isinstance(topo_test["expect"], list):
                actual = []
                for topology in hw._get_possible_cpu_topologies(
                        topo_test["vcpus"],
                        objects.VirtCPUTopology(
                            sockets=topo_test["maxsockets"],
                            cores=topo_test["maxcores"],
                            threads=topo_test["maxthreads"]),
                        topo_test["allow_threads"]):
                    actual.append([topology.sockets,
                                   topology.cores,
                                   topology.threads])
                self.assertEqual(topo_test["expect"], actual)
            else:
                self.assertRaises(topo_test["expect"],
                                  hw._get_possible_cpu_topologies,
                                  topo_test["vcpus"],
                                  objects.VirtCPUTopology(
                                      sockets=topo_test["maxsockets"],
                                      cores=topo_test["maxcores"],
                                      threads=topo_test["maxthreads"]),
                                  topo_test["allow_threads"])

    def test_sorting_topologies(self):
        """Possible topologies sort by how many fields match the preferred
        topology (higher score first), preserving generation order on ties.
        """
        testdata = [
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 8,
                "maxcores": 8,
                "maxthreads": 2,
                "sockets": 4,
                "cores": 2,
                "threads": 1,
                "expect": [
                    [4, 2, 1],  # score = 2
                    [8, 1, 1],  # score = 1
                    [2, 4, 1],  # score = 1
                    [1, 8, 1],  # score = 1
                    [4, 1, 2],  # score = 1
                    [2, 2, 2],  # score = 1
                    [1, 4, 2],  # score = 1
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 1024,
                "maxcores": 1024,
                "maxthreads": 2,
                "sockets": -1,
                "cores": 4,
                "threads": -1,
                "expect": [
                    [2, 4, 1],  # score = 1
                    [1, 4, 2],  # score = 1
                    [8, 1, 1],  # score = 0
                    [4, 2, 1],  # score = 0
                    [1, 8, 1],  # score = 0
                    [4, 1, 2],  # score = 0
                    [2, 2, 2],  # score = 0
                ]
            },
            {
                "allow_threads": True,
                "vcpus": 8,
                "maxsockets": 1024,
                "maxcores": 1,
                "maxthreads": 2,
                "sockets": -1,
                "cores": -1,
                "threads": 2,
                "expect": [
                    [4, 1, 2],  # score = 1
                    [8, 1, 1],  # score = 0
                ]
            },
            {
                "allow_threads": False,
                "vcpus": 8,
                "maxsockets": 1024,
                "maxcores": 1,
                "maxthreads": 2,
                "sockets": -1,
                "cores": -1,
                "threads": 2,
                "expect": [
                    [8, 1, 1],  # score = 0
                ]
            },
        ]
        for topo_test in testdata:
            actual = []
            possible = hw._get_possible_cpu_topologies(
                topo_test["vcpus"],
                objects.VirtCPUTopology(sockets=topo_test["maxsockets"],
                                        cores=topo_test["maxcores"],
                                        threads=topo_test["maxthreads"]),
                topo_test["allow_threads"])
            tops = hw._sort_possible_cpu_topologies(
                possible,
                objects.VirtCPUTopology(sockets=topo_test["sockets"],
                                        cores=topo_test["cores"],
                                        threads=topo_test["threads"]))
            for topology in tops:
                actual.append([topology.sockets,
                               topology.cores,
                               topology.threads])
            self.assertEqual(topo_test["expect"], actual)

    def test_best_config(self):
        """The first desirable topology combines flavor/image preferences
        and limits, with NUMA cell thread requirements taken into account.
        """
        testdata = [
            {  # Flavor sets preferred topology only
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1"
                                         }),
                "image": {
                    "properties": {}
                },
                "expect": [8, 2, 1],
            },
            {  # Image topology overrides flavor
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1",
                                             "hw:cpu_maxthreads": "2",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "4",
                        "hw_cpu_cores": "2",
                        "hw_cpu_threads": "2",
                    }
                },
                "expect": [4, 2, 2],
            },
            {  # Image topology overrides flavor
                "allow_threads": False,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1",
                                             "hw:cpu_maxthreads": "2",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "4",
                        "hw_cpu_cores": "2",
                        "hw_cpu_threads": "2",
                    }
                },
                "expect": [8, 2, 1],
            },
            {  # Partial image topology overrides flavor
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "8",
                                             "hw:cpu_cores": "2",
                                             "hw:cpu_threads": "1"
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_sockets": "2"
                    }
                },
                "expect": [2, 8, 1],
            },
            {  # Restrict use of threads
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_threads": "1"
                                         }),
                "image": {
                    "properties": {}
                },
                "expect": [16, 1, 1]
            },
            {  # Force use of at least two sockets
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_cores": "8",
                                             "hw:cpu_max_threads": "1",
                                         }),
                "image": {
                    "properties": {}
                },
                "expect": [16, 1, 1]
            },
            {  # Image limits reduce flavor
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_sockets": "8",
                                             "hw:cpu_max_cores": "8",
                                             "hw:cpu_max_threads": "1",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_max_sockets": 4,
                    }
                },
                "expect": [4, 4, 1]
            },
            {  # Image limits kill flavor preferred
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=16, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_sockets": "2",
                                             "hw:cpu_cores": "8",
                                             "hw:cpu_threads": "1",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": 4,
                    }
                },
                "expect": [16, 1, 1]
            },
            {  # NUMA needs threads, only cores requested by flavor
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_cores": "2",
                                         }),
                "image": {
                    "properties": {
                        "hw_cpu_max_cores": 2,
                    }
                },
                "numa_topology": objects.InstanceNUMATopology(
                    cells=[
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1]), memory=1024,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=1, threads=2)),
                        objects.InstanceNUMACell(
                            id=1, cpuset=set([2, 3]), memory=1024)]),
                "expect": [1, 2, 2]
            },
            {  # NUMA needs threads, but more than requested by flavor - the
               # least amount of threads wins
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_threads": "2",
                                         }),
                "image": {
                    "properties": {}
                },
                "numa_topology": objects.InstanceNUMATopology(
                    cells=[
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [2, 1, 2]
            },
            {  # NUMA needs threads, but more than limit in flavor - the
               # least amount of threads which divides into the vcpu
               # count wins. So with desired 4, max of 3, and
               # vcpu count of 4, we should get 2 threads.
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
                                         extra_specs={
                                             "hw:cpu_max_sockets": "5",
                                             "hw:cpu_max_cores": "2",
                                             "hw:cpu_max_threads": "3",
                                         }),
                "image": {
                    "properties": {}
                },
                "numa_topology": objects.InstanceNUMATopology(
                    cells=[
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [2, 1, 2]
            },
            {  # NUMA needs threads, but thread count does not
               # divide into flavor vcpu count, so we must
               # reduce thread count to closest divisor
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=6, memory_mb=2048,
                                         extra_specs={
                                         }),
                "image": {
                    "properties": {}
                },
                "numa_topology": objects.InstanceNUMATopology(
                    cells=[
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [2, 1, 3]
            },
            {  # NUMA needs different number of threads per cell - the least
               # amount of threads wins
                "allow_threads": True,
                "flavor": objects.Flavor(vcpus=8, memory_mb=2048,
                                         extra_specs={}),
                "image": {
                    "properties": {}
                },
                "numa_topology": objects.InstanceNUMATopology(
                    cells=[
                        objects.InstanceNUMACell(
                            id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=2, threads=2)),
                        objects.InstanceNUMACell(
                            id=1, cpuset=set([4, 5, 6, 7]), memory=1024,
                            cpu_topology=objects.VirtCPUTopology(
                                sockets=1, cores=1, threads=4))]),
                "expect": [4, 1, 2]
            },
        ]
        for topo_test in testdata:
            image_meta = objects.ImageMeta.from_dict(topo_test["image"])
            topology = hw._get_desirable_cpu_topologies(
                topo_test["flavor"],
                image_meta,
                topo_test["allow_threads"],
                topo_test.get("numa_topology"))[0]
            self.assertEqual(topo_test["expect"][0], topology.sockets)
            self.assertEqual(topo_test["expect"][1], topology.cores)
            self.assertEqual(topo_test["expect"][2], topology.threads)
class NUMATopologyTest(test.NoDBTestCase):
def test_topology_constraints(self):
testdata = [
{
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
}),
"image": {
},
"expect": None,
},
{
"flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
"hw:numa_nodes": 2
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
objects.InstanceNUMACell(
id=1, cpuset=set([4, 5, 6, 7]), memory=1024),
]),
},
{
"flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
"hw:mem_page_size": 2048
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
memory=2048, pagesize=2048)
]),
},
{
# vcpus is not a multiple of nodes, so it
# is an error to not provide cpu/mem mapping
"flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
"hw:numa_nodes": 3
}),
"image": {
},
"expect": exception.ImageNUMATopologyAsymmetric,
},
{
"flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
"hw:numa_nodes": 3,
"hw:numa_cpus.0": "0-3",
"hw:numa_mem.0": "1024",
"hw:numa_cpus.1": "4,6",
"hw:numa_mem.1": "512",
"hw:numa_cpus.2": "5,7",
"hw:numa_mem.2": "512",
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
objects.InstanceNUMACell(
id=1, cpuset=set([4, 6]), memory=512),
objects.InstanceNUMACell(
id=2, cpuset=set([5, 7]), memory=512)
]),
},
{
"flavor": objects.Flavor(vcpus=8, memory_mb=2048, extra_specs={
}),
"image": {
"properties": {
"hw_numa_nodes": 3,
"hw_numa_cpus.0": "0-3",
"hw_numa_mem.0": "1024",
"hw_numa_cpus.1": "4,6",
"hw_numa_mem.1": "512",
"hw_numa_cpus.2": "5,7",
"hw_numa_mem.2": "512",
},
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=1024),
objects.InstanceNUMACell(
id=1, cpuset=set([4, 6]), memory=512),
objects.InstanceNUMACell(
id=2, cpuset=set([5, 7]), memory=512)
]),
},
{
# Request a CPU that is out of range
# wrt vCPU count
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 1,
"hw:numa_cpus.0": "0-16",
"hw:numa_mem.0": "2048",
}),
"image": {
},
"expect": exception.ImageNUMATopologyCPUOutOfRange,
},
{
# Request the same CPU in two nodes
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_cpus.0": "0-7",
"hw:numa_mem.0": "1024",
"hw:numa_cpus.1": "0-7",
"hw:numa_mem.1": "1024",
}),
"image": {
},
"expect": exception.ImageNUMATopologyCPUDuplicates,
},
{
# Request with some CPUs not assigned
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_cpus.0": "0-2",
"hw:numa_mem.0": "1024",
"hw:numa_cpus.1": "3-4",
"hw:numa_mem.1": "1024",
}),
"image": {
},
"expect": exception.ImageNUMATopologyCPUsUnassigned,
},
{
# Request too little memory vs flavor total
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_cpus.0": "0-3",
"hw:numa_mem.0": "512",
"hw:numa_cpus.1": "4-7",
"hw:numa_mem.1": "512",
}),
"image": {
},
"expect": exception.ImageNUMATopologyMemoryOutOfRange,
},
{
# Request too much memory vs flavor total
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_cpus.0": "0-3",
"hw:numa_mem.0": "1576",
"hw:numa_cpus.1": "4-7",
"hw:numa_mem.1": "1576",
}),
"image": {
},
"expect": exception.ImageNUMATopologyMemoryOutOfRange,
},
{
# Request missing mem.0
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_cpus.0": "0-3",
"hw:numa_mem.1": "1576",
}),
"image": {
},
"expect": exception.ImageNUMATopologyIncomplete,
},
{
# Request missing cpu.0
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
"hw:numa_mem.0": "1576",
"hw:numa_cpus.1": "4-7",
}),
"image": {
},
"expect": exception.ImageNUMATopologyIncomplete,
},
{
# Image attempts to override flavor
"flavor": objects.Flavor(vcpus=8, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2,
}),
"image": {
"properties": {
"hw_numa_nodes": 4}
},
"expect": exception.ImageNUMATopologyForbidden,
},
{
# NUMA + CPU pinning requested in the flavor
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2, "hw:cpu_policy": "dedicated"
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]), memory=1024,
cpu_pinning={}),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]), memory=1024,
cpu_pinning={})])
},
{
# no NUMA + CPU pinning requested in the flavor
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:cpu_policy": "dedicated"
}),
"image": {
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
cpu_pinning={})])
},
{
# NUMA + CPU pinning requested in the image
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2
}),
"image": {
"properties": {
"hw_cpu_policy": "dedicated"}
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1]), memory=1024,
cpu_pinning={}),
objects.InstanceNUMACell(
id=1, cpuset=set([2, 3]), memory=1024,
cpu_pinning={})])
},
{
# no NUMA + CPU pinning requested in the image
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={}),
"image": {
"properties": {
"hw_cpu_policy": "dedicated"}
},
"expect": objects.InstanceNUMATopology(cells=
[
objects.InstanceNUMACell(
id=0, cpuset=set([0, 1, 2, 3]), memory=2048,
cpu_pinning={})])
},
{
# Invalid CPU pinning override
"flavor": objects.Flavor(vcpus=4, memory_mb=2048,
extra_specs={
"hw:numa_nodes": 2, "hw:cpu_policy": "shared"
}),
"image": {
"properties": {
"hw_cpu_policy": "dedicated"}
},
"expect": exception.ImageCPUPinningForbidden,
},
]
for testitem in testdata:
image_meta = objects.ImageMeta.from_dict(testitem["image"])
if testitem["expect"] is None:
topology = hw.numa_get_constraints(
testitem["flavor"], image_meta)
self.assertIsNone(topology)
elif type(testitem["expect"]) == type:
self.assertRaises(testitem["expect"],
hw.numa_get_constraints,
testitem["flavor"],
image_meta)
else:
topology = hw.numa_get_constraints(
testitem["flavor"], image_meta)
self.assertIsNotNone(topology)
self.assertEqual(len(testitem["expect"].cells),
len(topology.cells))
for i in range(len(topology.cells)):
self.assertEqual(testitem["expect"].cells[i].id,
topology.cells[i].id)
self.assertEqual(testitem["expect"].cells[i].cpuset,
topology.cells[i].cpuset)
self.assertEqual(testitem["expect"].cells[i].memory,
topology.cells[i].memory)
self.assertEqual(testitem["expect"].cells[i].pagesize,
topology.cells[i].pagesize)
self.assertEqual(testitem["expect"].cells[i].cpu_pinning,
topology.cells[i].cpu_pinning)
def test_host_usage_contiguous(self):
hpages0_4K = objects.NUMAPagesTopology(size_kb=4, total=256, used=0)
hpages0_2M = objects.NUMAPagesTopology(size_kb=2048, total=0, used=1)
hpages1_4K = objects.NUMAPagesTopology(size_kb=4, total=128, used=2)
hpages1_2M = objects.NUMAPagesTopology(size_kb=2048, total=0, used=3)
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
cpu_usage=0, memory_usage=0, mempages=[
hpages0_4K, hpages0_2M],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([4, 6]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[
hpages1_4K, hpages1_2M],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=2, cpuset=set([5, 7]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
])
instance1 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256),
objects.InstanceNUMACell(id=1, cpuset=set([4]), memory=256),
])
instance2 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256),
objects.InstanceNUMACell(id=1, cpuset=set([5, 7]), memory=256),
])
hostusage = hw.numa_usage_from_instances(
hosttopo, [instance1, instance2])
self.assertEqual(len(hosttopo), len(hostusage))
self.assertIsInstance(hostusage.cells[0], objects.NUMACell)
self.assertEqual(hosttopo.cells[0].cpuset,
hostusage.cells[0].cpuset)
self.assertEqual(hosttopo.cells[0].memory,
hostusage.cells[0].memory)
self.assertEqual(hostusage.cells[0].cpu_usage, 5)
self.assertEqual(hostusage.cells[0].memory_usage, 512)
self.assertEqual(hostusage.cells[0].mempages, [
hpages0_4K, hpages0_2M])
self.assertIsInstance(hostusage.cells[1], objects.NUMACell)
self.assertEqual(hosttopo.cells[1].cpuset,
hostusage.cells[1].cpuset)
self.assertEqual(hosttopo.cells[1].memory,
hostusage.cells[1].memory)
self.assertEqual(hostusage.cells[1].cpu_usage, 3)
self.assertEqual(hostusage.cells[1].memory_usage, 512)
self.assertEqual(hostusage.cells[1].mempages, [
hpages1_4K, hpages1_2M])
self.assertEqual(256, hpages0_4K.total)
self.assertEqual(0, hpages0_4K.used)
self.assertEqual(0, hpages0_2M.total)
self.assertEqual(1, hpages0_2M.used)
self.assertIsInstance(hostusage.cells[2], objects.NUMACell)
self.assertEqual(hosttopo.cells[2].cpuset,
hostusage.cells[2].cpuset)
self.assertEqual(hosttopo.cells[2].memory,
hostusage.cells[2].memory)
self.assertEqual(hostusage.cells[2].cpu_usage, 0)
self.assertEqual(hostusage.cells[2].memory_usage, 0)
self.assertEqual(128, hpages1_4K.total)
self.assertEqual(2, hpages1_4K.used)
self.assertEqual(0, hpages1_2M.total)
self.assertEqual(3, hpages1_2M.used)
def test_host_usage_sparse(self):
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=5, cpuset=set([4, 6]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=6, cpuset=set([5, 7]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
])
instance1 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=256),
objects.InstanceNUMACell(id=6, cpuset=set([4]), memory=256),
])
instance2 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256,
cpu_usage=0, memory_usage=0, mempages=[]),
objects.InstanceNUMACell(id=5, cpuset=set([5, 7]), memory=256,
cpu_usage=0, memory_usage=0, mempages=[]),
])
hostusage = hw.numa_usage_from_instances(
hosttopo, [instance1, instance2])
self.assertEqual(len(hosttopo), len(hostusage))
self.assertIsInstance(hostusage.cells[0], objects.NUMACell)
self.assertEqual(hosttopo.cells[0].id,
hostusage.cells[0].id)
self.assertEqual(hosttopo.cells[0].cpuset,
hostusage.cells[0].cpuset)
self.assertEqual(hosttopo.cells[0].memory,
hostusage.cells[0].memory)
self.assertEqual(hostusage.cells[0].cpu_usage, 5)
self.assertEqual(hostusage.cells[0].memory_usage, 512)
self.assertIsInstance(hostusage.cells[1], objects.NUMACell)
self.assertEqual(hosttopo.cells[1].id,
hostusage.cells[1].id)
self.assertEqual(hosttopo.cells[1].cpuset,
hostusage.cells[1].cpuset)
self.assertEqual(hosttopo.cells[1].memory,
hostusage.cells[1].memory)
self.assertEqual(hostusage.cells[1].cpu_usage, 2)
self.assertEqual(hostusage.cells[1].memory_usage, 256)
self.assertIsInstance(hostusage.cells[2], objects.NUMACell)
self.assertEqual(hosttopo.cells[2].cpuset,
hostusage.cells[2].cpuset)
self.assertEqual(hosttopo.cells[2].memory,
hostusage.cells[2].memory)
self.assertEqual(hostusage.cells[2].cpu_usage, 1)
self.assertEqual(hostusage.cells[2].memory_usage, 256)
def test_host_usage_culmulative_with_free(self):
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]), memory=1024,
cpu_usage=2, memory_usage=512, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([4, 6]), memory=512,
cpu_usage=1, memory_usage=512, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=2, cpuset=set([5, 7]), memory=256,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
])
instance1 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1, 2]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=256),
objects.InstanceNUMACell(id=2, cpuset=set([4]), memory=256)])
hostusage = hw.numa_usage_from_instances(
hosttopo, [instance1])
self.assertIsInstance(hostusage.cells[0], objects.NUMACell)
self.assertEqual(hostusage.cells[0].cpu_usage, 5)
self.assertEqual(hostusage.cells[0].memory_usage, 1024)
self.assertIsInstance(hostusage.cells[1], objects.NUMACell)
self.assertEqual(hostusage.cells[1].cpu_usage, 2)
self.assertEqual(hostusage.cells[1].memory_usage, 768)
self.assertIsInstance(hostusage.cells[2], objects.NUMACell)
self.assertEqual(hostusage.cells[2].cpu_usage, 1)
self.assertEqual(hostusage.cells[2].memory_usage, 256)
# Test freeing of resources
hostusage = hw.numa_usage_from_instances(
hostusage, [instance1], free=True)
self.assertEqual(hostusage.cells[0].cpu_usage, 2)
self.assertEqual(hostusage.cells[0].memory_usage, 512)
self.assertEqual(hostusage.cells[1].cpu_usage, 1)
self.assertEqual(hostusage.cells[1].memory_usage, 512)
self.assertEqual(hostusage.cells[2].cpu_usage, 0)
self.assertEqual(hostusage.cells[2].memory_usage, 0)
def test_topo_usage_none(self):
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
])
instance1 = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([0, 1]), memory=256),
objects.InstanceNUMACell(id=2, cpuset=set([2]), memory=256),
])
hostusage = hw.numa_usage_from_instances(
None, [instance1])
self.assertIsNone(hostusage)
hostusage = hw.numa_usage_from_instances(
hosttopo, [])
self.assertEqual(hostusage.cells[0].cpu_usage, 0)
self.assertEqual(hostusage.cells[0].memory_usage, 0)
self.assertEqual(hostusage.cells[1].cpu_usage, 0)
self.assertEqual(hostusage.cells[1].memory_usage, 0)
hostusage = hw.numa_usage_from_instances(
hosttopo, None)
self.assertEqual(hostusage.cells[0].cpu_usage, 0)
self.assertEqual(hostusage.cells[0].memory_usage, 0)
self.assertEqual(hostusage.cells[1].cpu_usage, 0)
self.assertEqual(hostusage.cells[1].memory_usage, 0)
def assertNUMACellMatches(self, expected_cell, got_cell):
attrs = ('cpuset', 'memory', 'id')
if isinstance(expected_cell, objects.NUMATopology):
attrs += ('cpu_usage', 'memory_usage')
for attr in attrs:
self.assertEqual(getattr(expected_cell, attr),
getattr(got_cell, attr))
    def test_json(self):
        """NUMATopology survives a round trip through its JSON DB format."""
        expected = objects.NUMATopology(
            cells=[
                objects.NUMACell(id=1, cpuset=set([1, 2]), memory=1024,
                                 cpu_usage=0, memory_usage=0, mempages=[],
                                 siblings=[], pinned_cpus=set([])),
                objects.NUMACell(id=2, cpuset=set([3, 4]), memory=1024,
                                 cpu_usage=0, memory_usage=0,
                                 mempages=[], siblings=[],
                                 pinned_cpus=set([]))])
        got = objects.NUMATopology.obj_from_db_obj(expected._to_json())
        # Compare cell by cell; zip is safe since both topologies have the
        # same number of cells by construction.
        for exp_cell, got_cell in zip(expected.cells, got.cells):
            self.assertNUMACellMatches(exp_cell, got_cell)
class VirtNUMATopologyCellUsageTestCase(test.NoDBTestCase):
    """Tests for hw._numa_fit_instance_cell() against a single host cell."""

    def test_fit_instance_cell_success_no_limit(self):
        """An instance cell that exactly matches the host cell fits."""
        host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=0, memory_usage=0, mempages=[],
                                     siblings=[], pinned_cpus=set([]))
        instance_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=1024)
        fitted_cell = hw._numa_fit_instance_cell(host_cell, instance_cell)
        self.assertIsInstance(fitted_cell, objects.InstanceNUMACell)
        # The fitted cell is re-homed onto the host cell's id.
        self.assertEqual(host_cell.id, fitted_cell.id)

    def test_fit_instance_cell_success_w_limit(self):
        """A fully-used host cell still fits under a 2x overcommit limit."""
        host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=2,
                                     memory_usage=1024,
                                     mempages=[], siblings=[],
                                     pinned_cpus=set([]))
        limit_cell = objects.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        instance_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=1024)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsInstance(fitted_cell, objects.InstanceNUMACell)
        self.assertEqual(host_cell.id, fitted_cell.id)

    def test_fit_instance_cell_self_overcommit(self):
        """A single instance may not exceed the limits all by itself."""
        host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=0, memory_usage=0, mempages=[],
                                     siblings=[], pinned_cpus=set([]))
        limit_cell = objects.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        instance_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([1, 2, 3]), memory=4096)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsNone(fitted_cell)

    def test_fit_instance_cell_fail_w_limit(self):
        """Requests breaching either the RAM or CPU limit are rejected."""
        host_cell = objects.NUMACell(id=4, cpuset=set([1, 2]), memory=1024,
                                     cpu_usage=2,
                                     memory_usage=1024,
                                     mempages=[], siblings=[],
                                     pinned_cpus=set([]))
        # Too much memory for the 2x RAM allocation ratio.
        instance_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([1, 2]), memory=4096)
        limit_cell = objects.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsNone(fitted_cell)
        # Too many CPUs for the 2x CPU allocation ratio.
        instance_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([1, 2, 3, 4, 5]), memory=1024)
        fitted_cell = hw._numa_fit_instance_cell(
            host_cell, instance_cell, limit_cell=limit_cell)
        self.assertIsNone(fitted_cell)
class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
    """Tests for hw.numa_fit_instance_to_host() across a 2-cell host.

    The host cells are already at 100% usage, so any placement with limits
    relies on the 2x overcommit ratios defined in ``self.limits``.
    """

    def setUp(self):
        super(VirtNUMAHostTopologyTestCase, self).setUp()
        # Two host cells, each fully used (2048/2048 MB, 2/2 CPUs).
        self.host = objects.NUMATopology(
            cells=[
                objects.NUMACell(id=1, cpuset=set([1, 2]), memory=2048,
                                 cpu_usage=2, memory_usage=2048,
                                 mempages=[], siblings=[],
                                 pinned_cpus=set([])),
                objects.NUMACell(id=2, cpuset=set([3, 4]), memory=2048,
                                 cpu_usage=2, memory_usage=2048,
                                 mempages=[], siblings=[],
                                 pinned_cpus=set([]))])
        self.limits = objects.NUMATopologyLimits(
            cpu_allocation_ratio=2, ram_allocation_ratio=2)
        # instance1: exactly fills one host cell's overcommit headroom.
        self.instance1 = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([1, 2]), memory=2048)])
        # instance2: needs more CPUs than any single host cell provides.
        self.instance2 = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([1, 2, 3, 4]), memory=1024)])
        # instance3: a small request that fits either host cell.
        self.instance3 = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([1, 2]), memory=1024)])

    def test_get_fitting_success_no_limits(self):
        """Without limits, fitting ignores current host usage."""
        fitted_instance1 = hw.numa_fit_instance_to_host(
            self.host, self.instance1)
        self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [fitted_instance1])
        fitted_instance2 = hw.numa_fit_instance_to_host(
            self.host, self.instance3)
        self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)

    def test_get_fitting_success_limits(self):
        """Within the overcommit limits a small instance lands on cell 1."""
        fitted_instance = hw.numa_fit_instance_to_host(
            self.host, self.instance3, self.limits)
        self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
        self.assertEqual(1, fitted_instance.cells[0].id)

    def test_get_fitting_fails_no_limits(self):
        # NOTE(review): despite the name, this test *does* pass self.limits;
        # it exercises an instance too wide for any single cell — confirm
        # whether the name or the call is the intended behavior.
        fitted_instance = hw.numa_fit_instance_to_host(
            self.host, self.instance2, self.limits)
        self.assertIsNone(fitted_instance)

    def test_get_fitting_culmulative_fails_limits(self):
        # NOTE(review): "culmulative" is a typo for "cumulative"; kept as-is
        # since renaming would change the test id.
        """A second placement fails once the first consumed the headroom."""
        fitted_instance1 = hw.numa_fit_instance_to_host(
            self.host, self.instance1, self.limits)
        self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
        self.assertEqual(1, fitted_instance1.cells[0].id)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [fitted_instance1])
        fitted_instance2 = hw.numa_fit_instance_to_host(
            self.host, self.instance2, self.limits)
        self.assertIsNone(fitted_instance2)

    def test_get_fitting_culmulative_success_limits(self):
        """After filling cell 1, a small instance still fits on cell 2."""
        fitted_instance1 = hw.numa_fit_instance_to_host(
            self.host, self.instance1, self.limits)
        self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
        self.assertEqual(1, fitted_instance1.cells[0].id)
        self.host = hw.numa_usage_from_instances(self.host,
                                                 [fitted_instance1])
        fitted_instance2 = hw.numa_fit_instance_to_host(
            self.host, self.instance3, self.limits)
        self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
        self.assertEqual(2, fitted_instance2.cells[0].id)

    def test_get_fitting_pci_success(self):
        """Placement succeeds when PCI stats claim the request is supported."""
        pci_request = objects.InstancePCIRequest(count=1,
            spec=[{'vendor_id': '8086'}])
        pci_reqs = [pci_request]
        pci_stats = stats.PciDeviceStats()
        with mock.patch.object(stats.PciDeviceStats,
                'support_requests', return_value= True):
            fitted_instance1 = hw.numa_fit_instance_to_host(self.host,
                                                        self.instance1,
                                                        pci_requests=pci_reqs,
                                                        pci_stats=pci_stats)
            self.assertIsInstance(fitted_instance1,
                                  objects.InstanceNUMATopology)

    def test_get_fitting_pci_fail(self):
        """Placement fails when PCI stats reject the request."""
        pci_request = objects.InstancePCIRequest(count=1,
            spec=[{'vendor_id': '8086'}])
        pci_reqs = [pci_request]
        pci_stats = stats.PciDeviceStats()
        with mock.patch.object(stats.PciDeviceStats,
                'support_requests', return_value= False):
            fitted_instance1 = hw.numa_fit_instance_to_host(
                self.host,
                self.instance1,
                pci_requests=pci_reqs,
                pci_stats=pci_stats)
            self.assertIsNone(fitted_instance1)
class NumberOfSerialPortsTest(test.NoDBTestCase):
    """Tests for hw.get_number_of_serial_ports()."""

    def _ports(self, extra_specs=None, props=None):
        """Build a flavor/image pair and return the computed port count."""
        flavor = objects.Flavor(vcpus=8, memory_mb=2048,
                                extra_specs=extra_specs or {})
        image_meta = objects.ImageMeta.from_dict(
            {"properties": props} if props else {})
        return hw.get_number_of_serial_ports(flavor, image_meta)

    def test_flavor(self):
        # Flavor extra spec alone sets the count.
        self.assertEqual(
            3, self._ports(extra_specs={"hw:serial_port_count": 3}))

    def test_image_meta(self):
        # Image property alone sets the count.
        self.assertEqual(
            2, self._ports(props={"hw_serial_port_count": 2}))

    def test_flavor_invalid_value(self):
        # A non-numeric flavor value is rejected.
        self.assertRaises(exception.ImageSerialPortNumberInvalid,
                          self._ports,
                          extra_specs={"hw:serial_port_count": 'foo'})

    def test_image_meta_smaller_than_flavor(self):
        # The image may lower the flavor's count.
        self.assertEqual(
            2, self._ports(extra_specs={"hw:serial_port_count": 3},
                           props={"hw_serial_port_count": 2}))

    def test_flavor_smaller_than_image_meta(self):
        # The image may not raise the count above the flavor's cap.
        self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue,
                          self._ports,
                          extra_specs={"hw:serial_port_count": 3},
                          props={"hw_serial_port_count": 4})
class HelperMethodsTestCase(test.NoDBTestCase):
    """Tests for hw.get_host_numa_usage_from_instance().

    Exercises every accepted combination of host/instance representation:
    plain dicts, JSON strings, legacy dict-JSON and versioned objects.  The
    result type mirrors the host input: JSON in, JSON out; object in,
    object out (unless never_serialize_result is set).
    """

    def setUp(self):
        super(HelperMethodsTestCase, self).setUp()
        self.hosttopo = objects.NUMATopology(cells=[
            objects.NUMACell(id=0, cpuset=set([0, 1]), memory=512,
                             memory_usage=0, cpu_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
            objects.NUMACell(id=1, cpuset=set([2, 3]), memory=512,
                             memory_usage=0, cpu_usage=0, mempages=[],
                             siblings=[], pinned_cpus=set([])),
        ])
        self.instancetopo = objects.InstanceNUMATopology(
            instance_uuid='fake-uuid',
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([0, 1]), memory=256, pagesize=2048,
                    cpu_pinning={1: 3, 0: 4}),
                objects.InstanceNUMACell(
                    id=1, cpuset=set([2]), memory=256, pagesize=2048,
                    cpu_pinning={2: 5}),
            ])
        self.context = context.RequestContext('fake-user',
                                              'fake-project')

    def _check_usage(self, host_usage):
        # Expected usage after applying self.instancetopo to self.hosttopo.
        self.assertEqual(2, host_usage.cells[0].cpu_usage)
        self.assertEqual(256, host_usage.cells[0].memory_usage)
        self.assertEqual(1, host_usage.cells[1].cpu_usage)
        self.assertEqual(256, host_usage.cells[1].memory_usage)

    def test_dicts_json(self):
        """JSON host + JSON instance => JSON result."""
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))

    def test_dicts_instance_json(self):
        """Object host + JSON instance => object result."""
        host = {'numa_topology': self.hosttopo}
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, objects.NUMATopology)
        self._check_usage(res)

    def test_dicts_instance_json_old(self):
        """Legacy dict-based JSON instance format is still accepted."""
        host = {'numa_topology': self.hosttopo}
        instance = {'numa_topology':
                jsonutils.dumps(self.instancetopo._to_dict())}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, objects.NUMATopology)
        self._check_usage(res)

    def test_dicts_host_json(self):
        """JSON host + object instance => JSON result."""
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))

    def test_dicts_host_json_old(self):
        """Legacy dict-based JSON host format is still accepted."""
        host = {'numa_topology': jsonutils.dumps(
                self.hosttopo._to_dict())}
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))

    def test_object_host_instance_json(self):
        """ComputeNode host object + JSON instance => JSON result."""
        host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))

    def test_object_host_instance(self):
        """ComputeNode host object + object instance => JSON result."""
        host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))

    def test_instance_with_fetch(self):
        """A bare instance dict triggers a DB fetch of its NUMA topology."""
        host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = {'uuid': fake_uuid}
        with mock.patch.object(objects.InstanceNUMATopology,
                'get_by_instance_uuid', return_value=None) as get_mock:
            res = hw.get_host_numa_usage_from_instance(host, instance)
            self.assertIsInstance(res, six.string_types)
            self.assertTrue(get_mock.called)

    def test_object_instance_with_load(self):
        """An Instance object without topology lazy-loads it from the DB."""
        host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = objects.Instance(context=self.context, uuid=fake_uuid)
        with mock.patch.object(objects.InstanceNUMATopology,
                'get_by_instance_uuid', return_value=None) as get_mock:
            res = hw.get_host_numa_usage_from_instance(host, instance)
            self.assertIsInstance(res, six.string_types)
            self.assertTrue(get_mock.called)

    def test_instance_serialized_by_build_request_spec(self):
        """An instance primitivized by the scheduler is handled."""
        host = objects.ComputeNode(numa_topology=self.hosttopo._to_json())
        fake_uuid = str(uuid.uuid4())
        instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid,
                                    numa_topology=self.instancetopo)
        # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec
        # We can remove this test once we no longer use that method.
        instance_raw = jsonutils.to_primitive(
            base_obj.obj_to_primitive(instance))
        res = hw.get_host_numa_usage_from_instance(host, instance_raw)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))

    def test_attr_host(self):
        """Any object exposing a numa_topology attribute works as host."""
        class Host(object):
            # 'obj' instead of 'self' here avoids shadowing the test case's
            # self, which the closure reads for hosttopo.
            def __init__(obj):
                obj.numa_topology = self.hosttopo._to_json()
        host = Host()
        instance = {'numa_topology': self.instancetopo._to_json()}
        res = hw.get_host_numa_usage_from_instance(host, instance)
        self.assertIsInstance(res, six.string_types)
        self._check_usage(objects.NUMATopology.obj_from_db_obj(res))

    def test_never_serialize_result(self):
        """never_serialize_result forces an object result for a JSON host."""
        host = {'numa_topology': self.hosttopo._to_json()}
        instance = {'numa_topology': self.instancetopo}
        res = hw.get_host_numa_usage_from_instance(host, instance,
                                                   never_serialize_result=True)
        self.assertIsInstance(res, objects.NUMATopology)
        self._check_usage(res)

    def test_dict_numa_topology_to_obj(self):
        """instance_topology_from_instance rebuilds the object from a dict."""
        fake_uuid = str(uuid.uuid4())
        instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid,
                                    numa_topology=self.instancetopo)
        instance_dict = base_obj.obj_to_primitive(instance)
        instance_numa_topo = hw.instance_topology_from_instance(instance_dict)
        for expected_cell, actual_cell in zip(self.instancetopo.cells,
                                              instance_numa_topo.cells):
            for k in expected_cell.fields:
                self.assertEqual(getattr(expected_cell, k),
                                 getattr(actual_cell, k))
class VirtMemoryPagesTestCase(test.NoDBTestCase):
    """Tests for memory-page (hugepage) sizing and accounting helpers."""

    def test_cell_instance_pagesize(self):
        """InstanceNUMACell stores the requested pagesize verbatim."""
        cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=2048)
        self.assertEqual(0, cell.id)
        self.assertEqual(set([0]), cell.cpuset)
        self.assertEqual(1024, cell.memory)
        self.assertEqual(2048, cell.pagesize)

    def test_numa_pagesize_usage_from_cell(self):
        """512 MB at 2048 KB pages consumes 256 of the host's pages."""
        instcell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=512, pagesize=2048)
        hostcell = objects.NUMACell(
            id=0, cpuset=set([0]), memory=1024,
            cpu_usage=0, memory_usage=0,
            mempages=[objects.NUMAPagesTopology(
                size_kb=2048,
                total=512,
                used=0)],
            siblings=[], pinned_cpus=set([]))
        topo = hw._numa_pagesize_usage_from_cell(hostcell, instcell, 1)
        self.assertEqual(2048, topo[0].size_kb)
        self.assertEqual(512, topo[0].total)
        self.assertEqual(256, topo[0].used)

    def _test_get_requested_mempages_pagesize(self, spec=None, props=None):
        # Helper: build a flavor/image pair and resolve the pagesize
        # constraint from flavor extra specs and image properties.
        flavor = objects.Flavor(vcpus=16, memory_mb=2048,
                                extra_specs=spec or {})
        image_meta = objects.ImageMeta.from_dict({"properties": props or {}})
        return hw._numa_get_pagesize_constraints(flavor, image_meta)

    def test_get_requested_mempages_pagesize_from_flavor_swipe(self):
        """The symbolic values small/large/any map to their constants."""
        self.assertEqual(
            hw.MEMPAGES_SMALL, self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "small"}))
        self.assertEqual(
            hw.MEMPAGES_LARGE, self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "large"}))
        self.assertEqual(
            hw.MEMPAGES_ANY, self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "any"}))

    def test_get_requested_mempages_pagesize_from_flavor_specific(self):
        """A numeric flavor value is returned as an int."""
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "2048"}))

    def test_get_requested_mempages_pagesize_from_flavor_invalid(self):
        """A non-numeric, non-symbolic value raises MemoryPageSizeInvalid."""
        self.assertRaises(
            exception.MemoryPageSizeInvalid,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "foo"})

    def test_get_requested_mempages_pagesize_from_image_flavor_any(self):
        """Flavor 'any' lets the image pick a specific size."""
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "any"},
                props={"hw_mem_page_size": "2048"}))

    def test_get_requested_mempages_pagesize_from_image_flavor_large(self):
        """Flavor 'large' lets the image pick a specific large size."""
        self.assertEqual(
            2048,
            self._test_get_requested_mempages_pagesize(
                spec={"hw:mem_page_size": "large"},
                props={"hw_mem_page_size": "2048"}))

    def test_get_requested_mempages_pagesize_from_image_forbidden(self):
        """The image may not request large pages when the flavor says small."""
        self.assertRaises(
            exception.MemoryPageSizeForbidden,
            self._test_get_requested_mempages_pagesize,
            {"hw:mem_page_size": "small"},
            {"hw_mem_page_size": "2048"})

    def test_get_requested_mempages_pagesize_from_image_forbidden2(self):
        """The image may not request a pagesize when the flavor sets none."""
        self.assertRaises(
            exception.MemoryPageSizeForbidden,
            self._test_get_requested_mempages_pagesize,
            {}, {"hw_mem_page_size": "2048"})

    def test_cell_accepts_request_wipe(self):
        """A host with only 4K pages serves small/any but not large."""
        host_cell = objects.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                objects.NUMAPagesTopology(size_kb=4, total=262144, used=0),
            ],
            siblings=[], pinned_cpus=set([]))
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_SMALL)
        self.assertEqual(
            4,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_ANY)
        self.assertEqual(
            4,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE)
        self.assertIsNone(hw._numa_cell_supports_pagesize_request(
            host_cell, inst_cell))

    def test_cell_accepts_request_large_pass(self):
        """MEMPAGES_LARGE resolves to the host's 2048 KB pages."""
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=hw.MEMPAGES_LARGE)
        host_cell = objects.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                objects.NUMAPagesTopology(size_kb=4, total=256, used=0),
                objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertEqual(
            2048,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))

    def test_cell_accepts_request_custom_pass(self):
        """An explicit 2048 KB request matches the host's 2048 KB pool."""
        inst_cell = objects.InstanceNUMACell(
            id=0, cpuset=set([0]), memory=1024, pagesize=2048)
        host_cell = objects.NUMACell(
            id=0, cpuset=set([0]), memory=1024, mempages=[
                objects.NUMAPagesTopology(size_kb=4, total=256, used=0),
                objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)
            ],
            siblings=[], pinned_cpus=set([]))
        self.assertEqual(
            2048,
            hw._numa_cell_supports_pagesize_request(host_cell, inst_cell))
class _CPUPinningTestCaseBase(object):
    """Mixin providing assertion helpers shared by the pinning test cases."""

    def assertEqualTopology(self, expected, got):
        """Assert that two CPU topology objects describe the same shape."""
        attrs = ('sockets', 'cores', 'threads')
        for name in attrs:
            self.assertEqual(getattr(expected, name), getattr(got, name),
                             "Mismatch on %s" % name)

    def assertInstanceCellPinned(self, instance_cell, cell_ids=None):
        """Assert a fitted cell landed on an acceptable host cell.

        Every vCPU in the cell must have a pinning entry; the cell id must
        be in cell_ids, or 0 when no candidate ids were given.
        """
        self.assertIsNotNone(instance_cell)
        if cell_ids is not None:
            self.assertIn(instance_cell.id, cell_ids)
        else:
            self.assertEqual(0, instance_cell.id)
        self.assertEqual(len(instance_cell.cpuset),
                         len(instance_cell.cpu_pinning))
class CPUPinningCellTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
    """Tests for hw._numa_fit_instance_cell_with_pinning() on one cell."""

    def test_get_pinning_inst_too_large_cpu(self):
        """More instance vCPUs than host pCPUs => no pinning possible."""
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_inst_too_large_mem(self):
        """Not enough free memory on the host cell => no pinning."""
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=1024,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_inst_not_avail(self):
        """An already-pinned host CPU reduces the free pool below need."""
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    pinned_cpus=set([0]),
                                    siblings=[], mempages=[])
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_no_sibling_fits_empty(self):
        """With no sibling info an exact-size request still pins."""
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2]),
                                    memory=2048, memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)

    def test_get_pinning_no_sibling_fits_w_usage(self):
        """Pinning succeeds around a pre-pinned host CPU."""
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    pinned_cpus=set([1]), mempages=[],
                                    siblings=[])
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2]), memory=1024)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)

    def test_get_pinning_instance_siblings_fits(self):
        """An instance-requested 1s/2c/2t topology fits a 4-CPU cell."""
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        # The requested topology must be preserved in the fitted cell.
        self.assertEqualTopology(topo, inst_pin.cpu_topology)

    def test_get_pinning_instance_siblings_host_siblings_fits_empty(self):
        """Requested topology also fits when the host exposes siblings."""
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        self.assertEqualTopology(topo, inst_pin.cpu_topology)

    def test_get_pinning_instance_siblings_host_siblings_fits_w_usage(self):
        """Partially pinned sibling sets still leave room for 1s/2c/2t."""
        host_pin = objects.NUMACell(
            id=0,
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            pinned_cpus=set([1, 2, 5, 6]),
            siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
            mempages=[])
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3]), memory=2048, cpu_topology=topo)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        self.assertEqualTopology(topo, inst_pin.cpu_topology)

    def test_get_pinning_instance_siblings_host_siblings_fails(self):
        """4 threads/core cannot map onto 2-thread host sibling sets."""
        host_pin = objects.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            siblings=[set([0, 1]), set([2, 3]), set([4, 5]), set([6, 7])],
            mempages=[], pinned_cpus=set([]))
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
        inst_pin = objects.InstanceNUMACell(
            cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]), memory=2048,
            cpu_topology=topo)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertIsNone(inst_pin)

    def test_get_pinning_host_siblings_fit_single_core(self):
        """Without a requested topology, 4 vCPUs pack one 4-thread core."""
        host_pin = objects.NUMACell(
            id=0, cpuset=set([0, 1, 2, 3, 4, 5, 6, 7]),
            memory=4096, memory_usage=0,
            siblings=[set([0, 1, 2, 3]), set([4, 5, 6, 7])],
            mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)

    def test_get_pinning_host_siblings_fit(self):
        """Without a requested topology, the host's 2x2 layout is derived."""
        host_pin = objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[set([0, 1]), set([2, 3])],
                                    mempages=[], pinned_cpus=set([]))
        inst_pin = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]),
                                            memory=2048)
        inst_pin = hw._numa_fit_instance_cell_with_pinning(host_pin, inst_pin)
        self.assertInstanceCellPinned(inst_pin)
        got_topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
        self.assertEqualTopology(got_topo, inst_pin.cpu_topology)
class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
    """Tests for whole-topology pinning placement and usage accounting."""

    def test_host_numa_fit_instance_to_host_single_cell(self):
        """A single-cell instance may land on either empty host cell."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), memory=2048,
                                    memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([])),
                   objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
                                    memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))]
        )
        inst_topo = objects.InstanceNUMATopology(
                cells=[objects.InstanceNUMACell(
                    cpuset=set([0, 1]), memory=2048, cpu_pinning={})])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(0, 1))

    def test_host_numa_fit_instance_to_host_single_cell_w_usage(self):
        """A partially pinned cell 0 forces placement onto cell 1."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1]),
                                    pinned_cpus=set([0]), memory=2048,
                                    memory_usage=0, siblings=[],
                                    mempages=[]),
                   objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
                                    memory_usage=0, siblings=[],
                                    mempages=[], pinned_cpus=set([]))])
        inst_topo = objects.InstanceNUMATopology(
                cells=[objects.InstanceNUMACell(
                    cpuset=set([0, 1]), memory=2048, cpu_pinning={})])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(1,))

    def test_host_numa_fit_instance_to_host_single_cell_fail(self):
        """No host cell has two free pCPUs => placement fails."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1]), memory=2048,
                                    pinned_cpus=set([0]), memory_usage=0,
                                    siblings=[], mempages=[]),
                   objects.NUMACell(id=1, cpuset=set([2, 3]), memory=2048,
                                    pinned_cpus=set([2]), memory_usage=0,
                                    siblings=[], mempages=[])])
        inst_topo = objects.InstanceNUMATopology(
                cells=[objects.InstanceNUMACell(cpuset=set([0, 1]),
                                                memory=2048,
                                                cpu_pinning={})])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        self.assertIsNone(inst_topo)

    def test_host_numa_fit_instance_to_host_fit(self):
        """A two-cell instance maps onto the two empty host cells."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([])),
                   objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))])
        inst_topo = objects.InstanceNUMATopology(
                cells=[objects.InstanceNUMACell(cpuset=set([0, 1]),
                                                memory=2048, cpu_pinning={}),
                       objects.InstanceNUMACell(cpuset=set([2, 3]),
                                                memory=2048, cpu_pinning={})])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(0, 1))

    def test_host_numa_fit_instance_to_host_barely_fit(self):
        """Only cells 0 and 2 retain two free pCPUs; cell 1 is skipped."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=2048, pinned_cpus=set([0]),
                                    siblings=[], mempages=[],
                                    memory_usage=0),
                   objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([4, 5, 6])),
                   objects.NUMACell(id=2, cpuset=set([8, 9, 10, 11]),
                                    memory=2048, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([10, 11]))])
        inst_topo = objects.InstanceNUMATopology(
                cells=[objects.InstanceNUMACell(cpuset=set([0, 1]),
                                                memory=2048, cpu_pinning={}),
                       objects.InstanceNUMACell(cpuset=set([2, 3]),
                                                memory=2048, cpu_pinning={})])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        for cell in inst_topo.cells:
            self.assertInstanceCellPinned(cell, cell_ids=(0, 2))

    def test_host_numa_fit_instance_to_host_fail_capacity(self):
        """Cell 1 has only one free pCPU left, so placement fails."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    mempages=[], siblings=[],
                                    pinned_cpus=set([0])),
                   objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([4, 5, 6]))])
        inst_topo = objects.InstanceNUMATopology(
                cells=[objects.InstanceNUMACell(cpuset=set([0, 1]),
                                                memory=2048, cpu_pinning={}),
                       objects.InstanceNUMACell(cpuset=set([2, 3]),
                                                memory=2048, cpu_pinning={})])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        self.assertIsNone(inst_topo)

    def test_host_numa_fit_instance_to_host_fail_topology(self):
        """Three instance cells cannot map onto a two-cell host."""
        host_topo = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([])),
                   objects.NUMACell(id=1, cpuset=set([4, 5, 6, 7]),
                                    memory=4096, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([]))])
        inst_topo = objects.InstanceNUMATopology(
                cells=[objects.InstanceNUMACell(cpuset=set([0, 1]),
                                                memory=1024, cpu_pinning={}),
                       objects.InstanceNUMACell(cpuset=set([2, 3]),
                                                memory=1024, cpu_pinning={}),
                       objects.InstanceNUMACell(cpuset=set([4, 5]),
                                                memory=1024, cpu_pinning={})])
        inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
        self.assertIsNone(inst_topo)

    def test_cpu_pinning_usage_from_instances(self):
        """Two instances' pinnings are merged into the host cell."""
        host_pin = objects.NUMATopology(
                cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                        memory=4096, cpu_usage=0,
                                        memory_usage=0, siblings=[],
                                        mempages=[], pinned_cpus=set([]))])
        inst_pin_1 = objects.InstanceNUMATopology(
                cells=[objects.InstanceNUMACell(
                    cpuset=set([0, 1]), id=0, cpu_pinning={0: 0, 1: 3},
                    memory=2048)])
        inst_pin_2 = objects.InstanceNUMATopology(
                cells = [objects.InstanceNUMACell(
                    cpuset=set([0, 1]), id=0, cpu_pinning={0: 1, 1: 2},
                    memory=2048)])
        host_pin = hw.numa_usage_from_instances(
                host_pin, [inst_pin_1, inst_pin_2])
        self.assertEqual(set([0, 1, 2, 3]),
                         host_pin.cells[0].pinned_cpus)

    def test_cpu_pinning_usage_from_instances_free(self):
        """free=True unpins exactly the CPUs the instances held."""
        host_pin = objects.NUMATopology(
            cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                    memory=4096, cpu_usage=0, memory_usage=0,
                                    siblings=[], mempages=[],
                                    pinned_cpus=set([0, 1, 3]))])
        inst_pin_1 = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                cpuset=set([0]), memory=1024, cpu_pinning={0: 1}, id=0)])
        inst_pin_2 = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                cpuset=set([0, 1]), memory=1024, id=0,
                cpu_pinning={0: 0, 1: 3})])
        host_pin = hw.numa_usage_from_instances(
            host_pin, [inst_pin_1, inst_pin_2], free=True)
        self.assertEqual(set(), host_pin.cells[0].pinned_cpus)

    def test_host_usage_from_instances_fail(self):
        """Two instances claiming the same pCPU raise CPUPinningInvalid."""
        host_pin = objects.NUMATopology(
                cells=[objects.NUMACell(id=0, cpuset=set([0, 1, 2, 3]),
                                        memory=4096, cpu_usage=0,
                                        memory_usage=0, siblings=[],
                                        mempages=[], pinned_cpus=set([]))])
        inst_pin_1 = objects.InstanceNUMATopology(
                cells=[objects.InstanceNUMACell(
                    cpuset=set([0, 1]), memory=2048, id=0,
                    cpu_pinning={0: 0, 1: 3})])
        inst_pin_2 = objects.InstanceNUMATopology(
                cells = [objects.InstanceNUMACell(
                    cpuset=set([0, 1]), id=0, memory=2048,
                    cpu_pinning={0: 0, 1: 2})])
        self.assertRaises(exception.CPUPinningInvalid,
                          hw.numa_usage_from_instances, host_pin,
                          [inst_pin_1, inst_pin_2])
| |
import unittest
import warnings
from collections import OrderedDict
import numpy as np
import numpy.testing as np_test
from pgmpy.extern.six.moves import range
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.factors.discrete import JointProbabilityDistribution as JPD
from pgmpy.factors.discrete import factor_divide
from pgmpy.factors.discrete import factor_product
from pgmpy.factors.discrete.CPD import TabularCPD
from pgmpy.independencies import Independencies
from pgmpy.models import BayesianModel
from pgmpy.models import MarkovModel
class TestFactorInit(unittest.TestCase):
    """Constructor tests for DiscreteFactor."""

    def test_class_init(self):
        variables = ['x1', 'x2', 'x3']
        phi = DiscreteFactor(variables, [2, 2, 2], np.ones(8))
        self.assertEqual(variables, phi.variables)
        np_test.assert_array_equal(np.array([2, 2, 2]), phi.cardinality)
        np_test.assert_array_equal(np.ones(8).reshape(2, 2, 2), phi.values)

    def test_class_init1(self):
        # Non-string variable names are allowed.
        phi = DiscreteFactor([1, 2, 3], [2, 3, 2], np.arange(12))
        self.assertEqual([1, 2, 3], phi.variables)
        np_test.assert_array_equal(np.array([2, 3, 2]), phi.cardinality)
        np_test.assert_array_equal(np.arange(12).reshape(2, 3, 2), phi.values)

    def test_class_init_sizeerror(self):
        # 9 values cannot fill a 2x2x2 table.
        self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(9))

    def test_class_init_typeerror(self):
        # A bare string is not a valid variable list; duplicates are rejected.
        self.assertRaises(TypeError, DiscreteFactor, 'x1', [3], [1, 2, 3])
        self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x1', 'x3'], [2, 3, 2], range(12))

    def test_init_size_var_card_not_equal(self):
        # Two variables but only one cardinality entry.
        self.assertRaises(ValueError, DiscreteFactor, ['x1', 'x2'], [2], np.ones(2))
class TestFactorMethods(unittest.TestCase):
    """Behavioural tests for ``DiscreteFactor`` operations: scope/assignment,
    marginalize, normalize, reduce, product, divide, equality, hashing and
    maximize."""

    def setUp(self):
        self.phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], np.random.uniform(5, 10, size=8))
        self.phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
        self.phi2 = DiscreteFactor([('x1', 0), ('x2', 0), ('x3', 0)], [2, 3, 2], range(12))
        # This larger factor (phi3) caused a bug in reduce
        card3 = [3, 3, 3, 2, 2, 2, 2, 2, 2]
        # BUG FIX: ``np.float`` was deprecated in NumPy 1.20 and removed in
        # NumPy 1.24; the builtin ``float`` is the documented replacement.
        self.phi3 = DiscreteFactor(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'],
                                   card3, np.arange(np.prod(card3), dtype=float))
        self.tup1 = ('x1', 'x2')
        self.tup2 = ('x2', 'x3')
        self.tup3 = ('x3', (1, 'x4'))
        self.phi4 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 3, 4], np.random.uniform(3, 10, size=24))
        self.phi5 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 3, 4], range(24))
        self.card6 = [4, 2, 1, 3, 5, 6]
        # BUG FIX: ``float`` instead of the removed ``np.float`` alias here too.
        self.phi6 = DiscreteFactor([self.tup1, self.tup2, self.tup3, self.tup1 + self.tup2,
                                    self.tup2 + self.tup3, self.tup3 + self.tup1], self.card6,
                                   np.arange(np.prod(self.card6), dtype=float))
        self.var1 = 'x1'
        self.var2 = ('x2', 1)
        self.var3 = frozenset(['x1', 'x2'])
        self.phi7 = DiscreteFactor([self.var1, self.var2], [3, 2], [3, 2, 4, 5, 9, 8])
        self.phi8 = DiscreteFactor([self.var2, self.var3], [2, 2], [2, 1, 5, 6])
        self.phi9 = DiscreteFactor([self.var1, self.var3], [3, 2], [3, 2, 4, 5, 9, 8])
        self.phi10 = DiscreteFactor([self.var3], [2], [3, 6])

    def test_scope(self):
        self.assertListEqual(self.phi.scope(), ['x1', 'x2', 'x3'])
        self.assertListEqual(self.phi1.scope(), ['x1', 'x2', 'x3'])
        self.assertListEqual(self.phi4.scope(), [self.tup1, self.tup2, self.tup3])

    def test_assignment(self):
        self.assertListEqual(self.phi.assignment([0]), [[('x1', 0), ('x2', 0), ('x3', 0)]])
        self.assertListEqual(self.phi.assignment([4, 5, 6]), [[('x1', 1), ('x2', 0), ('x3', 0)],
                                                              [('x1', 1), ('x2', 0), ('x3', 1)],
                                                              [('x1', 1), ('x2', 1), ('x3', 0)]])
        self.assertListEqual(self.phi1.assignment(np.array([4, 5, 6])), [[('x1', 0), ('x2', 2), ('x3', 0)],
                                                                         [('x1', 0), ('x2', 2), ('x3', 1)],
                                                                         [('x1', 1), ('x2', 0), ('x3', 0)]])
        self.assertListEqual(self.phi4.assignment(np.array([11, 12, 23])),
                             [[(self.tup1, 0), (self.tup2, 2), (self.tup3, 3)],
                              [(self.tup1, 1), (self.tup2, 0), (self.tup3, 0)],
                              [(self.tup1, 1), (self.tup2, 2), (self.tup3, 3)]])

    def test_assignment_indexerror(self):
        self.assertRaises(IndexError, self.phi.assignment, [10])
        self.assertRaises(IndexError, self.phi.assignment, [1, 3, 10, 5])
        self.assertRaises(IndexError, self.phi.assignment, np.array([1, 3, 10, 5]))
        self.assertRaises(IndexError, self.phi4.assignment, [2, 24])
        self.assertRaises(IndexError, self.phi4.assignment, np.array([24, 2, 4, 30]))

    def test_get_cardinality(self):
        self.assertEqual(self.phi.get_cardinality(['x1']), {'x1': 2})
        self.assertEqual(self.phi.get_cardinality(['x2']), {'x2': 2})
        self.assertEqual(self.phi.get_cardinality(['x3']), {'x3': 2})
        self.assertEqual(self.phi.get_cardinality(['x1', 'x2']), {'x1': 2, 'x2': 2})
        self.assertEqual(self.phi.get_cardinality(['x1', 'x3']), {'x1': 2, 'x3': 2})
        self.assertEqual(self.phi.get_cardinality(['x1', 'x2', 'x3']), {'x1': 2, 'x2': 2, 'x3': 2})
        self.assertEqual(self.phi4.get_cardinality([self.tup1, self.tup3]),
                         {self.tup1: 2, self.tup3: 4})

    def test_get_cardinality_scopeerror(self):
        self.assertRaises(ValueError, self.phi.get_cardinality, ['x4'])
        self.assertRaises(ValueError, self.phi4.get_cardinality, [('x1', 'x4')])
        self.assertRaises(ValueError, self.phi4.get_cardinality, [('x3', (2, 'x4'))])

    def test_get_cardinality_typeerror(self):
        self.assertRaises(TypeError, self.phi.get_cardinality, 'x1')

    def test_marginalize(self):
        self.phi1.marginalize(['x1'])
        np_test.assert_array_equal(self.phi1.values, np.array([[6, 8],
                                                               [10, 12],
                                                               [14, 16]]))
        self.phi1.marginalize(['x2'])
        np_test.assert_array_equal(self.phi1.values, np.array([30, 36]))
        self.phi1.marginalize(['x3'])
        np_test.assert_array_equal(self.phi1.values, np.array(66))
        self.phi5.marginalize([self.tup1])
        np_test.assert_array_equal(self.phi5.values, np.array([[12, 14, 16, 18],
                                                               [20, 22, 24, 26],
                                                               [28, 30, 32, 34]]))
        self.phi5.marginalize([self.tup2])
        np_test.assert_array_equal(self.phi5.values, np.array([60, 66, 72, 78]))
        self.phi5.marginalize([self.tup3])
        np_test.assert_array_equal(self.phi5.values, np.array([276]))

    def test_marginalize_scopeerror(self):
        self.assertRaises(ValueError, self.phi.marginalize, ['x4'])
        self.phi.marginalize(['x1'])
        # Marginalizing the same variable twice must fail.
        self.assertRaises(ValueError, self.phi.marginalize, ['x1'])
        self.assertRaises(ValueError, self.phi4.marginalize, [('x1', 'x3')])
        self.phi4.marginalize([self.tup2])
        self.assertRaises(ValueError, self.phi4.marginalize, [self.tup2])

    def test_marginalize_typeerror(self):
        self.assertRaises(TypeError, self.phi.marginalize, 'x1')

    def test_marginalize_shape(self):
        values = ['A', 'D', 'F', 'H']
        phi3_mar = self.phi3.marginalize(values, inplace=False)
        # Previously a sorting error caused these to be different
        np_test.assert_array_equal(phi3_mar.values.shape, phi3_mar.cardinality)
        phi6_mar = self.phi6.marginalize([self.tup1, self.tup2], inplace=False)
        np_test.assert_array_equal(phi6_mar.values.shape, phi6_mar.cardinality)
        self.phi6.marginalize([self.tup1, self.tup3 + self.tup1], inplace=True)
        np_test.assert_array_equal(self.phi6.values.shape, self.phi6.cardinality)

    def test_normalize(self):
        self.phi1.normalize()
        np_test.assert_almost_equal(self.phi1.values,
                                    np.array([[[0, 0.01515152],
                                               [0.03030303, 0.04545455],
                                               [0.06060606, 0.07575758]],
                                              [[0.09090909, 0.10606061],
                                               [0.12121212, 0.13636364],
                                               [0.15151515, 0.16666667]]]))
        self.phi5.normalize()
        np_test.assert_almost_equal(self.phi5.values,
                                    [[[0., 0.00362319, 0.00724638, 0.01086957],
                                      [0.01449275, 0.01811594, 0.02173913, 0.02536232],
                                      [0.02898551, 0.0326087, 0.03623188, 0.03985507]],
                                     [[0.04347826, 0.04710145, 0.05072464, 0.05434783],
                                      [0.05797101, 0.0615942, 0.06521739, 0.06884058],
                                      [0.07246377, 0.07608696, 0.07971014, 0.08333333]]])

    def test_reduce(self):
        self.phi1.reduce([('x1', 0), ('x2', 0)])
        np_test.assert_array_equal(self.phi1.values, np.array([0, 1]))
        self.phi5.reduce([(self.tup1, 0), (self.tup3, 1)])
        np_test.assert_array_equal(self.phi5.values, np.array([1, 5, 9]))

    def test_reduce1(self):
        # Order of the evidence list must not matter.
        self.phi1.reduce([('x2', 0), ('x1', 0)])
        np_test.assert_array_equal(self.phi1.values, np.array([0, 1]))
        self.phi5.reduce([(self.tup3, 1), (self.tup1, 0)])
        np_test.assert_array_equal(self.phi5.values, np.array([1, 5, 9]))

    def test_reduce_shape(self):
        values = [('A', 0), ('D', 0), ('F', 0), ('H', 1)]
        phi3_reduced = self.phi3.reduce(values, inplace=False)
        # Previously a sorting error caused these to be different
        np_test.assert_array_equal(phi3_reduced.values.shape, phi3_reduced.cardinality)
        values = [(self.tup1, 2), (self.tup3, 0)]
        phi6_reduced = self.phi6.reduce(values, inplace=False)
        np_test.assert_array_equal(phi6_reduced.values.shape, phi6_reduced.cardinality)
        self.phi6.reduce(values, inplace=True)
        np_test.assert_array_equal(self.phi6.values.shape, self.phi6.cardinality)

    def test_complete_reduce(self):
        # Reducing every variable leaves a scalar factor with empty scope.
        self.phi1.reduce([('x1', 0), ('x2', 0), ('x3', 1)])
        np_test.assert_array_equal(self.phi1.values, np.array([1]))
        np_test.assert_array_equal(self.phi1.cardinality, np.array([]))
        np_test.assert_array_equal(self.phi1.variables, OrderedDict())
        self.phi5.reduce([(('x1', 'x2'), 1), (('x2', 'x3'), 0), (('x3', (1, 'x4')), 3)])
        np_test.assert_array_equal(self.phi5.values, np.array([15]))
        np_test.assert_array_equal(self.phi5.cardinality, np.array([]))
        np_test.assert_array_equal(self.phi5.variables, OrderedDict())

    def test_reduce_typeerror(self):
        self.assertRaises(TypeError, self.phi1.reduce, 'x10')
        self.assertRaises(TypeError, self.phi1.reduce, ['x10'])
        self.assertRaises(TypeError, self.phi1.reduce, [('x1', 'x2')])
        self.assertRaises(TypeError, self.phi1.reduce, [(0, 'x1')])
        self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 'x1')])
        self.assertRaises(TypeError, self.phi1.reduce, [(0.1, 0.1)])
        self.assertRaises(TypeError, self.phi1.reduce, [('x1', 0.1)])
        self.assertRaises(TypeError, self.phi5.reduce, [(('x1', 'x2'), 0), (('x2', 'x3'), 0.2)])

    def test_reduce_scopeerror(self):
        self.assertRaises(ValueError, self.phi1.reduce, [('x4', 1)])
        self.assertRaises(ValueError, self.phi5.reduce, [((('x1', 0.1), 0))])

    def test_reduce_sizeerror(self):
        self.assertRaises(IndexError, self.phi1.reduce, [('x3', 5)])
        self.assertRaises(IndexError, self.phi5.reduce, [(('x2', 'x3'), 3)])

    def test_identity_factor(self):
        identity_factor = self.phi.identity_factor()
        self.assertEqual(list(identity_factor.variables), ['x1', 'x2', 'x3'])
        np_test.assert_array_equal(identity_factor.cardinality, [2, 2, 2])
        np_test.assert_array_equal(identity_factor.values, np.ones(8).reshape(2, 2, 2))
        identity_factor1 = self.phi5.identity_factor()
        self.assertEqual(list(identity_factor1.variables), [self.tup1, self.tup2, self.tup3])
        np_test.assert_array_equal(identity_factor1.cardinality, [2, 3, 4])
        np_test.assert_array_equal(identity_factor1.values, np.ones(24).reshape(2, 3, 4))

    def test_factor_product(self):
        phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
        phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
        prod = factor_product(phi, phi1)
        expected_factor = DiscreteFactor(['x1', 'x2', 'x3', 'x4'], [2, 2, 2, 2],
                                         [0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9])
        self.assertEqual(prod, expected_factor)
        self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3', 'x4'])
        phi = DiscreteFactor(['x1', 'x2'], [3, 2], range(6))
        phi1 = DiscreteFactor(['x2', 'x3'], [2, 2], range(4))
        prod = factor_product(phi, phi1)
        expected_factor = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2],
                                         [0, 0, 2, 3, 0, 2, 6, 9, 0, 4, 10, 15])
        self.assertEqual(prod, expected_factor)
        self.assertEqual(prod.variables, expected_factor.variables)
        prod = factor_product(self.phi7, self.phi8)
        expected_factor = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2],
                                         [6, 3, 10, 12, 8, 4, 25, 30, 18, 9, 40, 48])
        self.assertEqual(prod, expected_factor)
        self.assertEqual(prod.variables, expected_factor.variables)

    def test_product(self):
        phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
        phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
        prod = phi.product(phi1, inplace=False)
        expected_factor = DiscreteFactor(['x1', 'x2', 'x3', 'x4'], [2, 2, 2, 2],
                                         [0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9])
        self.assertEqual(prod, expected_factor)
        self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3', 'x4'])
        phi = DiscreteFactor(['x1', 'x2'], [3, 2], range(6))
        phi1 = DiscreteFactor(['x2', 'x3'], [2, 2], range(4))
        prod = phi.product(phi1, inplace=False)
        expected_factor = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2],
                                         [0, 0, 2, 3, 0, 2, 6, 9, 0, 4, 10, 15])
        self.assertEqual(prod, expected_factor)
        self.assertEqual(sorted(prod.variables), ['x1', 'x2', 'x3'])
        phi7_copy = self.phi7
        phi7_copy.product(self.phi8, inplace=True)
        expected_factor = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2],
                                         [6, 3, 10, 12, 8, 4, 25, 30, 18, 9, 40, 48])
        self.assertEqual(expected_factor, phi7_copy)
        self.assertEqual(phi7_copy.variables, [self.var1, self.var2, self.var3])

    def test_factor_product_non_factor_arg(self):
        self.assertRaises(TypeError, factor_product, 1, 2)

    def test_factor_mul(self):
        phi = DiscreteFactor(['x1', 'x2'], [2, 2], range(4))
        phi1 = DiscreteFactor(['x3', 'x4'], [2, 2], range(4))
        prod = phi * phi1
        sorted_vars = ['x1', 'x2', 'x3', 'x4']
        # Bring the product's axes into sorted variable order before comparing.
        for axis in range(prod.values.ndim):
            exchange_index = prod.variables.index(sorted_vars[axis])
            prod.variables[axis], prod.variables[exchange_index] = prod.variables[exchange_index], prod.variables[axis]
            prod.values = prod.values.swapaxes(axis, exchange_index)
        np_test.assert_almost_equal(prod.values.ravel(),
                                    np.array([0, 0, 0, 0, 0, 1, 2, 3,
                                              0, 2, 4, 6, 0, 3, 6, 9]))
        self.assertEqual(prod.variables, ['x1', 'x2', 'x3', 'x4'])

    def test_factor_divide(self):
        phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 2, 4])
        phi2 = DiscreteFactor(['x1'], [2], [1, 2])
        expected_factor = phi1.divide(phi2, inplace=False)
        phi3 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 1, 2])
        self.assertEqual(phi3, expected_factor)
        self.phi9.divide(self.phi10, inplace=True)
        np_test.assert_array_almost_equal(self.phi9.values, np.array([1.000000, 0.333333, 1.333333,
                                                                      0.833333, 3.000000, 1.333333]).reshape(3, 2))
        self.assertEqual(self.phi9.variables, [self.var1, self.var3])

    def test_factor_divide_truediv(self):
        phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 2, 4])
        phi2 = DiscreteFactor(['x1'], [2], [1, 2])
        div = phi1 / phi2
        phi3 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 1, 2])
        self.assertEqual(phi3, div)
        self.phi9 = self.phi9 / self.phi10
        np_test.assert_array_almost_equal(self.phi9.values, np.array([1.000000, 0.333333, 1.333333,
                                                                      0.833333, 3.000000, 1.333333]).reshape(3, 2))
        self.assertEqual(self.phi9.variables, [self.var1, self.var3])

    def test_factor_divide_invalid(self):
        # Division by zero entries yields inf rather than raising.
        phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
        phi2 = DiscreteFactor(['x1'], [2], [0, 2])
        div = phi1.divide(phi2, inplace=False)
        np_test.assert_array_equal(div.values.ravel(), np.array([np.inf, np.inf, 1.5, 2]))

    def test_factor_divide_no_common_scope(self):
        phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
        phi2 = DiscreteFactor(['x3'], [2], [0, 2])
        self.assertRaises(ValueError, factor_divide, phi1, phi2)
        phi2 = DiscreteFactor([self.var3], [2], [2, 1])
        self.assertRaises(ValueError, factor_divide, self.phi7, phi2)

    def test_factor_divide_non_factor_arg(self):
        self.assertRaises(TypeError, factor_divide, 1, 1)

    def test_eq(self):
        self.assertFalse(self.phi == self.phi1)
        self.assertTrue(self.phi == self.phi)
        self.assertTrue(self.phi1 == self.phi1)
        self.assertTrue(self.phi5 == self.phi5)
        self.assertFalse(self.phi5 == self.phi6)
        self.assertTrue(self.phi6 == self.phi6)

    def test_eq1(self):
        # Equality must hold up to a permutation of the variable axes.
        phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 4, 3], range(24))
        phi2 = DiscreteFactor(['x2', 'x1', 'x3'], [4, 2, 3],
                              [0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17, 6, 7,
                               8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
        self.assertTrue(phi1 == phi2)
        self.assertEqual(phi2.variables, ['x2', 'x1', 'x3'])
        phi3 = DiscreteFactor([self.tup1, self.tup2, self.tup3], [2, 4, 3], range(24))
        phi4 = DiscreteFactor([self.tup2, self.tup1, self.tup3], [4, 2, 3],
                              [0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17,
                               6, 7, 8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
        self.assertTrue(phi3 == phi4)

    def test_hash(self):
        # Hash must be invariant under axis permutation, matching __eq__.
        phi1 = DiscreteFactor(['x1', 'x2'], [2, 2], [1, 2, 3, 4])
        phi2 = DiscreteFactor(['x2', 'x1'], [2, 2], [1, 3, 2, 4])
        self.assertEqual(hash(phi1), hash(phi2))
        phi1 = DiscreteFactor(['x1', 'x2', 'x3'], [2, 2, 2], range(8))
        phi2 = DiscreteFactor(['x3', 'x1', 'x2'], [2, 2, 2], [0, 2, 4, 6, 1, 3, 5, 7])
        self.assertEqual(hash(phi1), hash(phi2))
        var1 = TestHash(1, 2)
        phi3 = DiscreteFactor([var1, self.var2, self.var3], [2, 4, 3], range(24))
        phi4 = DiscreteFactor([self.var2, var1, self.var3], [4, 2, 3],
                              [0, 1, 2, 12, 13, 14, 3, 4, 5, 15, 16, 17,
                               6, 7, 8, 18, 19, 20, 9, 10, 11, 21, 22, 23])
        self.assertEqual(hash(phi3), hash(phi4))
        var1 = TestHash(2, 3)
        var2 = TestHash('x2', 1)
        phi3 = DiscreteFactor([var1, var2, self.var3], [2, 2, 2], range(8))
        phi4 = DiscreteFactor([self.var3, var1, var2], [2, 2, 2], [0, 2, 4, 6, 1, 3, 5, 7])
        self.assertEqual(hash(phi3), hash(phi4))

    def test_maximize_single(self):
        self.phi1.maximize(['x1'])
        self.assertEqual(self.phi1, DiscreteFactor(['x2', 'x3'], [3, 2], [6, 7, 8, 9, 10, 11]))
        self.phi1.maximize(['x2'])
        self.assertEqual(self.phi1, DiscreteFactor(['x3'], [2], [10, 11]))
        self.phi2 = DiscreteFactor(['x1', 'x2', 'x3'], [3, 2, 2], [0.25, 0.35, 0.08, 0.16, 0.05, 0.07,
                                                                   0.00, 0.00, 0.15, 0.21, 0.08, 0.18])
        self.phi2.maximize(['x2'])
        self.assertEqual(self.phi2, DiscreteFactor(['x1', 'x3'], [3, 2], [0.25, 0.35, 0.05,
                                                                          0.07, 0.15, 0.21]))
        self.phi5.maximize([('x1', 'x2')])
        self.assertEqual(self.phi5, DiscreteFactor([('x2', 'x3'), ('x3', (1, 'x4'))], [3, 4],
                                                   [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]))
        self.phi5.maximize([('x2', 'x3')])
        self.assertEqual(self.phi5, DiscreteFactor([('x3', (1, 'x4'))], [4], [20, 21, 22, 23]))

    def test_maximize_list(self):
        self.phi1.maximize(['x1', 'x2'])
        self.assertEqual(self.phi1, DiscreteFactor(['x3'], [2], [10, 11]))
        self.phi5.maximize([('x1', 'x2'), ('x2', 'x3')])
        self.assertEqual(self.phi5, DiscreteFactor([('x3', (1, 'x4'))], [4], [20, 21, 22, 23]))

    def test_maximize_shape(self):
        values = ['A', 'D', 'F', 'H']
        phi3_max = self.phi3.maximize(values, inplace=False)
        # Previously a sorting error caused these to be different
        np_test.assert_array_equal(phi3_max.values.shape, phi3_max.cardinality)
        phi = DiscreteFactor([self.var1, self.var2, self.var3], [3, 2, 2], [3, 2, 4, 5, 9, 8, 3, 2, 4, 5, 9, 8])
        # BUG FIX: this previously called ``marginalize`` inside a maximize
        # test; both leave the same shape, but only ``maximize`` exercises the
        # code path this test is named for.
        phi_max = phi.maximize([self.var1, self.var2], inplace=False)
        np_test.assert_array_equal(phi_max.values.shape, phi_max.cardinality)

    def test_maximize_scopeerror(self):
        self.assertRaises(ValueError, self.phi.maximize, ['x10'])

    def test_maximize_typeerror(self):
        self.assertRaises(TypeError, self.phi.maximize, 'x1')

    def tearDown(self):
        del self.phi
        del self.phi1
        del self.phi2
        del self.phi3
        del self.phi4
        del self.phi5
        del self.phi6
        del self.phi7
        del self.phi8
        del self.phi9
        del self.phi10
class TestHash:
    """Dummy variable type with custom equality and hashing, used to check
    that DiscreteFactor's hash handles arbitrary hashable variables."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __hash__(self):
        # Hash of the concatenated string representations of both fields.
        return hash("{0}{1}".format(self.x, self.y))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.x, self.y) == (other.x, other.y)
class TestTabularCPDInit(unittest.TestCase):
    """Constructor tests for ``TabularCPD``."""

    def test_cpd_init(self):
        cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1]])
        self.assertEqual(cpd.variable, 'grade')
        self.assertEqual(cpd.variable_card, 3)
        self.assertEqual(list(cpd.variables), ['grade'])
        np_test.assert_array_equal(cpd.cardinality, np.array([3]))
        np_test.assert_array_almost_equal(cpd.values, np.array([0.1, 0.1, 0.1]))

        values = [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                  [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                  [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]
        evidence = ['intel', 'diff']
        evidence_card = [3, 2]
        # BUG FIX: these loops previously ignored their loop variables and
        # always constructed the CPD from the same hard-coded arguments, so
        # the alternative (numpy-array) input forms were never actually
        # exercised.  ``set(evidence)`` was dropped from the valid evidence
        # inputs because evidence order must line up with ``evidence_card``
        # and with the order-sensitive assertions below, which an unordered
        # set cannot guarantee.
        valid_value_inputs = [values, np.asarray(values)]
        valid_evidence_inputs = [evidence, np.asarray(evidence)]
        valid_evidence_card_inputs = [evidence_card, np.asarray(evidence_card)]
        for value_input in valid_value_inputs:
            for evidence_input in valid_evidence_inputs:
                for card_input in valid_evidence_card_inputs:
                    cpd = TabularCPD('grade', 3, value_input,
                                     evidence=evidence_input, evidence_card=card_input)
                    self.assertEqual(cpd.variable, 'grade')
                    self.assertEqual(cpd.variable_card, 3)
                    np_test.assert_array_equal(cpd.cardinality, np.array([3, 3, 2]))
                    self.assertListEqual(list(cpd.variables), ['grade', 'intel', 'diff'])
                    np_test.assert_array_equal(cpd.values, np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
                                                                     0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
                                                                     0.8, 0.8, 0.8, 0.8, 0.8, 0.8]).reshape(3, 3, 2))

        # A float entry in evidence_card is accepted here.
        cpd = TabularCPD('grade', 3, [[0.1, 0.1],
                                      [0.1, 0.1],
                                      [0.8, 0.8]],
                         evidence=['evi1'], evidence_card=[2.0])
        self.assertEqual(cpd.variable, 'grade')
        self.assertEqual(cpd.variable_card, 3)
        np_test.assert_array_equal(cpd.cardinality, np.array([3, 2]))
        self.assertListEqual(list(cpd.variables), ['grade', 'evi1'])
        np_test.assert_array_equal(cpd.values, np.array([0.1, 0.1,
                                                         0.1, 0.1,
                                                         0.8, 0.8]).reshape(3, 2))

    def test_cpd_init_event_card_not_int(self):
        self.assertRaises(TypeError, TabularCPD, 'event', '2', [[0.1, 0.9]])

    def test_cpd_init_cardinality_not_specified(self):
        # Mismatched lengths of evidence and evidence_card must be rejected.
        self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                               [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                               [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
                          ['evi1', 'evi2'], [5])
        self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                               [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                               [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
                          ['evi1', 'evi2'], [5.0])
        self.assertRaises(ValueError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                               [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                               [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
                          ['evi1'], [5, 6])
        self.assertRaises(TypeError, TabularCPD, 'event', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                              [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                              [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
                          'evi1', [5, 6])

    def test_cpd_init_value_not_2d(self):
        self.assertRaises(TypeError, TabularCPD, 'event', 3, [[[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                               [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                               [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]],
                          ['evi1', 'evi2'], [5, 6])
class TestTabularCPDMethods(unittest.TestCase):
    """Behavioural tests for ``TabularCPD``: marginalize, normalize, copy,
    reduce, get_cpd and reorder_parents."""

    def setUp(self):
        self.cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                           [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                           [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
                              evidence=['intel', 'diff'], evidence_card=[3, 2])
        self.cpd2 = TabularCPD('J', 2, [[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
                                        [0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]],
                               evidence=['A', 'B', 'C'], evidence_card=[2, 2, 2])

    def test_marginalize_1(self):
        self.cpd.marginalize(['diff'])
        self.assertEqual(self.cpd.variable, 'grade')
        self.assertEqual(self.cpd.variable_card, 3)
        self.assertListEqual(list(self.cpd.variables), ['grade', 'intel'])
        np_test.assert_array_equal(self.cpd.cardinality, np.array([3, 3]))
        np_test.assert_array_equal(self.cpd.values.ravel(), np.array([0.1, 0.1, 0.1,
                                                                      0.1, 0.1, 0.1,
                                                                      0.8, 0.8, 0.8]))

    def test_marginalize_2(self):
        # The CPD's own variable cannot be marginalized out.
        self.assertRaises(ValueError, self.cpd.marginalize, ['grade'])

    def test_marginalize_3(self):
        # Marginalizing one at a time must match marginalizing both at once.
        copy_cpd = self.cpd.copy()
        copy_cpd.marginalize(['intel', 'diff'])
        self.cpd.marginalize(['intel'])
        self.cpd.marginalize(['diff'])
        np_test.assert_array_almost_equal(self.cpd.values, copy_cpd.values)

    def test_normalize(self):
        cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
                                       ['intel', 'diff'], [2, 2])
        cpd_un_normalized.normalize()
        np_test.assert_array_almost_equal(cpd_un_normalized.values, np.array([[[0.63636364, 0.33333333],
                                                                               [0.6, 0.2]],
                                                                              [[0.36363636, 0.66666667],
                                                                               [0.4, 0.8]]]))

    def test_normalize_not_in_place(self):
        cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
                                       ['intel', 'diff'], [2, 2])
        np_test.assert_array_almost_equal(cpd_un_normalized.normalize(inplace=False).values,
                                          np.array([[[0.63636364, 0.33333333],
                                                     [0.6, 0.2]],
                                                    [[0.36363636, 0.66666667],
                                                     [0.4, 0.8]]]))

    def test_normalize_original_safe(self):
        # inplace=False must leave the original values untouched.
        cpd_un_normalized = TabularCPD('grade', 2, [[0.7, 0.2, 0.6, 0.2], [0.4, 0.4, 0.4, 0.8]],
                                       ['intel', 'diff'], [2, 2])
        cpd_un_normalized.normalize(inplace=False)
        np_test.assert_array_almost_equal(cpd_un_normalized.values, np.array([[[0.7, 0.2], [0.6, 0.2]],
                                                                              [[0.4, 0.4], [0.4, 0.8]]]))

    def test__repr__(self):
        grade_cpd = TabularCPD('grade', 3, [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                            [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                            [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
                               evidence=['intel', 'diff'], evidence_card=[3, 2])
        intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
        diff_cpd = TabularCPD('grade', 3, [[0.1, 0.1], [0.1, 0.1], [0.8, 0.8]], evidence=['diff'], evidence_card=[2])
        self.assertEqual(repr(grade_cpd), '<TabularCPD representing P(grade:3 | intel:3, diff:2) at {address}>'
                         .format(address=hex(id(grade_cpd))))
        self.assertEqual(repr(intel_cpd), '<TabularCPD representing P(intel:3) at {address}>'
                         .format(address=hex(id(intel_cpd))))
        self.assertEqual(repr(diff_cpd), '<TabularCPD representing P(grade:3 | diff:2) at {address}>'
                         .format(address=hex(id(diff_cpd))))

    def test_copy(self):
        copy_cpd = self.cpd.copy()
        np_test.assert_array_equal(self.cpd.get_cpd(), copy_cpd.get_cpd())

    def test_copy_original_safe(self):
        # Mutating the copy must not affect the original.
        copy_cpd = self.cpd.copy()
        copy_cpd.reorder_parents(['diff', 'intel'])
        np_test.assert_array_equal(self.cpd.get_cpd(), np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                                 [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                                 [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]))

    def test_reduce_1(self):
        self.cpd.reduce([('diff', 0)])
        np_test.assert_array_equal(self.cpd.get_cpd(), np.array([[0.1, 0.1, 0.1],
                                                                 [0.1, 0.1, 0.1],
                                                                 [0.8, 0.8, 0.8]]))

    def test_reduce_2(self):
        self.cpd.reduce([('intel', 0)])
        np_test.assert_array_equal(self.cpd.get_cpd(), np.array([[0.1, 0.1],
                                                                 [0.1, 0.1],
                                                                 [0.8, 0.8]]))

    def test_reduce_3(self):
        self.cpd.reduce([('intel', 0), ('diff', 0)])
        np_test.assert_array_equal(self.cpd.get_cpd(), np.array([[0.1],
                                                                 [0.1],
                                                                 [0.8]]))

    def test_reduce_4(self):
        # The CPD's own variable cannot be reduced.
        self.assertRaises(ValueError, self.cpd.reduce, [('grade', 0)])

    def test_reduce_5(self):
        copy_cpd = self.cpd.copy()
        copy_cpd.reduce([('intel', 2), ('diff', 1)])
        self.cpd.reduce([('intel', 2)])
        self.cpd.reduce([('diff', 1)])
        np_test.assert_array_almost_equal(self.cpd.values, copy_cpd.values)

    def test_get_cpd(self):
        np_test.assert_array_equal(self.cpd.get_cpd(), np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                                 [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                                                 [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]]))

    def test_reorder_parents_inplace(self):
        new_vals = self.cpd2.reorder_parents(['B', 'A', 'C'])
        np_test.assert_array_equal(new_vals, np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
                                                       [0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))
        np_test.assert_array_equal(self.cpd2.get_cpd(), np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
                                                                  [0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))

    def test_reorder_parents(self):
        new_vals = self.cpd2.reorder_parents(['B', 'A', 'C'])
        np_test.assert_array_equal(new_vals, np.array([[0.9, 0.3, 0.8, 0.8, 0.9, 0.3, 0.4, 0.4],
                                                       [0.1, 0.7, 0.2, 0.2, 0.1, 0.7, 0.6, 0.6]]))

    def test_reorder_parents_no_effect(self):
        self.cpd2.reorder_parents(['C', 'A', 'B'], inplace=False)
        np_test.assert_array_equal(self.cpd2.get_cpd(), np.array([[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
                                                                  [0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]]))

    def test_reorder_parents_warning(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            self.cpd2.reorder_parents(['A', 'B', 'C'], inplace=False)
            assert("Same ordering provided as current" in str(w[-1].message))
            np_test.assert_array_equal(self.cpd2.get_cpd(), np.array([[0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4],
                                                                      [0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]]))

    def tearDown(self):
        # BUG FIX: previously only ``self.cpd`` was deleted, leaving
        # ``self.cpd2`` (also created in setUp) uncleaned.
        del self.cpd
        del self.cpd2
class TestJointProbabilityDistributionInit(unittest.TestCase):
    """Constructor tests for ``JointProbabilityDistribution``."""

    def test_jpd_init(self):
        uniform = np.ones(12) / 12
        jpd = JPD(['x1', 'x2', 'x3'], [2, 3, 2], uniform)
        np_test.assert_array_equal(jpd.cardinality, np.array([2, 3, 2]))
        np_test.assert_array_equal(jpd.values, uniform.reshape(2, 3, 2))
        self.assertEqual(jpd.get_cardinality(['x1', 'x2', 'x3']),
                         {'x1': 2, 'x2': 3, 'x3': 2})

    def test_jpd_init_exception(self):
        # Values that do not sum to one are not a valid joint distribution.
        self.assertRaises(ValueError, JPD, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
class TestJointProbabilityDistributionMethods(unittest.TestCase):
    """Behavioural tests for ``JointProbabilityDistribution``: marginal and
    conditional distributions, independence queries, minimal I-maps."""

    def setUp(self):
        self.jpd = JPD(['x1', 'x2', 'x3'], [2, 3, 2], values=np.ones(12) / 12)
        self.jpd1 = JPD(['x1', 'x2', 'x3'], [2, 3, 2], values=np.ones(12) / 12)
        self.jpd2 = JPD(['x1', 'x2', 'x3'], [2, 2, 3],
                        [0.126, 0.168, 0.126, 0.009, 0.045, 0.126,
                         0.252, 0.0224, 0.0056, 0.06, 0.036, 0.024])
        self.jpd3 = JPD(['x1', 'x2', 'x3'], [2, 2, 2],
                        [5.0e-04, 5.225e-04, 0.00, 8.9775e-03,
                         9.9e-03, 5.39055e-02, 0.00, 9.261945e-01])

    def test_jpd_marginal_distribution_list(self):
        self.jpd.marginal_distribution(['x1', 'x2'])
        np_test.assert_array_almost_equal(self.jpd.values, np.full((2, 3), 1.0 / 6))
        np_test.assert_array_equal(self.jpd.cardinality, np.array([2, 3]))
        self.assertEqual(self.jpd.get_cardinality(['x1', 'x2']), {'x1': 2, 'x2': 3})
        self.assertEqual(self.jpd.scope(), ['x1', 'x2'])
        np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
        # inplace=False returns the marginal and leaves jpd1 unchanged.
        marginal = self.jpd1.marginal_distribution(['x1', 'x2'], inplace=False)
        self.assertTrue(self.jpd1 != self.jpd)
        self.assertTrue(marginal == self.jpd)

    def test_marginal_distribution_str(self):
        self.jpd.marginal_distribution('x1')
        np_test.assert_array_almost_equal(self.jpd.values, np.array([0.5, 0.5]))
        np_test.assert_array_equal(self.jpd.cardinality, np.array([2]))
        self.assertEqual(self.jpd.scope(), ['x1'])
        np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
        marginal = self.jpd1.marginal_distribution('x1', inplace=False)
        self.assertTrue(self.jpd1 != self.jpd)
        self.assertTrue(self.jpd == marginal)

    def test_conditional_distribution_list(self):
        self.jpd = self.jpd1.copy()
        self.jpd.conditional_distribution([('x1', 1), ('x2', 0)])
        np_test.assert_array_almost_equal(self.jpd.values, np.array([0.5, 0.5]))
        np_test.assert_array_equal(self.jpd.cardinality, np.array([2]))
        self.assertEqual(self.jpd.scope(), ['x3'])
        np_test.assert_almost_equal(np.sum(self.jpd.values), 1)
        conditional = self.jpd1.conditional_distribution([('x1', 1), ('x2', 0)], inplace=False)
        self.assertTrue(self.jpd1 != self.jpd)
        self.assertTrue(self.jpd == conditional)

    def test_check_independence(self):
        self.assertTrue(self.jpd2.check_independence(['x1'], ['x2']))
        # Arguments must be lists of variables, not bare strings.
        self.assertRaises(TypeError, self.jpd2.check_independence, 'x1', ['x2'])
        self.assertRaises(TypeError, self.jpd2.check_independence, ['x1'], 'x2')
        self.assertRaises(TypeError, self.jpd2.check_independence, ['x1'], ['x2'], 'x3')
        self.assertFalse(self.jpd2.check_independence(['x1'], ['x2'], ('x3',),
                                                      condition_random_variable=True))
        self.assertFalse(self.jpd2.check_independence(['x1'], ['x2'], [('x3', 0)]))
        self.assertTrue(self.jpd1.check_independence(['x1'], ['x2'], ('x3',),
                                                     condition_random_variable=True))
        self.assertTrue(self.jpd1.check_independence(['x1'], ['x2'], [('x3', 1)]))
        self.assertTrue(self.jpd3.check_independence(['x1'], ['x2'], ('x3',),
                                                     condition_random_variable=True))

    def test_get_independencies(self):
        all_pairs = Independencies(['x1', 'x2'], ['x2', 'x3'], ['x3', 'x1'])
        single_pair = Independencies(['x1', 'x2'])
        self.assertEqual(self.jpd1.get_independencies(), all_pairs)
        self.assertEqual(self.jpd2.get_independencies(), single_pair)
        self.assertEqual(self.jpd1.get_independencies([('x3', 0)]), single_pair)
        self.assertEqual(self.jpd2.get_independencies([('x3', 0)]), Independencies())

    def test_minimal_imap(self):
        imap = self.jpd1.minimal_imap(order=['x1', 'x2', 'x3'])
        self.assertEqual(sorted(imap.edges()), sorted([('x1', 'x3'), ('x2', 'x3')]))
        imap = self.jpd1.minimal_imap(order=['x2', 'x3', 'x1'])
        self.assertEqual(sorted(imap.edges()), sorted([('x2', 'x1'), ('x3', 'x1')]))
        imap = self.jpd2.minimal_imap(order=['x1', 'x2', 'x3'])
        self.assertEqual(imap.edges(), [])
        imap = self.jpd2.minimal_imap(order=['x1', 'x2'])
        self.assertEqual(imap.edges(), [])

    def test_repr(self):
        expected = '<Joint Distribution representing P(x1:2, x2:3, x3:2) at {address}>'.format(
            address=hex(id(self.jpd1)))
        self.assertEqual(repr(self.jpd1), expected)

    def test_is_imap(self):
        model = BayesianModel([('diff', 'grade'), ('intel', 'grade')])
        diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
        intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
        grade_cpd = TabularCPD('grade', 3,
                               [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
                                [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
                               evidence=['diff', 'intel'],
                               evidence_card=[2, 3])
        model.add_cpds(diff_cpd, intel_cpd, grade_cpd)
        probabilities = [0.01, 0.01, 0.08, 0.006, 0.006, 0.048, 0.004, 0.004, 0.032,
                         0.04, 0.04, 0.32, 0.024, 0.024, 0.192, 0.016, 0.016, 0.128]
        jpd = JPD(['diff', 'intel', 'grade'], [2, 3, 3], probabilities)
        self.assertTrue(jpd.is_imap(model))
        # Only BayesianModel instances are accepted.
        self.assertRaises(TypeError, jpd.is_imap, MarkovModel())

    def tearDown(self):
        del self.jpd
        del self.jpd1
        del self.jpd2
        del self.jpd3
#
# class TestTreeCPDInit(unittest.TestCase):
# def test_init_single_variable_nodes(self):
# tree = TreeCPD([('B', DiscreteFactor(['A'], [2], [0.8, 0.2]), 0),
# ('B', 'C', 1),
# ('C', DiscreteFactor(['A'], [2], [0.1, 0.9]), 0),
# ('C', 'D', 1),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), 0),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), 1)])
#
# self.assertTrue('B' in tree.nodes())
# self.assertTrue('C' in tree.nodes())
# self.assertTrue('D' in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.1, 0.9]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.4, 0.6]) in tree.nodes())
#
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.edges()))
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.1, 0.9]) in tree.edges()))
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.edges()))
# self.assertTrue(('B', DiscreteFactor(['A'], [2], [0.4, 0.6]) in tree.edges()))
# self.assertTrue(('C', 'D') in tree.edges())
# self.assertTrue(('B', 'C') in tree.edges())
#
# self.assertEqual(tree['B'][DiscreteFactor(['A'], [2], [0.8, 0.2])]['label'], 0)
# self.assertEqual(tree['B']['C']['label'], 1)
# self.assertEqual(tree['C'][DiscreteFactor(['A'], [2], [0.1, 0.9])]['label'], 0)
# self.assertEqual(tree['C']['D']['label'], 1)
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.9, 0.1])]['label'], 0)
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.4, 0.6])]['label'], 1)
#
# self.assertRaises(ValueError, tree.add_edges_from, [('F', 'G')])
#
# def test_init_self_loop(self):
# self.assertRaises(ValueError, TreeCPD, [('B', 'B', 0)])
#
# def test_init_cycle(self):
# self.assertRaises(ValueError, TreeCPD, [('A', 'B', 0), ('B', 'C', 1), ('C', 'A', 0)])
#
# def test_init_multi_variable_nodes(self):
# tree = TreeCPD([(('B', 'C'), DiscreteFactor(['A'], [2], [0.8, 0.2]), (0, 0)),
# (('B', 'C'), 'D', (0, 1)),
# (('B', 'C'), DiscreteFactor(['A'], [2], [0.1, 0.9]), (1, 0)),
# (('B', 'C'), 'E', (1, 1)),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), 0),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), 1),
# ('E', DiscreteFactor(['A'], [2], [0.3, 0.7]), 0),
# ('E', DiscreteFactor(['A'], [2], [0.8, 0.2]), 1)
# ])
#
# self.assertTrue(('B', 'C') in tree.nodes())
# self.assertTrue('D' in tree.nodes())
# self.assertTrue('E' in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.nodes())
# self.assertTrue(DiscreteFactor(['A'], [2], [0.9, 0.1]) in tree.nodes())
#
# self.assertTrue((('B', 'C'), DiscreteFactor(['A'], [2], [0.8, 0.2]) in tree.edges()))
# self.assertTrue((('B', 'C'), 'E') in tree.edges())
# self.assertTrue(('D', DiscreteFactor(['A'], [2], [0.4, 0.6])) in tree.edges())
# self.assertTrue(('E', DiscreteFactor(['A'], [2], [0.8, 0.2])) in tree.edges())
#
# self.assertEqual(tree[('B', 'C')][DiscreteFactor(['A'], [2], [0.8, 0.2])]['label'], (0, 0))
# self.assertEqual(tree[('B', 'C')]['D']['label'], (0, 1))
# self.assertEqual(tree['D'][DiscreteFactor(['A'], [2], [0.9, 0.1])]['label'], 0)
# self.assertEqual(tree['E'][DiscreteFactor(['A'], [2], [0.3, 0.7])]['label'], 0)
#
#
# class TestTreeCPD(unittest.TestCase):
# def setUp(self):
# self.tree1 = TreeCPD([('B', DiscreteFactor(['A'], [2], [0.8, 0.2]), '0'),
# ('B', 'C', '1'),
# ('C', DiscreteFactor(['A'], [2], [0.1, 0.9]), '0'),
# ('C', 'D', '1'),
# ('D', DiscreteFactor(['A'], [2], [0.9, 0.1]), '0'),
# ('D', DiscreteFactor(['A'], [2], [0.4, 0.6]), '1')])
#
# self.tree2 = TreeCPD([('C','A','0'),('C','B','1'),
# ('A', DiscreteFactor(['J'], [2], [0.9, 0.1]), '0'),
# ('A', DiscreteFactor(['J'], [2], [0.3, 0.7]), '1'),
# ('B', DiscreteFactor(['J'], [2], [0.8, 0.2]), '0'),
# ('B', DiscreteFactor(['J'], [2], [0.4, 0.6]), '1')])
#
# def test_add_edge(self):
# self.tree1.add_edge('yolo', 'yo', 0)
# self.assertTrue('yolo' in self.tree1.nodes() and 'yo' in self.tree1.nodes())
# self.assertTrue(('yolo', 'yo') in self.tree1.edges())
# self.assertEqual(self.tree1['yolo']['yo']['label'], 0)
#
# def test_add_edges_from(self):
# self.tree1.add_edges_from([('yolo', 'yo', 0), ('hello', 'world', 1)])
# self.assertTrue('yolo' in self.tree1.nodes() and 'yo' in self.tree1.nodes() and
# 'hello' in self.tree1.nodes() and 'world' in self.tree1.nodes())
# self.assertTrue(('yolo', 'yo') in self.tree1.edges())
# self.assertTrue(('hello', 'world') in self.tree1.edges())
# self.assertEqual(self.tree1['yolo']['yo']['label'], 0)
# self.assertEqual(self.tree1['hello']['world']['label'], 1)
#
# def test_to_tabular_cpd(self):
# tabular_cpd = self.tree1.to_tabular_cpd()
# self.assertEqual(tabular_cpd.evidence, ['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['A', 'B', 'C', 'D'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([0.8, 0.8, 0.8, 0.8, 0.1, 0.1, 0.9, 0.4,
# 0.2, 0.2, 0.2, 0.2, 0.9, 0.9, 0.1, 0.6]))
#
# tabular_cpd = self.tree2.to_tabular_cpd()
# self.assertEqual(tabular_cpd.evidence, ['A', 'B', 'C'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['J', 'C', 'B', 'A'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([ 0.9, 0.3, 0.9, 0.3, 0.8, 0.8, 0.4, 0.4,
# 0.1, 0.7, 0.1, 0.7, 0.2, 0.2, 0.6, 0.6]))
#
# @unittest.skip('Not implemented yet')
# def test_to_tabular_cpd_parent_order(self):
# tabular_cpd = self.tree1.to_tabular_cpd('A', parents_order=['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence, ['D', 'C', 'B'])
# self.assertEqual(tabular_cpd.evidence_card, [2, 2, 2])
# self.assertEqual(list(tabular_cpd.variables), ['A', 'D', 'C', 'B'])
# np_test.assert_array_equal(tabular_cpd.values,
# np.array([0.8, 0.1, 0.8, 0.9, 0.8, 0.1, 0.8, 0.4,
# 0.2, 0.9, 0.2, 0.1, 0.2, 0.9, 0.2, 0.6]))
#
# tabular_cpd = self.tree2.to_tabular_cpd('A', parents_order=['E', 'D', 'C', 'B'])
#
# @unittest.skip('Not implemented yet')
# def test_to_rule_cpd(self):
# rule_cpd = self.tree1.to_rule_cpd()
# self.assertEqual(rule_cpd.cardinality(), {'A': 2, 'B': 2, 'C': 2, 'D': 2})
# self.assertEqual(rule_cpd.scope(), {'A', 'B', 'C', 'D'})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.1,
# ('A_0', 'B_1', 'C_1', 'D_0'): 0.9,
# ('A_1', 'B_1', 'C_1', 'D_0'): 0.1,
# ('A_0', 'B_1', 'C_1', 'D_1'): 0.4,
# ('A_1', 'B_!', 'C_1', 'D_1'): 0.6})
#
# rule_cpd = self.tree2.to_rule_cpd()
# self.assertEqual(rule_cpd.cardinality(), {'A': 2, 'B': 2, 'C': 2, 'D': 2, 'E': 2})
# self.assertEqual(rule_cpd.scope(), {'A', 'B', 'C', 'D', 'E'})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0', 'C_0'): 0.8,
# ('A_1', 'B_0', 'C_0'): 0.2,
# ('A_0', 'B_0', 'C_1', 'D_0'): 0.9,
# ('A_1', 'B_0', 'C_1', 'D_0'): 0.1,
# ('A_0', 'B_0', 'C_1', 'D_1'): 0.4,
# ('A_1', 'B_0', 'C_1', 'D_1'): 0.6,
# ('A_0', 'B_1', 'C_0'): 0.1,
# ('A_1', 'B_1', 'C_0'): 0.9,
# ('A_0', 'B_1', 'C_1', 'E_0'): 0.3,
# ('A_1', 'B_1', 'C_1', 'E_0'): 0.7,
# ('A_0', 'B_1', 'C_1', 'E_1'): 0.8,
# ('A_1', 'B_1', 'C_1', 'E_1'): 0.2})
#
#
# class TestRuleCPDInit(unittest.TestCase):
# def test_init_without_errors_rules_none(self):
# rule_cpd = RuleCPD('A')
# self.assertEqual(rule_cpd.variable, 'A')
#
# def test_init_without_errors_rules_not_none(self):
# rule_cpd = RuleCPD('A', {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(rule_cpd.variable, 'A')
# self.assertEqual(rule_cpd.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
#
# def test_init_with_errors(self):
# self.assertRaises(ValueError, RuleCPD, 'A', {('A_0',): 0.5,
# ('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
#
#
# class TestRuleCPDMethods(unittest.TestCase):
# def setUp(self):
# self.rule_cpd_with_rules = RuleCPD('A', {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6})
# self.rule_cpd_without_rules = RuleCPD('A')
#
# def test_add_rules_single(self):
# self.rule_cpd_with_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_with_rules.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_with_rules.variable, 'A')
# self.rule_cpd_without_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_without_rules.rules, {('A_0', 'B_1', 'C_1'): 0.9})
# self.assertEqual(self.rule_cpd_without_rules.variable, 'A')
#
# def test_add_rules_multiple(self):
# self.rule_cpd_with_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_with_rules.rules, {('A_0', 'B_0'): 0.8,
# ('A_1', 'B_0'): 0.2,
# ('A_0', 'B_1', 'C_0'): 0.4,
# ('A_1', 'B_1', 'C_0'): 0.6,
# ('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_with_rules.variable, 'A')
# self.rule_cpd_without_rules.add_rules({('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_without_rules.rules, {('A_0', 'B_1', 'C_1'): 0.9,
# ('A_1', 'B_1', 'C_1'): 0.1})
# self.assertEqual(self.rule_cpd_without_rules.variable, 'A')
#
# def test_add_rules_error(self):
# self.assertRaises(ValueError, self.rule_cpd_with_rules.add_rules, {('A_0',): 0.8})
#
# def test_scope(self):
# self.assertEqual(self.rule_cpd_with_rules.scope(), {'A', 'B', 'C'})
# self.assertEqual(self.rule_cpd_without_rules.scope(), set())
#
# def test_cardinality(self):
# self.assertEqual(self.rule_cpd_with_rules.cardinality(), {'A': 2, 'B': 2, 'C': 1})
# self.assertEqual(self.rule_cpd_without_rules.cardinality(), {})
#
# def tearDown(self):
# del self.rule_cpd_without_rules
#
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import warnings
from functools import wraps
from flask import abort, jsonify, request
from marshmallow.exceptions import ValidationError
from six import string_types
from werkzeug.wrappers import Response as WerkzeugResponse
from eduid_common.api.messages import FluxData, error_response
from eduid_common.api.schemas.models import FluxFailResponse, FluxResponseStatus, FluxSuccessResponse
from eduid_common.api.utils import get_user
from eduid_common.session import session
__author__ = 'lundberg'
def require_eppn(f):
    """
    View decorator requiring a logged-in user.

    If the session holds an eppn, the decorated view is called with it as the
    'eppn' keyword argument; otherwise the request is aborted with 401.
    """

    @wraps(f)
    def require_eppn_decorator(*args, **kwargs):
        eppn = session.get('user_eppn', None)
        if not eppn:
            # No session / not logged in
            abort(401)
        kwargs['eppn'] = eppn
        return f(*args, **kwargs)

    return require_eppn_decorator
def require_user(f):
    """View decorator that resolves the current user and passes it as the 'user' kwarg."""

    @wraps(f)
    def require_user_decorator(*args, **kwargs):
        kwargs['user'] = get_user()
        return f(*args, **kwargs)

    return require_user_decorator
def can_verify_identity(f):
    """
    View decorator gating NIN verification.

    Rejects the request (with a Flux error response) when the user already has a
    verified NIN, or when a different NIN was previously locked for the account.
    """

    @wraps(f)
    def verify_identity_decorator(*args, **kwargs):
        current_user = get_user()
        # For now a user can just have one verified NIN
        if current_user.nins.primary is not None:
            # TODO: Make this a CommonMsg I guess
            return error_response(message='User is already verified')
        # A user can not verify a nin if another previously was verified
        locked_nin = current_user.locked_identity.find('nin')
        if locked_nin and locked_nin.number != kwargs['nin']:
            # TODO: Make this a CommonMsg I guess
            return error_response(message='Another nin is already registered for this user')
        return f(*args, **kwargs)

    return verify_identity_decorator
class MarshalWith(object):
    """
    Decorator that serializes the return value of a Flask view through a marshmallow schema.

    Views are expected to return a FluxData instance, or (for redirects and similar)
    a WerkzeugResponse which is passed through untouched. The FluxData is wrapped in
    a Flux Standard Action envelope (success or fail depending on its status) before
    being dumped through the schema and jsonified.

    See the documentation of the FluxResponse class for the on-the-wire format of
    these Flux Standard Actions.
    """

    def __init__(self, schema):
        # marshmallow schema class used to dump the response envelope
        self.schema = schema

    def __call__(self, f):
        @wraps(f)
        def marshal_decorator(*args, **kwargs):
            view_result = f(*args, **kwargs)
            if isinstance(view_result, WerkzeugResponse):
                # Already a complete response (e.g. a redirect) -- pass through as-is.
                return view_result
            if isinstance(view_result, dict):
                # TODO: Backwards compatibility mode - work on removing the need for this
                view_result = FluxData(FluxResponseStatus.OK, payload=view_result)
            if not isinstance(view_result, FluxData):
                raise TypeError('Data returned from Flask view was not a FluxData (or WerkzeugResponse) instance')
            if view_result.status == FluxResponseStatus.OK:
                flux_response = FluxSuccessResponse(request, payload=view_result.payload)
            else:
                flux_response = FluxFailResponse(request, payload=view_result.payload)
            return jsonify(self.schema().dump(flux_response.to_dict()))

        return marshal_decorator
class UnmarshalWith(object):
    """
    Decorator that validates the request JSON body with a marshmallow schema and
    passes the loaded fields on to the view as keyword arguments. Validation
    failures are returned as a Flux fail response carrying the error details and
    a CSRF token.
    """

    def __init__(self, schema):
        # marshmallow schema class used to load/validate the request body
        self.schema = schema

    def __call__(self, f):
        @wraps(f)
        def unmarshal_decorator(*args, **kwargs):
            try:
                body = request.get_json()
                if body is None:
                    # No JSON payload -- validate against an empty document.
                    body = {}
                kwargs.update(self.schema().load(body))
                return f(*args, **kwargs)
            except ValidationError as e:
                failure = FluxFailResponse(
                    request, payload={'error': e.normalized_messages(), 'csrf_token': session.get_csrf_token()}
                )
                return jsonify(failure.to_dict())

        return unmarshal_decorator
# https://stackoverflow.com/questions/2536307/how-do-i-deprecate-python-functions/40301488#40301488
def deprecated(reason):
    """
    Decorator marking a function or class as deprecated; using the decorated
    object emits a DeprecationWarning.

    Supports both forms:

    .. code-block:: python

        @deprecated("please, use another function")
        def old_function(x, y): ...

        @deprecated
        def old_function(x, y): ...
    """

    def _wrap(target, template, **fmt_kwargs):
        # Build a wrapper that warns every time *target* is called.
        @wraps(target)
        def warned(*args, **kwargs):
            warnings.simplefilter('always', DeprecationWarning)
            warnings.warn(
                template.format(name=target.__name__, **fmt_kwargs),
                category=DeprecationWarning, stacklevel=2
            )
            warnings.simplefilter('default', DeprecationWarning)
            return target(*args, **kwargs)

        return warned

    if isinstance(reason, string_types):
        # Used as @deprecated("reason") -- *reason* is the message detail.
        def decorator(func):
            kind = "class" if inspect.isclass(func) else "function"
            template = "Call to deprecated {kind} {{name}} ({{reason}}).".format(kind=kind)
            return _wrap(func, template, reason=reason)

        return decorator
    if inspect.isclass(reason) or inspect.isfunction(reason):
        # Used bare as @deprecated -- *reason* is the decorated object itself.
        kind = "class" if inspect.isclass(reason) else "function"
        template = "Call to deprecated {kind} {{name}}.".format(kind=kind)
        return _wrap(reason, template)
    raise TypeError(repr(type(reason)))
@deprecated('Use eduid_common.api.decorators.deprecated instead')
class Deprecated(object):
    """
    Mark deprecated functions with this decorator.

    Attention! Use it as the closest one to the function you decorate.

    :param message: The deprecation message
    :type message: str | unicode
    """

    def __init__(self, message=None):
        self.message = message

    def __call__(self, func):
        if self.message is None:
            # Default message derived from the decorated function's name.
            self.message = 'Deprecated function {!r} called'.format(func.__name__)

        @wraps(func)
        def warned(*args, **kwargs):
            warnings.warn(self.message, category=DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)

        # functools.wraps on python < 3.2 fails to set __wrapped__; add it ourselves.
        if getattr(warned, '__wrapped__', None) is None:
            warned.__wrapped__ = func
        return warned
| |
# -*- coding: utf-8 -*-
"""
Created on Tue May 3 14:11:09 2016
Assorted Auger utility functions
@author: tkc
"""
#%%
import pandas as pd
import numpy as np
import os, glob, shutil, re # already run with functions
import datetime
import tkinter as tk
#%%
''' RUNNING BELOW FILE MANAGEMENT UTILITIES
destdir='H:\\Research_data\\Stardust\\C2010W\\Auger\\18Nov15\\sub\\'
movefiles(spelist,destdir)
excludelist=combinelist[combinelist['Areas']==2] # finds filenames with screwed up import
AugerParamLog=removefromlog(excludelist, AugerParamLog) # removes log entries from any dataframe by filename if in excludelist
AugerParamLog=removelistdups(AugerParamLog,'Evbreaks')
'''
def compsummary(Smdifcomp, Peaks, Peaksexcl):
    ''' Compositional summary keeping the at.% and identifying fields only;
    can be used on integ or smdiff quant results.

    Args:
        Smdifcomp (DataFrame): quant results holding '%<elem>' at.% columns plus
            the id columns Filename/Sample/Comments/AESbasis/Phase
        Peaks (list of str): element/peak names included in the quant
        Peaksexcl (list of str): subset of Peaks excluded from the renormalized result
    Returns:
        DataFrame with id columns, at.% renormalized over the included peaks,
        'Total' (signal fraction attributed to included peaks) and the excluded
        peaks' columns at the end.
    '''
    AESsumm = Smdifcomp.copy()
    # Work on copies so the caller's element lists are not mutated (the original
    # implementation removed entries from the caller's lists in place).
    Peaks = list(Peaks)
    Peaksexcl = list(Peaksexcl)
    mycols = ['Filename', 'Sample', 'Comments', 'AESbasis', 'Phase']
    # Drop excluded peaks that are not in the peaks list at all
    missing = [p for p in Peaksexcl if p not in Peaks]
    if missing:
        print(','.join(missing), ' excluded peaks missing from peaks list')
        for peak in missing:
            Peaksexcl.remove(peak)
    # Drop peaks without data columns
    # NOTE(review): this checks the bare element name, not the '%'+elem column --
    # assumes both column flavors are present in the quant frame; confirm upstream.
    missing = [p for p in Peaks if p not in Smdifcomp.columns]
    if missing:
        print(','.join(missing), ' peak not present in AES smdifcomp... removed')
        for elem in missing:
            Peaks.remove(elem)
    real = [p for p in Peaks if p not in Peaksexcl]
    # Order output with included (real) elements first, excluded after Total
    mycols.extend('%' + elem for elem in real)
    mycols.append('Total')
    if 'Total' not in AESsumm.columns:
        AESsumm['Total'] = np.nan
    mycols.extend('%' + elem for elem in Peaksexcl)
    for index, row in AESsumm.iterrows():
        # at.% sum over all peaks (should already be ~1)
        peaksumm = 0.0
        for elem in Peaks:
            peaksumm += row['%' + elem]
        # Normalize all peaks to unity
        # (DataFrame.set_value was removed in pandas 1.0 -- use .at instead)
        for elem in Peaks:
            AESsumm.at[index, '%' + elem] = AESsumm.loc[index]['%' + elem] / peaksumm
        # Redo the sum only for included (real) elems, from the pre-normalization
        # row snapshot (matches the original behavior)
        peaksumm = 0.0
        for elem in real:
            peaksumm += row['%' + elem]
        # Renormalize included peaks to unity
        for elem in real:
            AESsumm.at[index, '%' + elem] = AESsumm.loc[index]['%' + elem] / peaksumm
        # Total records the amount of signal in the included peaks
        AESsumm.at[index, 'Total'] = peaksumm
    return AESsumm[mycols]
def pickspectraGUI(spelist):
    ''' Quick method of interactively selecting spectral files for plotting.

    Opens a tkinter window with one checkbox per unique Filenumber in spelist
    (all pre-checked), plus buttons for clear-all / select-all / combined-only,
    then blocks in mainloop until "done" is clicked.

    Args:
        spelist (DataFrame): spe file log with a 'Filenumber' column
    Returns:
        DataFrame: subset of spelist whose Filenumber boxes were checked
        (then pass to plotting functions)
    '''
    # One checkbox (tkinter IntVar) per unique file number
    files=np.ndarray.tolist(spelist.Filenumber.unique())
    root = tk.Tk()
    root.title('Select files for plotting or quant')
    varlist=[] # list of tkinter IntVars, parallel to files
    for i, col in enumerate(files): # create the checkbox state variables
        varlist.append(tk.IntVar())
        varlist[i].set(1) # default to 1 (all files selected)
    def clearall():
        ''' Set all tkinter vars to zero (deselect every file) '''
        for i, col in enumerate(files):
            varlist[i].set(0)
    def selectall():
        ''' Set all tkinter vars to one (select every file) '''
        for i, col in enumerate(files):
            varlist[i].set(1)
    def selectcombined():
        ''' Choose combined files (filenumber>100000) '''
        for i, col in enumerate(files):
            if files[i]>100000:
                varlist[i].set(1)
            else:
                varlist[i].set(0)
    for i, col in enumerate(files):
        # choose row, col grid position for this checkbox
        thiscol=i%20 # lay out 20 checkboxes per row
        thisrow=i//20
        ent=tk.Checkbutton(root, text=files[i], variable=varlist[i])
        ent.grid(row=thisrow, column=thiscol)
    # clear all button
    e=tk.Button(root, text='Clear all', command=clearall)
    lastrow=len(files)//20+1
    e.grid(row=lastrow, column=0)
    # select all button
    e=tk.Button(root, text='Select all', command=selectall)
    lastrow=len(files)//20+2
    e.grid(row=lastrow, column=0)
    # combined-files-only button
    e=tk.Button(root, text='Select combined', command=selectcombined)
    lastrow=len(files)//20+3
    e.grid(row=lastrow, column=0)
    # add done button (closes the window, ending mainloop below)
    f=tk.Button(root, text='done')
    f.bind("<Button-1>", lambda event: root.destroy())
    lastrow=len(files)//20+4
    f.grid(row=lastrow, column=0)
    root.mainloop()
    # Only the tkinter IntVars survive root.destroy; read them out now
    filelist=[] # file numbers whose boxes were checked
    for i, val in enumerate(varlist):
        if val.get()==1:
            filelist.append(files[i]) # add file if box is checked
    spelist=spelist[spelist['Filenumber'].isin(filelist)]
    return spelist
def pickelemsGUI(AESquantparams, Smdifpeakslog, Integquantlog):
    ''' Quick method of interactively selecting elements/lines for plotting.

    Has some hard-coded presets that can be changed using the preset dictionaries
    below; only elements with entries in AESquantparams are selectable.
    Note.. only tkinter variables exist after root.destroy

    Args:
        AESquantparams (DataFrame): quant parameters; the 'element' column defines
            the selectable peaks
        Smdifpeakslog (DataFrame): prior smooth-diff quant results ('PeakID' column);
            used by the "prior deriv" preset button
        Integquantlog (DataFrame): prior integ quant results ('Element' column);
            used by the "prior integ" preset button
    Returns:
        list of str: element/peak names whose checkboxes were ticked
    '''
    # Subset of elements selected (on) by default
    elemdict={'S':1,'C':1,'Ti':1,'O':1,'Fe1':1,'Fe2':1,'Na':1,'Mg':1,'Al':1,'Si':1,'Fe':1,'Ca':1}
    preset1={'S':1,'Mg':1,'Si':1,'Fe':1,'Ca':1,'Fe2':1}
    preset2={'S':1,'Mg':1,'Si':1,'Fe':1,'Ca':1,'O':1}
    # All available elements/peaks are those with entries in Aesquantparams.csv
    elems=np.ndarray.tolist(AESquantparams.element.unique())
    root = tk.Tk()
    varlist=[] # list of tkinter IntVars, parallel to elems
    for i, col in enumerate(elems): # create the checkbox state variables
        varlist.append(tk.IntVar())
        val=elemdict.get(col,0) # set to 1 or 0 based on above default dictionary
        varlist[i].set(val) # set default value based on elemdict
    tk.Label(root, text='Select elements for plotting or quant').grid(row=0,column=0)
    def choose1():
        ''' Apply preset 1 to the checkbox values '''
        # preset1={'S':1,'Mg':1,'Si':1,'Fe':1,'Ca':1,'Fe2':1}
        # Still have to pass these through as tkinter ints
        for i, col in enumerate(elems):
            val=preset1.get(col,0) # set to 1 or 0 based on preset dictionary
            varlist[i].set(val)
    def choose2():
        ''' Apply preset 2 to the checkbox values '''
        # preset2={'S':1,'Mg':1,'Si':1,'Fe':1,'Ca':1,'O':1}
        # Still have to pass these through as tkinter ints
        for i, col in enumerate(elems):
            val=preset2.get(col,0) # set to 1 or 0 based on preset dictionary
            varlist[i].set(val)
    def getpriorderiv():
        ''' Select elements used in prior deriv quant (for consistency)
        uses smdiff peaks (not integ peaks) '''
        try:
            prior=np.ndarray.tolist(Smdifpeakslog.PeakID.unique())
            for i, col in enumerate(elems):
                if col in prior:
                    varlist[i].set(1)
                else:
                    varlist[i].set(0)
        except: # handles no prior quant (missing/empty Smdifpeakslog)
            pass
    def getpriorinteg():
        ''' Select elements used in prior integ/direct peak quant (for consistency)
        uses integ peaks (not smdiff peaks) '''
        try:
            prior=np.ndarray.tolist(Integquantlog.Element.unique())
            for i, col in enumerate(elems):
                if col in prior:
                    varlist[i].set(1)
                else:
                    varlist[i].set(0)
        except: # handles no prior quant (missing/empty Integquantlog)
            pass
    def clearall():
        ''' Set all tkinter vars to zero (deselect every element) '''
        for i, col in enumerate(elems):
            varlist[i].set(0)
    for i, col in enumerate(elems):
        # choose row, col grid position (starting row 1; row 0 holds the label)
        thisrow=i%3+1 # three rows per column of checkboxes
        thiscol=i//3
        ent=tk.Checkbutton(root, text=elems[i], variable=varlist[i])
        ent.grid(row=thisrow, column=thiscol)
    # Add preset 1 button (defined above)
    els=list(preset1)
    mystr=', '.join(els)
    c=tk.Button(root, text=mystr, command=choose1)
    lastrow=len(elems)%3+2
    c.grid(row=lastrow, column=0)
    # Add preset 2 button
    els=list(preset2)
    mystr=', '.join(els)
    d=tk.Button(root, text=mystr, command=choose2)
    lastrow=len(elems)%3+3
    d.grid(row=lastrow, column=0)
    # clear all button
    e=tk.Button(root, text='Clear all', command=clearall)
    lastrow=len(elems)%3+4
    e.grid(row=lastrow, column=0)
    # select prior smdiff deriv elements
    f=tk.Button(root, text='Select prior deriv elements', command=getpriorderiv)
    lastrow=len(elems)%3+5
    f.grid(row=lastrow, column=0)
    # select prior integ/direct peak quant elements
    f=tk.Button(root, text='Select prior integ elements', command=getpriorinteg)
    lastrow=len(elems)%3+6
    f.grid(row=lastrow, column=0)
    # add done button (closes the window, ending mainloop below)
    g=tk.Button(root, text='done')
    g.bind("<Button-1>", lambda event: root.destroy())
    lastrow=len(elems)%3+7
    g.grid(row=lastrow, column=0)
    root.mainloop()
    # Only the tkinter IntVars survive root.destroy; read them out now
    elemlist=[] # element names whose boxes were checked
    for i, val in enumerate(varlist):
        if val.get()==1:
            elemlist.append(elems[i]) # add element if box is checked
    return elemlist
def loadsubfiles():
    ''' Load the standard sub spe files (the source files collected before
    combination via averaging) from the working Auger data directory.

    Returns:
        subspelist (DataFrame): param-log rows for the combinable sub files
            (empty if Augerparamlog.csv or the Auger logbook is missing)
        Smdifpeakslogsubs, Backfitlogsubs, Integquantlogsubs (DataFrame):
            previously saved quant logs from the /sub directory (empty if absent)
    '''
    subspelist = pd.DataFrame()
    logfile = glob.glob('*Auger_log*')  # find ... Auger_logbook (xls or csv)
    # Require BOTH the param log and the logbook (the original code crashed with
    # a NameError when only the logbook was present).
    if os.path.isfile('Augerparamlog.csv') and len(logfile) == 1:
        AugerParamLog = pd.read_csv('Augerparamlog.csv', encoding='cp437')
        name = logfile[0]
        if '.xls' in name:  # open log tab of existing excel file
            Augerlogbook = pd.read_excel(name, sheetname='Log')
        if '.csv' in name:  # open csv
            Augerlogbook = pd.read_csv(name)
        # gets file ranges to combine via averaging
        combinelist = Augerlogbook[(Augerlogbook['Lastnumber'] > 0)]
        subfilenums = []  # list of possible sub files
        for index, row in combinelist.iterrows():
            first = int(combinelist.loc[index]['Filenumber'])
            last = int(combinelist.loc[index]['Lastnumber'])
            subfilenums.extend(range(first, last + 1))
        subspelist = AugerParamLog[AugerParamLog['Filenumber'].isin(subfilenums)]
        # drop spectra flagged 'exclude' in the comments
        excludemask = subspelist['Comments'].str.contains('exclude', case=False, na=False)
        subspelist = subspelist.loc[~excludemask]
        subspelist = subspelist.sort_values(['Filenumber'], ascending=True)
    else:
        print('Augerparamlog and/or Auger logbook not found.')

    def _loadlog(fname, label):
        # Load a previously saved quant log from /sub; empty frame if missing.
        if os.path.isfile(fname):
            return pd.read_csv(fname, encoding='cp437')
        print(label, 'not found.')
        return pd.DataFrame()

    Smdifpeakslogsubs = _loadlog('sub\\Smdifpeakslog_subs.csv', 'Smdifpeakslogsubs')
    Backfitlogsubs = _loadlog('sub\\Backfitlog_subs.csv', 'Backfitlogsubs')
    # BUG FIX: the existence check previously tested sub\Integquantlog.csv while
    # reading sub\Integquantlog_subs.csv; check the file that is actually read.
    Integquantlogsubs = _loadlog('sub\\Integquantlog_subs.csv', 'Integquantlogsubs')
    return subspelist, Smdifpeakslogsubs, Backfitlogsubs, Integquantlogsubs
def loadmainfiles():
    ''' Load the standard working files from the main Auger data directory.

    Returns (AugerParamLog, spelist, Smdifpeakslog, Integquantlog, Backfitlog,
    AESquantparams). Any file that is missing yields an empty DataFrame (with
    the standard columns where the schema is known) and prints a notice.
    '''
    def _read_or_empty(fname, label, columns=None):
        # Read a csv if present; otherwise report and return an empty frame.
        if os.path.isfile(fname):
            return pd.read_csv(fname, encoding='cp437')
        print(label, 'not found.')
        if columns is not None:
            return pd.DataFrame(columns=columns)
        return pd.DataFrame()

    if os.path.isfile('Augerparamlog.csv'):
        AugerParamLog = pd.read_csv('Augerparamlog.csv', encoding='cp437')
        # cast is necessary when the whole column is blank/nan
        AugerParamLog['Comments'] = AugerParamLog['Comments'].astype(str)
        # spe files have at least one defined area; drop spectra flagged 'exclude'
        spelist = AugerParamLog[(AugerParamLog['Areas'] >= 1)]
        notexcluded = ~spelist['Comments'].str.contains('exclude', case=False, na=False)
        spelist = spelist.loc[notexcluded].sort_values(['Filenumber'], ascending=True)
    else:
        print('Augerparamlog not found.')
        AugerParamLog = pd.DataFrame()
        spelist = pd.DataFrame()

    smdifcols = ['Project', 'Filepath', 'Date', 'Sample', 'Filename', 'Filenumber',
        'Areanumber', 'Peakenergy', 'Peakindex', 'PeakID', 'Shift', 'Negintensity',
        'Posintensity', 'Pospeak', 'Amplitude', 'Peakwidth', 'Lowback',
        'Lowbackamplitude', 'Highback', 'Highbackamplitude', 'Avgbackamplitude',
        'Quantdetails', 'Comments', 'Adjamp']
    Smdifpeakslog = _read_or_empty('Smdifpeakslog.csv', 'Smdifpeakslog', smdifcols)

    integcols = ['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments',
        'Areanumber', 'Element', 'Integcounts', 'Backcounts', 'Significance',
        'Xc', 'Width', 'Peakarea', 'Y0', 'Rsquared', 'Numchannels']
    Integquantlog = _read_or_empty('Integquantlog.csv', 'Integquantlog', integcols)

    backfitcols = ['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments',
        'Date', 'Areanumber', 'Element', 'Lower1', 'Lower2', 'Upper1',
        'Upper2', 'Lowrange', 'Highrange', 'Peakshift', 'Fittype', 'P1',
        'P1stdev', 'P2', 'P2stdev', 'Rval1', 'P3', 'P3stdev', 'P4', 'P4stdev',
        'Rval2']
    Backfitlog = _read_or_empty('Backfitlog.csv', 'Backfitlog', backfitcols)

    # Shared element/line quant parameters live at a fixed location
    qppath = 'C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\AESquantparams.csv'
    AESquantparams = _read_or_empty(qppath, 'AESquantparams')

    return AugerParamLog, spelist, Smdifpeakslog, Integquantlog, Backfitlog, AESquantparams
def dropexcluded(df, spelist):
    '''Return only the rows of df whose Filenumber appears in spelist (second argument).'''
    allowed = spelist.Filenumber.unique()
    return df[df.Filenumber.isin(allowed)]
def writecomps(df, AESquantparams, Elems):
    '''Store composition results in an auto-named, auto-saved xlsx along with the
    subset of elements used; a second tab holds the k-factors / element lines employed.

    Prompts on stdin before overwriting an existing workbook of the same name.
    '''
    today = datetime.datetime.now()
    datestr = datetime.date.strftime(today, "%d%b%y")
    if Elems[0] + 'ampl' in df:
        # composition derived from smooth-differentiated (deriv) data
        savename = 'smdiffcomp_' + datestr + '.xlsx'
        sheetname = 'smdiffcomp'
    else:
        # composition from the integration method
        savename = 'integcomp_' + datestr + '.xlsx'
        sheetname = 'integcomp'
    if os.path.exists(savename):
        # prompt before clobbering an existing xls of the same name
        print('Enter Y to overwrite file', savename)
        if input() != 'Y':
            print('Exiting without overwrite of existing file')
            return
    writer = pd.ExcelWriter(savename, engine='openpyxl', datetime_format='mm/dd/yy')
    df.to_excel(writer, sheetname, index=False)  # this overwrites existing file
    # subset of quant params for the elements used (can be Fe or Fe+Fe2, etc.)
    AESq = AESquantparams[AESquantparams.element.isin(Elems)]
    AESq.to_excel(writer, 'Kfactors', index=False)
    writer.save()  # saves xls file with all modified data
    return
def updatepaths(AugerParamLog):
    '''Checks the parameter log to make sure the path to the Auger csv files in
    AugerParamLog is correct; a single log holds files in the working dir
    (combined or unique files) and in its /sub subdir (source files before
    averaging via combination).

    Rows whose csv file is found in neither location are removed from the log.
    NOTE(review): assumes the cwd lies below a 'Research_data' directory -- the
    split below raises IndexError otherwise; confirm against callers.
    '''
    path = os.getcwd()
    basepath = 'Research_data' + path.split('Research_data')[1]
    droplist = []  # index labels of rows whose file could not be located
    for index, row in AugerParamLog.iterrows():
        name = row['Filename']
        if 'csv' not in name:  # jpgs derived from sem and map files will not be in //sub
            AugerParamLog.at[index, 'FilePath'] = basepath
            continue
        if os.path.exists(name):
            AugerParamLog.at[index, 'FilePath'] = basepath
        elif os.path.exists('sub\\' + name):  # check for source file in /sub directory
            AugerParamLog.at[index, 'FilePath'] = basepath + '\\sub'
        else:
            # file not found in directory or subdirectory (remove from AugerParamLog)
            print(name, ' removed from log... not found in dir or subdir')
            droplist.append(index)
    # BUG FIX: the original dropped rows inside the loop via
    # AugerParamLog.index[index], which treats the row LABEL as a POSITION --
    # after the first drop (or with a non-default index) the wrong row was
    # removed. Collect labels and drop once, after iteration.
    if droplist:
        AugerParamLog = AugerParamLog.drop(droplist)
    return AugerParamLog
def _mergelogs(loglist, expectedcols, master):
    '''Read each csv log file, report column mismatches vs expectedcols, and concat onto master.'''
    for logfile in loglist:
        thislog = pd.read_csv(logfile, encoding='cp437')
        cols1 = list(thislog.columns.values)
        # check for and print differences between sets of columns
        uniquecols = [col for col in cols1 if col not in expectedcols]
        if len(uniquecols) > 0:
            print('Unique columns in ', logfile, ': ', ','.join(uniquecols))
        missingcols = [col for col in expectedcols if col not in cols1]
        if len(missingcols) > 0:
            print('Missing columns in ', logfile, ': ', ','.join(missingcols))
        master = pd.concat([master, thislog], ignore_index=True)
    return master

def assembledataset(paramloglist, integloglist, smdifloglist):
    '''Construct master paramlog, integlog, and smdiflog for list of directories
    used to create master data sets for later processing, plotting, etc.

    Returns (Masterparamlog, Masterinteglog, Mastersmdiflog); the three identical
    read-check-concat loops are factored into _mergelogs.'''
    # Structure of created files sometimes changes over time... ensure consistency
    mycols = ['Filenumber', 'Project', 'Filename', 'FilePath', 'Sample', 'Comments',
              'Date', 'FieldofView', 'Type', 'Energy', 'GunVoltage', 'nA', 'Areas',
              'Cycles', 'Timestep', 'Details', 'Evbreaks', 'Acqtime', 'Scanarea', 'X',
              'Y', 'Z', 'Tilt', 'Rotation', 'ImageshiftX', 'ImageshiftY']
    # current df structure for integration results
    mycols2 = ['Filenumber', 'Filename', 'Filepath', 'Sample', 'Comments', 'Areanumber', 'Element', 'Integcounts',
               'Backcounts', 'Significance', 'Xc', 'Width', 'Peakarea', 'Y0', 'Rsquared', 'Numchannels']
    # current df structure for smdiff quant results (see auger_smdiff_functions)
    mycols3 = ['Project', 'Filepath', 'Date', 'Sample', 'Filename', 'Filenumber', 'Areanumber', 'Peakenergy', 'Peakindex', 'PeakID', 'Shift',
               'Negintensity', 'Posintensity', 'Pospeak', 'Amplitude', 'Peakwidth', 'Lowback', 'Lowbackamplitude', 'Highback', 'Highbackamplitude',
               'Avgbackamplitude', 'Quantdetails']
    Masterparamlog = _mergelogs(paramloglist, mycols, pd.DataFrame(columns=mycols))
    Masterinteglog = _mergelogs(integloglist, mycols2, pd.DataFrame(columns=mycols2))
    Mastersmdiflog = _mergelogs(smdifloglist, mycols3, pd.DataFrame(columns=mycols3))
    return Masterparamlog, Masterinteglog, Mastersmdiflog
def checkparamlog(AugerParamLog, makeentry=False):
    ''' Checks the Auger parameters logbook against actual csv spectral files found in
    the current directory and its /sub subdirectory; prints each mismatch (log entry
    with no file, or file with no log entry) to console
    makeentry: placeholder flag for auto-creating missing log entries (not implemented)
    Returns AugerParamLog unchanged.
    '''
    #TODO fix this so it works with new blanklog containing filenames (due to degenerate filenumber problem)
    # find csv file in current and /sub directory
    spelist=[] # list of filenumbers of spe files in directory
    sublist=[] # csv files that are in sub directory
    allcsvfiles=glob.glob('**/*.csv', recursive=True)
    for i, name in enumerate(allcsvfiles): # deals with 3 cases (spe, sem or map)
        # NOTE(review): the 'sub\\' test assumes Windows path separators from glob -- confirm on other platforms
        if "sub\\" in name:
            tempstring=name.split('sub\\')[1]
            match=re.search(r'.\d+.csv',tempstring) # Auger spectra should have format like filename.100.csv
            if match: # check if it's an Auger spectrum
                sublist.append(tempstring)
        else:
            match=re.search(r'.\d+.csv',name) # Auger spectra should have format like filename.100.csv
            if match:
                spelist.append(name) # add to spe files in dir list
    # Filter AugerParamLog into spe files in main vs those in sub
    spelog=AugerParamLog[(AugerParamLog['Areas']>=1)] # spe entries have at least one spatial area
    excludemask=spelog['FilePath'].str.contains('sub', case=False, na=False)
    sublogfiles=spelog.loc[excludemask] # log entries whose path points at /sub
    spelog=spelog.loc[~excludemask] # log entries for files in the main directory
    logfilelist=spelog.Filename.unique()
    logfilelist=np.ndarray.tolist(logfilelist)
    sublogfilelist=sublogfiles.Filename.unique()
    sublogfilelist=np.ndarray.tolist(sublogfilelist)
    # compare spes in main
    missingdata=[i for i in logfilelist if i not in spelist] # list comprehension for missing data file (but present in excel logbook)
    missingentry=[i for i in spelist if i not in logfilelist]
    for i, val in enumerate(missingdata):
        print ('Data file number ', val, ' mentioned in AugerParamlog but missing from directory')
    for i, val in enumerate(missingentry):
        print ('Data file number ', val, ' present in directory but missing from AugerParamlog')
    # same comparison for spes in /sub
    missingdata=[i for i in sublogfilelist if i not in sublist] # list comprehension for missing data file (but present in excel logbook)
    missingentry=[i for i in sublist if i not in sublogfilelist]
    for i, val in enumerate(missingdata):
        print ('Data file number ', val, ' mentioned in AugerParamlog but missing from directory')
    for i, val in enumerate(missingentry):
        print ('Data file number ', val, ' present in directory but missing from AugerParamlog')
    if makeentry: # not yet implemented and shouldn't happen... fix manually for now?
        pass
        # AugerParamLog=makemissingentries(missingentry, AugerParamLog)
    return AugerParamLog
def smooth7cnts(df, areanum, evbreaks):
    '''create smooth differentiated column from counts using S7D7 PHI algorithm (Multipak tables A-5 and A-1)
    version for rerun on combined spectra with internal ev breaks

    df: spectrum dataframe with a CountsN column; areanum: spatial area number N;
    evbreaks: list of row indices where multiplex regions begin/end.
    Returns df with a new SmcountsN column added.
    Fix: smcountname previously used undefined name 'areanwum' (NameError).'''
    countname = 'Counts' + str(areanum)
    smcountname = 'Smcounts' + str(areanum)  # fixed typo (was areanwum)
    counts = df[countname].tolist()  # convert to list
    numpts = len(counts)
    smooth = [0] * numpts  # empty list of correct length for smoothed data
    # smoothing of endpoints according to Multipak algorithm appendix table A-5
    # NOTE(review): the trailing +1 in each numerator presumably carries integer
    # rounding over from the original algorithm -- confirm against Multipak tables
    for i in range(0, numpts):  # special cases for endpoints (within 3 of an evbreak)
        diff = i - min(evbreaks, key=lambda x: abs(x - i))  # distance from closest evbreak index # in list
        if diff == 0:
            if i == numpts - 1:  # last point
                smooth[i] = (2 * counts[i] + 2 * counts[i - 1] + 1) / 4  # additional special case for last point
            else:  # first point
                smooth[i] = (2 * counts[i] + 2 * counts[i + 1] + 1) / 4  # all others at exact breaks can use value and adj higher value
        elif abs(diff) == 1:  # works for +1 or -1 from nearest break
            smooth[i] = (1 * counts[i - 1] + 2 * counts[i] + 1 * counts[i + 1] + 1) / 4
        elif abs(diff) == 2:  # 5-point Savitzky-Golay style weights
            smooth[i] = (-3 * counts[i - 2] + 12 * counts[i - 1] + 17 * counts[i] + 12 * counts[i + 1] + -3 * counts[i + 2] + 1) / 35
        else:  # full 7-point smooth for interior points
            smooth[i] = (-2 * counts[i - 3] + 3 * counts[i - 2] + 6 * counts[i - 1] + 7 * counts[i] + 6 * counts[i + 1] + 3 * counts[i + 2] - 2 * counts[i + 3] + 1) / 21
    df[smcountname] = smooth  # add smoothed data as new dataframe column
    return df
def addsmoothloop(spelist):
    '''Add a smoothed counts column (SmcountsN) for each spatial area of every
    spe file listed in spelist; the csv is rewritten only when new columns were added.'''
    for _, logmatch in spelist.iterrows():
        logmatch = logmatch.squeeze()  # flatten to Series of spectrum parameters
        numareas = int(logmatch.Areas)  # number of spatial areas in this spe
        AugerFileName = logmatch.Filename
        Augerfile = pd.read_csv(AugerFileName)  # full spectrum as dataframe
        savefile = False
        if 'Smcounts1' not in Augerfile:  # same evbreaks apply to all areas
            savefile = True
            # Evbreaks is stored as a bracketed string; parse into a list of ints
            inner = logmatch.Evbreaks.split('[')[1]
            inner = inner.split(']')[0]
            evbreaks = [int(s) for s in inner.split(',')]
        # loop over each separate area in this spectrum (typically only 1)
        for areanum in range(1, numareas + 1):
            colname = 'Counts' + str(areanum)
            if colname not in Augerfile:  # this shouldn't happen
                print(colname, ' not present in file ', AugerFileName)
                continue  # skip to next area
            smoothname = 'Smcounts' + str(areanum)
            if smoothname not in Augerfile:
                # smooth7cnts returns the df with the new smoothed column added
                Augerfile = smooth7cnts(Augerfile, areanum, evbreaks)
        if savefile:  # only rewrite csv when new smoothed counts were added
            Augerfile.to_csv(AugerFileName, index=False)
    return
def compareduplicatecomps(df, elemlist):
    '''Return subset with same sample name but different file number (avoids multiple spatial
    areas from same crater which may be heterogeneous)

    For each sample/area combo, builds one row holding mean & stdev of the unique
    atomic-% values per element plus min/max of the quoted errors; common columns
    are joined into comma-separated strings.
    Updated to use .at and pd.concat (set_value and DataFrame.append were removed
    from pandas); output rows are re-indexed 0..n-1.'''
    samplelist = df.Sample.unique().tolist()
    mycols = ['Sample', 'Filenumber', 'Filename', 'Areanumber', 'AESbasis']
    commoncols = ['Sample', 'Filenumber', 'Filename', 'Areanumber', 'AESbasis']
    for elem in elemlist:
        mycols.extend(['%' + elem, 'err%' + elem, 'minerr' + elem, 'maxerr' + elem])
    rows = []  # one single-row frame per sample/area, concatenated at the end
    for sample in samplelist:
        # check for multiple spatial areas and process separately
        match = df[df['Sample'] == sample]
        arealist = match.Areanumber.unique().tolist()
        for areanum in arealist:
            thissample = pd.DataFrame(index=np.arange(0, 1), columns=mycols)  # single row
            match2 = match[match['Areanumber'] == areanum]
            for elem in elemlist:
                vals = match2['%' + elem].unique()  # atomic percents
                thissample.at[0, '%' + elem] = vals.mean()  # mean value as at. %
                thissample.at[0, 'err%' + elem] = vals.std()  # stdev as err in at. %
                errvals = match2['err%' + elem].unique()  # quoted errors (in case any are fubar)
                thissample.at[0, 'minerr' + elem] = errvals.min()
                thissample.at[0, 'maxerr' + elem] = errvals.max()
            # Copy value or values for each common parameter
            for colname in commoncols:
                templist = list(set(match2[colname].unique().tolist()))  # eliminate duplicates
                if colname == 'Filenumber' or colname == 'Areanumber':
                    templist = [int(v) for v in templist]  # convert floats to ints
                thissample.at[0, colname] = ', '.join(str(v) for v in templist)
            rows.append(thissample)
    if rows:
        duplicatecompdata = pd.concat(rows, ignore_index=True)
    else:
        duplicatecompdata = pd.DataFrame(columns=mycols)
    return duplicatecompdata
def findduplicates(df):
    '''Return the rows whose Sample name appears under more than one Filenumber
    (i.e. the same sample measured in several files).'''
    keep = []  # index labels of rows belonging to duplicated samples
    for sample in df.Sample.unique().tolist():
        subset = df[df['Sample'] == sample]
        if len(subset.Filenumber.unique()) > 1:  # multiple files for this sample
            keep.extend(subset.index.unique().tolist())
    return df[df.index.isin(keep)]
def keepbestvals(df, spectralregs):
    '''For duplicate energy values, keep the one with largest # of sweeps

    spectralregs gains temporary Start/End index columns mapping each region to
    df row positions; for each duplicated energy the row from the region with
    fewer sweeps is dropped.
    Fixes vs prior version: the keep/remove condition was inverted (the higher-
    sweep value was being removed, contradicting the docstring); .at replaces the
    removed DataFrame.set_value; ~ replaces unary minus on a boolean Series;
    bare except narrowed.'''
    # generate temporary index ranges for spectralregs (can't just use energy values)
    start = 0
    spectralregs['Start'] = 0  # for index #s
    spectralregs['End'] = 0
    for index, row in spectralregs.iterrows():
        lower = spectralregs.loc[index]['Lower']
        upper = spectralregs.loc[index]['Upper']
        thisrange = int(upper - lower)
        spectralregs.at[index, 'Start'] = start
        spectralregs.at[index, 'End'] = start + thisrange
        start = start + thisrange + 1  # adjust for next loop
    dupl = df.duplicated(['Energy'], keep=False)  # mark every duplicated value (both copies)
    dupl = df.loc[dupl]
    energyvals = dupl.Energy.unique().tolist()
    removelist = []
    for val in energyvals:
        thismatch = dupl[dupl['Energy'] == val]
        if len(thismatch) != 2:
            print('Unknown error in energy duplicate elimination')
            continue
        # pick counts value with highest number of sweeps (best value)
        index1 = thismatch.index[0]
        index2 = thismatch.index[1]
        specmatch1 = spectralregs[(spectralregs['Start'] <= index1) & (spectralregs['End'] >= index1)]
        specmatch2 = spectralregs[(spectralregs['Start'] <= index2) & (spectralregs['End'] >= index2)]
        try:
            if specmatch1.iloc[0]['Sweeps'] >= specmatch2.iloc[0]['Sweeps']:
                # first value has more (or equal) sweeps: drop the second
                # (bug fix: previous code removed the better value here)
                removelist.append(index2)
            else:
                removelist.append(index1)
        except Exception:
            print('Problem with duplicate removal of ', index1, index2)
    df = df[~df.index.isin(removelist)]
    print(len(removelist), ' duplicated energy values removed from multiplex')
    return df
def sortmultiplex(multiplex, evbreaks):
    '''Rearrange multiplex if taken out of order and adjust evbreaks accordingly
    evbreaks holds index #s of multiplex breaks so must be altered along with the sort
    only called if multiplex is not monotonically increasing'''
    # record the energies sitting at each break (and the first energy of the next
    # region for interior breaks) so they can be located again after sorting
    breakenergies = []
    lastpos = len(evbreaks) - 1
    for pos, idx in enumerate(evbreaks):
        breakenergies.append(multiplex.loc[idx]['Energy'])
        if 0 < pos < lastpos:
            breakenergies.append(multiplex.loc[idx + 1]['Energy'])
    resorted = multiplex.sort_values(['Energy']).drop_duplicates(['Energy']).reset_index(drop=True)
    matches = resorted[resorted['Energy'].isin(breakenergies)]
    # adjacent indices are internal overlaps between regions, not true breaks
    newevbreaks = matches.index.tolist()
    newevbreaks = [int(i) for i in newevbreaks if i - 1 not in newevbreaks]
    return resorted, newevbreaks
def avglogentry(logmatches):
    '''create a logbook entry for a combine-averaged spectrum

    The new Filenumber is first and last filenumbers concatenated; the filename
    is renumbered to match and acquisition time is multiplied by the file count.
    Returns a pandas Series shaped for appending to the main param log.'''
    firststr = str(logmatches.iloc[0].iloc[0])
    laststr = str(logmatches.iloc[-1].iloc[0])
    combonum = int(firststr + laststr)
    # copy the first file's row; most params are shared by every averaged file
    avgentry = logmatches.iloc[[0]].squeeze()  # series avoids slice-of-df warnings
    avgentry.iloc[0] = combonum
    # renumber only 'first.' so a number elsewhere in the name is untouched
    newname = avgentry.iloc[2].replace(firststr + '.', firststr + laststr + '.')
    avgentry.iloc[2] = newname
    avgentry.iloc[16] *= len(logmatches)  # total acquisition time across files
    return avgentry
def makecomboentries(combinelist, AugerParamLog):
    '''Make a log entry for each averaged/combined file range in combinelist
    (this is normally done within combinespeloop).

    combinelist rows carry Filenumber (first) and Lastnumber (last) of each range;
    the constituent log rows are passed to avglogentry and the resulting series is
    appended to AugerParamLog, which is returned.
    Updated to pd.concat: DataFrame.append was removed from pandas.'''
    for index, row in combinelist.iterrows():  # large loop through each match
        firstfile = int(combinelist.loc[index]['Filenumber'])
        lastfile = int(combinelist.loc[index]['Lastnumber'])
        # all log rows in this numeric range belong to the combined spectrum
        # (all should have same # of areas)
        logmatches = AugerParamLog[(AugerParamLog['Filenumber'] >= firstfile) & (AugerParamLog['Filenumber'] <= lastfile)]
        # create new logbook entry (series) for averaged spectrum (mostly from firstfile's info)
        avgentry = avglogentry(logmatches)
        # append new entry to end of AugerParamLog (concat replaces removed df.append)
        AugerParamLog = pd.concat([AugerParamLog, avgentry.to_frame().T])
    return AugerParamLog
def checklog(filelist, AugerParamLog):
    '''Pass list of csv files in directory and checks the user Auger parameter log matches the actual data file list from directory
    prints out filenumbers that have a problem to console'''
    spelog = AugerParamLog[(AugerParamLog['Areas'] >= 1)]
    loglist = spelog.Filename.unique().tolist()
    # in the logbook but absent from the directory
    for val in [f for f in loglist if f not in filelist]:
        print('Data file number ', val, ' present in Auger params log but missing from directory')
    # on disk but absent from the logbook
    for val in [f for f in filelist if f not in loglist]:
        print('Data file number ', val, ' present in directory but missing from Auger params log')
    # check for duplicate entries in logbook
    for dup in set(x for x in loglist if loglist.count(x) > 1):
        print('Duplicate entry for file number', dup, ' in Auger params log.')
    return
def removelistdups(df, colname):
    '''Remove duplicates from any list stored within df column and return

    The column holds bracketed int lists as strings (e.g. '[3, 1, 3]'); each is
    parsed, deduplicated, sorted and written back as a python list. NaN cells
    are skipped. Updated to .at (DataFrame.set_value was removed from pandas).'''
    for index, row in df.iterrows():
        thisstr = df.loc[index][colname]
        if str(thisstr) != 'nan':  # skip missing entries
            tempstring = thisstr.split('[')[1]
            tempstring = tempstring.split(']')[0]  # remove brackets
            evbreaks = [int(s) for s in tempstring.split(',')]  # string -> list of ints
            evbreaks = sorted(set(evbreaks))  # dedupe and sort
            df.at[index, colname] = evbreaks  # write it back
    return df
def deletefiles(df):
    '''Delete from disk every file named in the Filename column of df
    (relative to the current working directory); missing files are ignored.'''
    for _, row in df.iterrows():
        target = row['Filename']
        if os.path.exists(target):
            os.remove(target)
            print(target, 'deleted.')
    return
def movefiles(df, destdir):
    '''Moves files named in dataframe to selected location
    destdir is prepended directly to each filename, so it should end in a separator.'''
    for _, row in df.iterrows():
        fname = row['Filename']
        if not os.path.exists(fname):
            print(fname, 'not found.')
            continue
        shutil.move(fname, destdir + fname)
        print(fname, 'moved to ', destdir)
    return
def copyexclusion(df1,df2):
    '''If AugerFileParams shows exclusion of an entire spectrum, copy 'excluded' into comments of other logs
    for each peak

    Placeholder: currently a no-op -- both arguments are ignored and None is returned.'''
    #TODO finish this comment copying function
    return
def copyproblemcomments(df1, df2):
    '''If there's a problem noted in comments of integlog, copy over to smdiflog (or vice versa)

    Rows of df1 whose Comments contain 'problem' (case-insensitive) are matched in
    df2 on Filenumber + Element/PeakID; the comment is copied (or prepended to any
    existing comment). Returns the updated df2.
    Updated to .at (DataFrame.set_value was removed from pandas).'''
    problemlist = df1['Comments'].str.contains('problem', case=False, na=False)
    problems = df1.loc[problemlist]
    if 'Comments' not in df2:
        df2['Comments'] = ''  # add comments col (sometimes missing from smdifpeakslog)
    for index, row in problems.iterrows():
        filenum = problems.loc[index]['Filenumber']
        elem = problems.loc[index]['Element']
        comment = problems.loc[index]['Comments']
        match = df2[(df2['Filenumber'] == filenum) & (df2['PeakID'] == elem)]
        if len(match) == 1:  # found associated peak
            existing = str(match.iloc[0]['Comments'])
            # append or add comment to other log (set directly via index of match)
            if existing == '' or existing == 'nan':
                df2.at[match.index[0], 'Comments'] = comment
            else:
                df2.at[match.index[0], 'Comments'] = comment + ' ' + existing
    return df2
def removefromlog(df1, df2):
    '''If filename is in df1, remove these entries from df2.. then manually save

    Returns df2 with every row whose Filename appears in df1 dropped.
    Fix: invert the mask with ~ -- unary minus on a boolean Series raises
    TypeError in modern pandas.'''
    removal = df1.Filename.unique().tolist()
    df2 = df2[~df2.Filename.isin(removal)]  # inverse of isin
    return df2
def outputduplicates(df, colname):
    '''Pass df and column name to search for duplicates (as string); outputs duplicates to console'''
    df = df.reset_index(drop=True)
    # keep=False marks every copy of a duplicated value (first and last included)
    dupmask = df.duplicated([colname], keep=False)
    for pos, isdup in enumerate(dupmask):
        if isdup:
            print('Duplicated spectrum for sample: ', df.iloc[pos]['Sample'])
    return
def copyselectfiles(df, foldername):
    '''Copy the csv of every file in df (e.g. after slicing) into subfolder
    foldername and save the sliced log there as Augerparamlog.csv, with
    FilePath rewritten to point at the subfolder.'''
    # create subdirectory for raw spe/sem/map files & csv sub-files if needed
    if not os.path.exists(foldername):
        os.makedirs(foldername)
    oldpath = df.iloc[0]['FilePath']  # should be same for entire folder
    # rewrite every occurrence of the current path to the subfolder path
    df = df.replace(oldpath, oldpath + "\\" + foldername)
    for _, row in df.iterrows():
        fname = row['Filename']
        shutil.copy2(fname, foldername + "\\" + fname)
    # save parameters list for this subset into the subfolder
    df.to_csv(foldername + "\\" + 'Augerparamlog.csv', index=False)
    return
def truncatenumber(AugerParamLog, Smdifpeakslog):
    '''Shortens long combo file numbers (6-8 digits) to their 2-3 digit versions
    in both logs.

    Fixes vs prior version: chained `iloc[i][col] = val` assigned to a temporary
    copy and never persisted; the second loop iterated AugerParamLog's length
    instead of Smdifpeakslog's; newval could be unbound for other lengths.'''
    def _shorten(df):
        # rewrite 6-8 digit combo Filenumbers in place via label-based .at
        for i in range(len(df)):
            val = str(df.iloc[i]['Filenumber'])
            if len(val) == 6 or len(val) == 7:
                df.at[df.index[i], 'Filenumber'] = int(val[0:2])
            elif len(val) == 8:
                df.at[df.index[i], 'Filenumber'] = int(val[0:3])
        return df
    return _shorten(AugerParamLog), _shorten(Smdifpeakslog)
| |
#-*- coding:utf-8 -*-
import socket, select, errno
import sys
import time
import traceback
import kive.event.timer as timer
import kive.http.dispatcher_server as dispatcher_server
import kive.config.settings as settings
import kive.common.util as util
import kive.status.status as status
from kive.common.singleton import *
@singleton
class Engine:
def __init__(self):
self.is_server = False
self.epoll = select.epoll()
self.fd2con = {}
self.incache = {}
self.outcache = {}
#handlers
self.inHandlers = {}
self.onDataHandlers = {}
self.onCloseHandlers = {}
self.onOutHandlers = {}
#status
self.status = status.status
self.timer = timer.Timer()
self.timer.add(1, self.updateStatus);
def updateStatus(self):
if self.status.update() or time.time()-self.status.last_print>10:
self.status.Print()
self.timer.add(1, self.updateStatus);
def register(self, con, in_handler, data_handler, out_handler=None, close_handler=None):
fd = con.fileno()
con.setsockopt(socket.SOL_SOCKET,socket.SO_SNDBUF,4096)
con.setsockopt(socket.SOL_SOCKET,socket.SO_RCVBUF,4096)
self.fd2con[fd] = con
self.incache[fd] = ""
self.outcache[fd] = ""
self.inHandlers[fd] = in_handler
self.onDataHandlers[fd] = data_handler
self.onCloseHandlers[fd] = close_handler
self.onOutHandlers[fd] = out_handler
self.status.n += 1
self.epoll.register(fd, select.EPOLLIN | select.EPOLLET | select.EPOLLHUP | select.EPOLLERR)
def unregister(self, fd):
self.status.close(fd)
del self.fd2con[fd]
del self.incache[fd]
del self.outcache[fd]
del self.onDataHandlers[fd]
del self.onCloseHandlers[fd]
del self.onOutHandlers[fd]
def bind(self, port):
self.is_server = True
svrsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
svrsocket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
svrsocket.bind(("0.0.0.0", port))
self.register(svrsocket, self.accept, None)
svrsocket.listen(1024768)
svrsocket.setblocking(0)
return svrsocket
def accept(self, svr_con):
try:
con, address = svr_con.accept()
con.setblocking(0)
self.register(con, self.receive, dispatcher_server.on_data)
return 0
except socket.error, msg:
if msg.errno != errno.EAGAIN:
traceback.print_exc()
return -1
def connect(self, ip, port, on_connect_handler, on_connect_parameters):
con = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
con.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
con.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
con.setblocking(0)
err = con.connect_ex((ip, port))
if err == errno.EINPROGRESS: #ok
pass
elif err == errno.EADDRNOTAVAIL: #not available
return -1
self.register(con, self.receive, on_connect_handler(*on_connect_parameters))
return con.fileno()
def send_delay(self, fd, data, seconds=1):
self.outcache[fd] += data
self.timer.add(seconds, self.send_out, (fd, ))
def send_nodelay(self, fd, data):
self.outcache[fd] += data
self.send_out(fd)
def send_out(self, fd):
if not self.fd2con.get(fd):
print >> sys.stderr, "Warning: before send,", fd, "has been closed."
return -1
try:
while len(self.outcache[fd]) > 0:
written = self.fd2con.get(fd).send(self.outcache[fd])
self.outcache[fd] = self.outcache[fd][written:]
if self.onOutHandlers.get(fd):
self.onOutHandlers[fd]()
self.epoll.modify(fd, select.EPOLLIN | select.EPOLLET | select.EPOLLHUP | select.EPOLLERR)
if settings.Debug:
print util.timestamp(), "send_out over, fd=", fd
except socket.error, msg:
if msg.errno == errno.EAGAIN:
if settings.Debug:
print fd, "send again"
self.epoll.modify(fd, select.EPOLLOUT | select.EPOLLET | select.EPOLLHUP | select.EPOLLERR)
else:
print >> sys.stderr, "send faliled, fd=%d, msg=%s" % (fd, msg)
self.close(fd)
except Exception, e:
print >> sys.stderr, "Error:%d send failed: err_msg=%s" % (fd, str(err_msg))
self.close(fd)
def run(self):
while 1:
self.loop()
def receive(self, con):
fd = con.fileno()
try:
tmp = con.recv(1024000)
if tmp:
if settings.Debug:
print fd, "READ:<<<", tmp,">>>"
self.incache[fd] = self.incache.get(fd, "") + tmp
return 0
else: # when the oper side closed
if settings.isClient:
print "EMPTY READ:", fd, tmp
self.close(fd)
return -1
except socket.error, msg:
if msg.errno == errno.EAGAIN :
if settings.Debug:
print "EAGAIN :", fd
in_data, out_data = self.onDataHandlers[fd](fd, self.incache[fd])
self.incache[fd] = in_data
self.send_nodelay(fd, out_data)
self.epoll.modify(fd, select.EPOLLET | select.EPOLLHUP | select.EPOLLERR)
return 1
elif msg.errno == errno.EWOULDBLOCK:
if settings.Debug:
print fd, "errno.EWOULDBLOCK"
self.close(fd)
return -1
else:
print >> sys.stderr, "ERROR:fd = %d." % (fd), str(msg)
self.close(fd)
return -1
def loop(self):
sec = self.timer.watch() #deal timer and get known of next tick
events = self.epoll.poll(sec)
for fd, event in events:
con = self.fd2con.get(fd)
try:
if event & select.EPOLLHUP:
print util.timestamp(), "select.EPOLLHUP,fd=", fd
if self.onCloseHandlers.get(fd):
self.onCloseHandlers[fd]()
self.close(fd)
elif event & select.EPOLLERR:
print >> sys.stderr, util.timestamp(), "select.EPOLLERR,fd=", fd
if self.onCloseHandlers.get(fd):
self.onCloseHandlers[fd]()
self.close(fd)
elif event & select.EPOLLIN:
while 1:
err = self.inHandlers[fd](con)
if err!=0:
break
elif event & select.EPOLLOUT:
if settings.Debug:
print util.timestamp(),fd,"select.EPOLLOUT"
self.send_out(fd)
else:
print("WARNING, UNKNOWN event:", event)
except:
traceback.print_exc()
def close(self, fd):
try:
if settings.Debug:
print "Notation, CLOSE fd=",fd
self.unregister(fd)
self.epoll.unregister(fd)
if fd in self.fd2con:
self.fd2con[fd].shutdown(socket.SHUT_RDWR)
self.fd2con[fd].close()
except Exception,e:
traceback.print_exc()
if __name__ == '__main__':
    # standalone mode: build the singleton engine and enter its event loop
    engine = Engine()
    engine.run()
| |
## import wx
## import os
## from wx.lib.agw import fourwaysplitter as FWS
from pandac.PandaModules import *
from direct.wxwidgets.WxPandaShell import *
from direct.directtools.DirectSelection import SelectionRay
#from ViewPort import *
from .ObjectPaletteUI import *
from .ObjectPropertyUI import *
from .SceneGraphUI import *
from .LayerEditorUI import *
from .HotKeyUI import *
from .ProtoPaletteUI import *
from .ActionMgr import *
from .AnimControlUI import *
from .CurveAnimUI import *
from .GraphEditorUI import *
class PandaTextDropTarget(wx.TextDropTarget):
    """wx drop target that turns text dragged from a palette into a new editor
    object, placed at the 3D point under the drop location in the viewport."""
    def __init__(self, editor, view):
        wx.TextDropTarget.__init__(self)
        # editor: level editor instance (provides actionMgr, objectMgr, propMeetsReq)
        self.editor = editor
        # view: the viewport this drop target is attached to
        self.view = view
    def OnDropText(self, x, y, text):
        """Create the object named by `text` via an undoable action, then
        ray-cast from the camera through the drop point (x, y) to position it."""
        # create new object
        parentNPRef = [None]
        if not self.editor.propMeetsReq(text, parentNPRef):
            return
        action = ActionAddNewObj(self.editor, text, parent=parentNPRef[0])
        self.editor.actionMgr.push(action)
        newobj = action()
        print(newobj)  # NOTE(review): looks like leftover debug output -- confirm
        if newobj is None:
            return
        # change window coordinate to mouse coordinate (normalized to [-1, 1])
        mx = 2 * (x/float(self.view.ClientSize.GetWidth()) - 0.5)
        my = -2 * (y/float(self.view.ClientSize.GetHeight()) - 0.5)
        # create ray from the camera to detect 3d position
        iRay = SelectionRay(self.view.camera)
        iRay.collider.setFromLens(self.view.camNode, mx, my)
        hitPt = None
        # first try to land the drop on the current "live" geometry, if any
        if self.editor.objectMgr.currLiveNP:
            iRay.collideWithGeom()
            iRay.ct.traverse(self.editor.objectMgr.currLiveNP)
            def isEntryBackfacing(iRay, entry):
                # True when the hit surface faces away from the camera
                if not entry.hasSurfaceNormal():
                    # Well, no way to tell. Assume we're not backfacing.
                    return 0
                fromNodePath = entry.getFromNodePath()
                v = Vec3(entry.getSurfacePoint(fromNodePath))
                n = entry.getSurfaceNormal(fromNodePath)
                # Convert to camera space for backfacing test
                p2cam = iRay.collisionNodePath.getParent().getMat(self.view.camera)
                v = Vec3(p2cam.xformPoint(v))
                n = p2cam.xformVec(n)
                # Normalize and check angle between to vectors
                v.normalize()
                return v.dot(n) >= 0
            iRay.sortEntries()
            # take the closest front-facing hit
            for entry in iRay.getEntries():
                if isEntryBackfacing(iRay, entry):
                    pass
                else:
                    hitPt = entry.getSurfacePoint(entry.getFromNodePath())
                    break
        if hitPt is None:
            # fall back to the viewport's collision plane (bit mask 21)
            iRay.collideWithBitMask(BitMask32.bit(21))
            iRay.ct.traverse(self.view.collPlane)
            if iRay.getNumEntries() > 0:
                entry = iRay.getEntry(0)
                hitPt = entry.getSurfacePoint(entry.getFromNodePath())
        if hitPt:
            # create a temp nodePath to get the position
            np = NodePath('temp')
            np.setPos(self.view.camera, hitPt)
            if base.direct.manipulationControl.fGridSnap:
                snappedPos = self.view.grid.computeSnapPoint(np.getPos())
                np.setPos(snappedPos)
            # update temp nodePath's HPR and scale with newobj's
            np.setHpr(newobj.getHpr())
            np.setScale(newobj.getScale())
            # transform newobj to cursor position (as a second undoable action)
            obj = self.editor.objectMgr.findObjectByNodePath(newobj)
            action = ActionTransformObj(self.editor, obj[OG.OBJ_UID], Mat4(np.getMat()))
            self.editor.actionMgr.push(action)
            np.remove()
            action()
        # clean up the collision ray
        iRay.collisionNodePath.removeNode()
        del iRay
# wx menu/command IDs, grouped by menu (values only need to be unique;
# they are keyed into MENU_TEXTS in LevelEditorUIBase below)
# File menu
ID_NEW = 101
ID_OPEN = 102
ID_SAVE = 103
ID_SAVE_AS = 104
ID_EXPORT_TO_MAYA = 105
# Edit menu
ID_DUPLICATE = 201
ID_MAKE_LIVE = 202
ID_UNDO = 203
ID_REDO = 204
# Options menu
ID_SHOW_GRID = 301
ID_GRID_SIZE = 302
ID_GRID_SNAP = 303
ID_SHOW_PANDA_OBJECT = 304
ID_HOT_KEYS = 305
ID_PARENT_TO_SELECTED = 306
# Curve tools
ID_CREATE_CURVE = 601
ID_EDIT_CURVE = 602
ID_CURVE_ANIM = 603
# Animation tools
ID_ANIM = 701
ID_GRAPH = 702
class LevelEditorUIBase(WxPandaShell):
    """ Class for Panda3D LevelEditor

    Main wx shell for the level editor: builds the menus, notebook panes,
    viewport drop targets and context menu, and forwards wx key events to
    the Panda3D ``messenger``/hot-key system.
    """
    def __init__(self, editor):
        # Register menu labels and their hot-key event names before the
        # base shell builds the menu bar in WxPandaShell.__init__.
        self.MENU_TEXTS.update({
            ID_NEW : ("&New", "LE-NewScene"),
            ID_OPEN : ("&Open", "LE-OpenScene"),
            ID_SAVE : ("&Save", "LE-SaveScene"),
            ID_SAVE_AS : ("Save &As", None),
            ID_EXPORT_TO_MAYA : ("Export to Maya", None),
            wx.ID_EXIT : ("&Quit", "LE-Quit"),
            ID_DUPLICATE : ("&Duplicate", "LE-Duplicate"),
            ID_MAKE_LIVE : ("Make &Live", "LE-MakeLive"),
            ID_UNDO : ("&Undo", "LE-Undo"),
            ID_REDO : ("&Redo", "LE-Redo"),
            ID_SHOW_GRID : ("&Show Grid", None),
            ID_GRID_SIZE : ("&Grid Size", None),
            ID_GRID_SNAP : ("Grid S&nap", None),
            ID_SHOW_PANDA_OBJECT : ("Show &Panda Objects", None),
            ID_HOT_KEYS : ("&Hot Keys", None),
            ID_PARENT_TO_SELECTED : ("&Parent To Selected", None),
            ID_CREATE_CURVE : ("&Create Curve", None),
            ID_EDIT_CURVE : ("&Edit Curve", None),
            ID_CURVE_ANIM : ("&Curve Animation", None),
            ID_ANIM : ("&Edit Animation", None),
            ID_GRAPH : ("&Graph Editor", None)
            })
        self.editor = editor
        WxPandaShell.__init__(self, fStartDirect=True)
        self.contextMenu = ViewportMenu()
        self.bindKeyEvents(True)

    def bindKeyEvents(self, toBind=True):
        """Attach or detach the app-wide keyboard handlers (dialogs detach
        them so typing does not trigger editor hot keys)."""
        if toBind:
            self.wxApp.Bind(wx.EVT_CHAR, self.onKeyEvent)
            self.wxApp.Bind(wx.EVT_KEY_DOWN, self.onKeyDownEvent)
            self.wxApp.Bind(wx.EVT_KEY_UP, self.onKeyUpEvent)
        else:
            self.wxApp.Unbind(wx.EVT_CHAR)
            self.wxApp.Unbind(wx.EVT_KEY_DOWN)
            self.wxApp.Unbind(wx.EVT_KEY_UP)

    def createMenu(self):
        """Build the File/Edit/Options/Curve/Animation menus, then let the
        base shell finish its own menu setup."""
        # File menu: insert ahead of the base shell's existing items.
        menuItem = self.menuFile.Insert(0, ID_NEW, self.MENU_TEXTS[ID_NEW][0])
        self.Bind(wx.EVT_MENU, self.onNew, menuItem)
        menuItem = self.menuFile.Insert(1, ID_OPEN, self.MENU_TEXTS[ID_OPEN][0])
        self.Bind(wx.EVT_MENU, self.onOpen, menuItem)
        menuItem = self.menuFile.Insert(2, ID_SAVE, self.MENU_TEXTS[ID_SAVE][0])
        self.Bind(wx.EVT_MENU, self.onSave, menuItem)
        menuItem = self.menuFile.Insert(3, ID_SAVE_AS, self.MENU_TEXTS[ID_SAVE_AS][0])
        self.Bind(wx.EVT_MENU, self.onSaveAs, menuItem)
        menuItem = self.menuFile.Insert(4, ID_EXPORT_TO_MAYA, self.MENU_TEXTS[ID_EXPORT_TO_MAYA][0])
        self.Bind(wx.EVT_MENU, self.onExportToMaya, menuItem)

        # Edit menu: undo/redo are bound straight to the action manager.
        self.menuEdit = wx.Menu()
        self.menuBar.Insert(1, self.menuEdit, "&Edit")
        menuItem = self.menuEdit.Append(ID_DUPLICATE, self.MENU_TEXTS[ID_DUPLICATE][0])
        self.Bind(wx.EVT_MENU, self.onDuplicate, menuItem)
        menuItem = self.menuEdit.Append(ID_MAKE_LIVE, self.MENU_TEXTS[ID_MAKE_LIVE][0])
        self.Bind(wx.EVT_MENU, self.onMakeLive, menuItem)
        menuItem = self.menuEdit.Append(ID_UNDO, self.MENU_TEXTS[ID_UNDO][0])
        self.Bind(wx.EVT_MENU, self.editor.actionMgr.undo, menuItem)
        menuItem = self.menuEdit.Append(ID_REDO, self.MENU_TEXTS[ID_REDO][0])
        self.Bind(wx.EVT_MENU, self.editor.actionMgr.redo, menuItem)

        # Options menu: grid display/snap toggles and hot-key list.
        self.menuOptions = wx.Menu()
        self.menuBar.Insert(2, self.menuOptions, "&Options")
        self.showGridMenuItem = self.menuOptions.Append(ID_SHOW_GRID, self.MENU_TEXTS[ID_SHOW_GRID][0], kind = wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.toggleGrid, self.showGridMenuItem)
        self.gridSizeMenuItem = self.menuOptions.Append(ID_GRID_SIZE, self.MENU_TEXTS[ID_GRID_SIZE][0])
        self.Bind(wx.EVT_MENU, self.onGridSize, self.gridSizeMenuItem)
        self.gridSnapMenuItem = self.menuOptions.Append(ID_GRID_SNAP, self.MENU_TEXTS[ID_GRID_SNAP][0], kind = wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.toggleGridSnap, self.gridSnapMenuItem)
        self.showPandaObjectsMenuItem = self.menuOptions.Append(ID_SHOW_PANDA_OBJECT, self.MENU_TEXTS[ID_SHOW_PANDA_OBJECT][0], kind = wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.onShowPandaObjects, self.showPandaObjectsMenuItem)
        # NOTE(review): no handler is bound to parentToSelectedMenuItem here;
        # presumably its checked state is queried elsewhere — confirm.
        self.parentToSelectedMenuItem = self.menuOptions.Append(ID_PARENT_TO_SELECTED, self.MENU_TEXTS[ID_PARENT_TO_SELECTED][0], kind = wx.ITEM_CHECK)
        self.hotKeysMenuItem = self.menuOptions.Append(ID_HOT_KEYS, self.MENU_TEXTS[ID_HOT_KEYS][0])
        self.Bind(wx.EVT_MENU, self.onHotKeys, self.hotKeysMenuItem)

        # Curve menu: create/edit/animate curve modes (check items).
        self.menuCurve = wx.Menu()
        self.menuBar.Insert(3, self.menuCurve, "&CurveMode")
        self.createCurveMenuItem = self.menuCurve.Append(ID_CREATE_CURVE, self.MENU_TEXTS[ID_CREATE_CURVE][0], kind = wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.onCreateCurve, self.createCurveMenuItem)
        self.editCurveMenuItem = self.menuCurve.Append(ID_EDIT_CURVE, self.MENU_TEXTS[ID_EDIT_CURVE][0], kind = wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.onEditCurve, self.editCurveMenuItem)
        self.curveAnimMenuItem = self.menuCurve.Append(ID_CURVE_ANIM, self.MENU_TEXTS[ID_CURVE_ANIM][0], kind = wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.onCurveAnim, self.curveAnimMenuItem)

        # Animation menu.
        self.menuAnim = wx.Menu()
        self.menuBar.Insert(4, self.menuAnim, "&AnimationMode")
        self.editAnimMenuItem = self.menuAnim.Append(ID_ANIM, self.MENU_TEXTS[ID_ANIM][0], kind = wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.onAnimation, self.editAnimMenuItem)
        self.graphEditorMenuItem = self.menuAnim.Append(ID_GRAPH, self.MENU_TEXTS[ID_GRAPH][0], kind = wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.onGraphEditor, self.graphEditorMenuItem)
        WxPandaShell.createMenu(self)

    def onGraphEditor(self,e):
        """Open the graph editor for the currently selected object, or warn
        when nothing is selected."""
        if base.direct.selected.last == None:
            dlg = wx.MessageDialog(None, 'Please select a object first.', 'NOTICE', wx.OK )
            dlg.ShowModal()
            dlg.Destroy()
            self.graphEditorMenuItem.Check(False)
        else:
            currentObj = self.editor.objectMgr.findObjectByNodePath(base.direct.selected.last)
            self.graphEditorUI = GraphEditorUI(self, self.editor, currentObj)
            self.graphEditorUI.Show()
            self.graphEditorMenuItem.Check(True)

    def onAnimation(self,e):
        """Enter animation mode (idempotent) and show the anim control UI."""
        if self.editor.mode != self.editor.ANIM_MODE:
            self.animUI = AnimControlUI(self, self.editor)
            self.animUI.Show()
            self.editor.mode = self.editor.ANIM_MODE
        if self.editor.mode == self.editor.ANIM_MODE:
            self.editAnimMenuItem.Check(True)

    def onCurveAnim(self,e):
        """Open the curve-animation dialog."""
        self.curveAnimUI = CurveAnimUI(self, self.editor)
        self.curveAnimUI.Show()
        self.curveAnimMenuItem.Check(True)

    def onCreateCurve(self,e):
        """Function to invoke curve creating, need to check previous mode"""
        if self.editor.mode == self.editor.CREATE_CURVE_MODE:
            # Already creating: toggle back to base mode.
            self.createCurveMenuItem.Check(False)
            self.editor.curveEditor.onBaseMode()
        else:
            if self.editor.mode == self.editor.EDIT_CURVE_MODE:
                # Leave edit mode first, then re-enter this handler.
                self.editor.curveEditor.onBaseMode()
                self.editCurveMenuItem.Check(False)
                self.createCurveMenuItem.Check(True)
                self.onCreateCurve(None)
            else:
                # Curve creation only works in a single maximized viewport.
                self.currentView = self.getCurrentView()
                if self.currentView == None:
                    dlg = wx.MessageDialog(None, 'Please select a viewport first.Do not support curve creation under four viewports.', 'NOTICE', wx.OK )
                    dlg.ShowModal()
                    dlg.Destroy()
                    self.createCurveMenuItem.Check(False)
                else:
                    self.editor.mode = self.editor.CREATE_CURVE_MODE
                    self.editor.updateStatusReadout('Please press ENTER to end the curve creation.')
                    # Ask for the curve degree before drawing starts.
                    degreeUI = CurveDegreeUI(self, -1, 'Curve Degree')
                    degreeUI.ShowModal()
                    degreeUI.Destroy()
                    base.direct.manipulationControl.disableManipulation()
                    self.editCurveMenuItem.Check(False)

    def onEditCurve(self,e):
        """Function to invoke curve editing and translate global information to local information. Need to check previous mode"""
        if self.editor.mode == self.editor.EDIT_CURVE_MODE:
            # Already editing: toggle back to base mode.
            self.editCurveMenuItem.Check(False)
            self.editor.curveEditor.onBaseMode()
        else:
            if self.editor.mode == self.editor.CREATE_CURVE_MODE:
                # Leave create mode first, then re-enter this handler.
                self.editor.curveEditor.onBaseMode()
                self.editCurveMenuItem.Check(True)
                self.createCurveMenuItem.Check(False)
                self.onEditCurve(None)
            else:
                if base.direct.selected.last == None:
                    dlg = wx.MessageDialog(None, 'Please select a curve first.', 'NOTICE', wx.OK )
                    dlg.ShowModal()
                    dlg.Destroy()
                    self.editCurveMenuItem.Check(False)
                if base.direct.selected.last != None :
                    base.direct.manipulationControl.enableManipulation()
                    self.createCurveMenuItem.Check(False)
                    self.curveObj = self.editor.objectMgr.findObjectByNodePath(base.direct.selected.last)
                    if self.curveObj[OG.OBJ_DEF].name == '__Curve__':
                        # Load the selected curve's data into the curve editor.
                        self.editor.mode = self.editor.EDIT_CURVE_MODE
                        self.editor.updateStatusReadout('Please press ENTER to end the curve editing.')
                        self.editor.curveEditor.currentRope = self.curveObj[OG.OBJ_NP]
                        self.editor.curveEditor.curveControl = self.curveObj[OG.OBJ_PROP]['curveInfo']
                        self.editor.curveEditor.degree = self.curveObj[OG.OBJ_PROP]['Degree']
                        for item in self.editor.curveEditor.curveControl:
                            # Show each control point and record its position.
                            item[1].show()
                            self.editor.curveEditor.curve.append((None, item[1].getPos()))
                    else:
                        dlg = wx.MessageDialog(None, 'Please select a curve first.', 'NOTICE', wx.OK )
                        dlg.ShowModal()
                        dlg.Destroy()
                        self.editCurveMenuItem.Check(False)

    def updateMenu(self):
        """Append the bound hot key to each menu label that has one."""
        hotKeyDict = {}
        for hotKey in base.direct.hotKeyMap.keys():
            desc = base.direct.hotKeyMap[hotKey]
            # desc[1] is the event name; map it back to its key string.
            hotKeyDict[desc[1]] = hotKey
        for id in self.MENU_TEXTS.keys():
            desc = self.MENU_TEXTS[id]
            if desc[1]:
                menuItem = self.menuBar.FindItemById(id)
                hotKey = hotKeyDict.get(desc[1])
                if hotKey:
                    menuItem.SetText(desc[0] + "\t%s"%hotKey)

    def createInterface(self):
        """Build the notebook panes (palettes, scene graph, layers), attach
        drop targets to the four viewports, and create the sub-UIs."""
        WxPandaShell.createInterface(self)
        self.leftBarUpNB = wx.Notebook(self.leftBarUpPane, style=wx.NB_BOTTOM)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.leftBarUpNB, 1, wx.EXPAND)
        self.leftBarUpPane.SetSizer(sizer)
        self.leftBarUpPane0 = wx.Panel(self.leftBarUpNB, -1)
        self.leftBarUpNB.AddPage(self.leftBarUpPane0, 'Object Palette')
        self.leftBarUpPane1 = wx.Panel(self.leftBarUpNB, -1)
        self.leftBarUpNB.AddPage(self.leftBarUpPane1, 'Proto Palette')
        self.leftBarDownNB = wx.Notebook(self.leftBarDownPane)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.leftBarDownNB, 1, wx.EXPAND)
        self.leftBarDownPane.SetSizer(sizer)
        self.leftBarDownPane0 = wx.Panel(self.leftBarDownNB, -1)
        self.leftBarDownNB.AddPage(self.leftBarDownPane0, 'Scene Graph')
        self.rightBarDownNB = wx.Notebook(self.rightBarDownPane)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.rightBarDownNB, 1, wx.EXPAND)
        self.rightBarDownPane.SetSizer(sizer)
        self.rightBarDownPane0 = wx.Panel(self.rightBarDownNB, -1)
        self.rightBarDownNB.AddPage(self.rightBarDownPane0, 'Layers')
        # Allow palette items to be dragged onto any viewport.
        self.topView.SetDropTarget(PandaTextDropTarget(self.editor, self.topView))
        self.frontView.SetDropTarget(PandaTextDropTarget(self.editor, self.frontView))
        self.leftView.SetDropTarget(PandaTextDropTarget(self.editor, self.leftView))
        self.perspView.SetDropTarget(PandaTextDropTarget(self.editor, self.perspView))
        self.rightBarDownPane.Layout()
        self.Layout()
        self.objectPaletteUI = ObjectPaletteUI(self.leftBarUpPane0, self.editor)
        self.protoPaletteUI = ProtoPaletteUI(self.leftBarUpPane1, self.editor)
        self.objectPropertyUI = ObjectPropertyUI(self.rightBarUpPane, self.editor)
        self.sceneGraphUI = SceneGraphUI(self.leftBarDownPane0, self.editor)
        self.layerEditorUI = LayerEditorUI(self.rightBarDownPane0, self.editor)
        self.showGridMenuItem.Check(True)

    def onRightDown(self, evt=None):
        """Invoked when the viewport is right-clicked."""
        if evt == None:
            # Called without a wx event: derive position from the mouse state.
            mpos = wx.GetMouseState()
            mpos = self.ScreenToClient((mpos.x, mpos.y))
        else:
            mpos = evt.GetPosition()
        base.direct.fMouse3 = 0
        self.PopupMenu(self.contextMenu, mpos)

    def onKeyDownEvent(self, evt):
        """Track modifier state and forward navigation keys to the messenger."""
        if evt.GetKeyCode() == wx.WXK_ALT:
            base.direct.fAlt = 1
        elif evt.GetKeyCode() == wx.WXK_CONTROL:
            base.direct.fControl = 1
        elif evt.GetKeyCode() == wx.WXK_SHIFT:
            base.direct.fShift = 1
        elif evt.GetKeyCode() == wx.WXK_UP:
            messenger.send('arrow_up')
        elif evt.GetKeyCode() == wx.WXK_DOWN:
            messenger.send('arrow_down')
        elif evt.GetKeyCode() == wx.WXK_LEFT:
            messenger.send('arrow_left')
        elif evt.GetKeyCode() == wx.WXK_RIGHT:
            messenger.send('arrow_right')
        elif evt.GetKeyCode() == wx.WXK_PAGEUP:
            messenger.send('page_up')
        elif evt.GetKeyCode() == wx.WXK_PAGEDOWN:
            messenger.send('page_down')
        else:
            evt.Skip()

    def onKeyUpEvent(self, evt):
        """Mirror of onKeyDownEvent: clear modifiers, send '-up' events."""
        if evt.GetKeyCode() == wx.WXK_ALT:
            base.direct.fAlt = 0
        elif evt.GetKeyCode() == wx.WXK_CONTROL:
            base.direct.fControl = 0
        elif evt.GetKeyCode() == wx.WXK_SHIFT:
            base.direct.fShift = 0
        elif evt.GetKeyCode() == wx.WXK_UP:
            messenger.send('arrow_up-up')
        elif evt.GetKeyCode() == wx.WXK_DOWN:
            messenger.send('arrow_down-up')
        elif evt.GetKeyCode() == wx.WXK_LEFT:
            messenger.send('arrow_left-up')
        elif evt.GetKeyCode() == wx.WXK_RIGHT:
            messenger.send('arrow_right-up')
        elif evt.GetKeyCode() == wx.WXK_PAGEUP:
            messenger.send('page_up-up')
        elif evt.GetKeyCode() == wx.WXK_PAGEDOWN:
            messenger.send('page_down-up')
        else:
            evt.Skip()

    def onKeyEvent(self, evt):
        """Translate a wx character event into the editor's hot-key string
        (e.g. 'shift-a', 'control-x', 'delete') and dispatch it."""
        input = ''
        if evt.GetKeyCode() in range(97, 123): # for keys from a to z
            if evt.GetModifiers() == 4: # when shift is pressed while caps lock is on
                input = 'shift-%s'%chr(evt.GetKeyCode())
            else:
                input = chr(evt.GetKeyCode())
        elif evt.GetKeyCode() in range(65, 91):
            # Upper-case letters: normalize to lower case (+32).
            if evt.GetModifiers() == 4: # when shift is pressed
                input = 'shift-%s'%chr(evt.GetKeyCode() + 32)
            else:
                input = chr(evt.GetKeyCode() + 32)
        elif evt.GetKeyCode() in range(1, 27): # for keys from a to z with control
            input = 'control-%s'%chr(evt.GetKeyCode()+96)
        elif evt.GetKeyCode() == wx.WXK_DELETE:
            input = 'delete'
        elif evt.GetKeyCode() == wx.WXK_ESCAPE:
            input = 'escape'
        else:
            if evt.GetModifiers() == 4:
                input = 'shift-%s'%chr(evt.GetKeyCode())
            elif evt.GetModifiers() == 2:
                input = 'control-%s'%chr(evt.GetKeyCode())
            elif evt.GetKeyCode() < 256:
                input = chr(evt.GetKeyCode())
        # Dispatch only keys that are actually mapped.
        if input in base.direct.hotKeyMap.keys():
            keyDesc = base.direct.hotKeyMap[input]
            messenger.send(keyDesc[1])

    def reset(self):
        """Clear the scene graph and layer panels."""
        self.sceneGraphUI.reset()
        self.layerEditorUI.reset()

    def onNew(self, evt=None):
        """File > New: reset the whole editor."""
        self.editor.reset()

    def onOpen(self, evt=None):
        """File > Open: prompt for a .py scene file and load it."""
        dialog = wx.FileDialog(None, "Choose a file", os.getcwd(), "", "*.py", wx.OPEN)
        if dialog.ShowModal() == wx.ID_OK:
            self.editor.load(dialog.GetPath())
            self.editor.setTitleWithFilename(dialog.GetPath())
        dialog.Destroy()

    def onSave(self, evt=None):
        """File > Save: fall back to Save As when there is no usable
        current .py filename."""
        if self.editor.currentFile is None or\
           not self.editor.currentFile.endswith('.py'):
            return self.onSaveAs(evt)
        else:
            self.editor.save()

    def onSaveAs(self, evt):
        """File > Save As: prompt for a filename; returns False on cancel."""
        dialog = wx.FileDialog(None, "Choose a file", os.getcwd(), "", "*.py", wx.SAVE)
        result = True
        if dialog.ShowModal() == wx.ID_OK:
            self.editor.saveAs(dialog.GetPath())
            self.editor.setTitleWithFilename(dialog.GetPath())
        else:
            result = False
        dialog.Destroy()
        return result

    def onExportToMaya(self, evt):
        """File > Export to Maya: prompt for a .mb file and export."""
        dialog = wx.FileDialog(None, "Choose a file", os.getcwd(), "", "*.mb", wx.SAVE)
        if dialog.ShowModal() == wx.ID_OK:
            self.editor.exportToMaya(dialog.GetPath())
        dialog.Destroy()

    def onDuplicate(self, evt):
        """Edit > Duplicate the current selection."""
        self.editor.objectMgr.duplicateSelected()

    def onMakeLive(self, evt):
        """Edit > Make Live the current selection."""
        self.editor.objectMgr.makeSelectedLive()

    def toggleGrid(self, evt):
        """Show/hide the grid in all four viewports per the menu check state."""
        if self.showGridMenuItem.IsChecked():
            for grid in [self.perspView.grid, self.topView.grid, self.frontView.grid, self.leftView.grid]:
                if grid.isHidden():
                    grid.show()
        else:
            for grid in [self.perspView.grid, self.topView.grid, self.frontView.grid, self.leftView.grid]:
                if not grid.isHidden():
                    grid.hide()

    def toggleGridSnap(self, evt):
        """Enable/disable grid snapping in the manipulator and all viewports."""
        if self.gridSnapMenuItem.IsChecked():
            base.direct.manipulationControl.fGridSnap = 1
            for grid in [self.perspView.grid, self.topView.grid, self.frontView.grid, self.leftView.grid]:
                grid.fXyzSnap = 1
        else:
            base.direct.manipulationControl.fGridSnap = 0
            for grid in [self.perspView.grid, self.topView.grid, self.frontView.grid, self.leftView.grid]:
                grid.fXyzSnap = 0

    def onGridSize(self, evt):
        """Open the modal grid-size dialog, seeded from the persp viewport."""
        gridSizeUI = GridSizeUI(self, -1, 'Change Grid Size', self.perspView.grid.gridSize, self.perspView.grid.gridSpacing)
        gridSizeUI.ShowModal()
        gridSizeUI.Destroy()

    def onShowPandaObjects(self, evt):
        """Toggle display of Panda objects in the scene graph panel."""
        self.sceneGraphUI.showPandaObjectChildren()

    def onDestroy(self, evt):
        """Persist palette and settings, then reset, on shutdown."""
        self.editor.protoPalette.saveToFile()
        self.editor.saveSettings()
        self.editor.reset()

    def updateGrids(self, newSize, newSpacing):
        """Apply a new grid size/spacing to all four viewports."""
        self.perspView.grid.gridSize = newSize
        self.perspView.grid.gridSpacing = newSpacing
        self.perspView.grid.updateGrid()
        self.topView.grid.gridSize = newSize
        self.topView.grid.gridSpacing = newSpacing
        self.topView.grid.updateGrid()
        self.frontView.grid.gridSize = newSize
        self.frontView.grid.gridSpacing = newSpacing
        self.frontView.grid.updateGrid()
        self.leftView.grid.gridSize = newSize
        self.leftView.grid.gridSpacing = newSpacing
        self.leftView.grid.updateGrid()

    def onHotKeys(self, evt):
        """Show the modal hot-key reference dialog."""
        hotKeyUI = HotKeyUI(self, -1, 'Hot Key List')
        hotKeyUI.ShowModal()
        hotKeyUI.Destroy()

    def buildContextMenu(self, nodePath):
        """Rebuild the right-click context menu for the given node path."""
        for menuItem in self.contextMenu.GetMenuItems():
            self.contextMenu.RemoveItem(menuItem)
        # p1 selects replace-one vs replace-all in replaceObject.
        self.contextMenu.addItem('Replace This', call=lambda\
                                 p0=None, p1=False:self.replaceObject(p0, p1))
        self.contextMenu.addItem('Replace All', call=lambda\
                                 p0=None, p1=True:self.replaceObject(p0, p1))
        self.contextMenu.AppendSeparator()

    def replaceObject(self, evt, all=False):
        """Replace the selected object (or, with all=True, every object of
        its type) with the type currently selected in the object palette."""
        currObj = self.editor.objectMgr.findObjectByNodePath(base.direct.selected.last)
        if currObj is None:
            print('No valid object is selected for replacement')
            return
        targetType = self.editor.ui.objectPaletteUI.getSelected()
        if targetType is None:
            print('No valid target type is selected for replacement')
            return
        if all:
            typeName = currObj[OG.OBJ_DEF].name
            objs = self.editor.objectMgr.findObjectsByTypeName(typeName)
            for obj in objs:
                self.editor.objectMgr.replaceObjectWithTypeName(obj, targetType)
        else:
            self.editor.objectMgr.replaceObjectWithTypeName(currObj, targetType)
class GridSizeUI(wx.Dialog):
    """Modal dialog for changing the editor grid's size and spacing.

    While the dialog is open the editor's global key bindings are
    suspended so typing does not trigger hot keys; they are restored
    when Apply is pressed.
    """
    def __init__(self, parent, id, title, gridSize, gridSpacing):
        wx.Dialog.__init__(self, parent, id, title, size=(250, 240))
        self.parent = parent
        content = wx.Panel(self, -1)

        # Grid-size slider group.
        wx.StaticBox(content, -1, 'Grid Size', (5, 5), (235, 80))
        self.gridSizeSlider = WxSlider(content, -1, float(gridSize), 10.0, 100000.0,
                                       pos=(10, 25), size=(220, -1),
                                       style=wx.SL_HORIZONTAL | wx.SL_LABELS,
                                       textSize=(80, 20))
        self.gridSizeSlider.Enable()

        # Grid-spacing slider group.
        wx.StaticBox(content, -1, 'Grid Space', (5, 90), (235, 80))
        self.gridSpacingSlider = WxSlider(content, -1, float(gridSpacing), 0.01, 2000.0,
                                          pos=(10, 115), size=(220, -1),
                                          style=wx.SL_HORIZONTAL | wx.SL_LABELS)
        self.gridSpacingSlider.Enable()

        applyButton = wx.Button(self, -1, 'Apply', size=(70, 20))
        applyButton.Bind(wx.EVT_BUTTON, self.onApply)

        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(content)
        layout.Add(applyButton, 1, wx.ALIGN_CENTER | wx.TOP | wx.BOTTOM, 5)
        self.SetSizer(layout)

        # Suspend editor-wide key handling while the dialog is up.
        base.le.ui.bindKeyEvents(False)

    def onApply(self, evt):
        """Push the chosen values to all viewport grids, restore key
        bindings, and close the dialog."""
        self.parent.updateGrids(self.gridSizeSlider.GetValue(),
                                self.gridSpacingSlider.GetValue())
        base.le.ui.bindKeyEvents(True)
        self.Destroy()
class ViewportMenu(wx.Menu):
    """Represents a menu that appears when right-clicking a viewport."""
    def __init__(self):
        wx.Menu.__init__(self)

    def addItem(self, name, parent=None, call=None, id=None):
        """Append a menu item labeled *name*.

        parent: menu to append to; defaults to this menu itself.
        call:   optional wx.EVT_MENU handler bound to the new item.
        id:     wx item id; a fresh one is generated when omitted.
        """
        # Fixed: compare against None with 'is', not '==' (PEP 8; '=='
        # may invoke arbitrary __eq__ overloads).
        if id is None:
            id = wx.NewId()
        if parent is None:
            parent = self
        item = wx.MenuItem(parent, id, name)
        parent.AppendItem(item)
        if call is not None:
            self.Bind(wx.EVT_MENU, call, item)

    def addMenu(self, name, parent=None, id=None):
        """Append a new empty sub-menu labeled *name* and return it.

        parent: menu to append to; defaults to this menu itself.
        id:     wx item id; a fresh one is generated when omitted.
        """
        if id is None:
            id = wx.NewId()
        subMenu = wx.Menu()
        if parent is None:
            parent = self
        parent.AppendMenu(id, name, subMenu)
        return subMenu
class CurveDegreeUI(wx.Dialog):
    """Modal dialog that lets the user pick the degree (2, 3 or 4) of a
    curve about to be created; the choice is stored on the editor's
    curve editor when Apply is pressed."""
    def __init__(self, parent, id, title):
        wx.Dialog.__init__(self, parent, id, title, size=(150, 120))
        self.parent = parent
        panel = wx.Panel(self, -1)
        degreeBox = wx.BoxSizer(wx.VERTICAL)
        degreeList = ['2', '3', '4']
        self.degree = wx.RadioBox(panel, -1, 'Curve Degree', (5, 5),
                                  wx.DefaultSize, degreeList, 3,
                                  wx.RA_SPECIFY_COLS)
        self.degree.SetToolTipString("Select the degree of the curve.")
        self.degree.SetSelection(1)  # default choice: degree 3
        okButton = wx.Button(self, -1, 'Apply', size=(70, 20))
        okButton.Bind(wx.EVT_BUTTON, self.onApply)
        degreeBox.Add(panel, 1, wx.ALIGN_CENTER | wx.TOP | wx.BOTTOM, 5)
        degreeBox.Add(okButton, 0, wx.ALIGN_CENTER | wx.TOP | wx.BOTTOM, 5)
        self.SetSizer(degreeBox)

    def onApply(self, evt):
        """Store the selected degree on the curve editor and close.

        Fixed: the old code compared str(GetSelection()) against '0'/'1'/'2'
        in three independent ifs; selection index i (0..2) simply maps to
        degree i + 2.
        """
        self.parent.editor.curveEditor.degree = self.degree.GetSelection() + 2
        self.Destroy()
| |
# Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import hashlib
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.compat import json
import boto
class SNSConnection(AWSQueryConnection):
    """Connection to the Amazon Simple Notification Service API
    (version 2010-03-31).

    Every operation requests a JSON response body and returns the parsed
    JSON document on HTTP 200; any other status is logged via boto.log
    and raised as ``self.ResponseError``.
    """
    DefaultRegionName = 'us-east-1'
    DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com'
    APIVersion = '2010-03-31'

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 security_token=None, validate_certs=True):
        # Default to us-east-1 when no region is supplied.
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint,
                                connection_cls=SNSConnection)
        self.region = region
        AWSQueryConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass,
                                    self.region.endpoint, debug,
                                    https_connection_factory, path,
                                    security_token=security_token,
                                    validate_certs=validate_certs)

    def _required_auth_capability(self):
        # SNS requires Signature Version 4 request signing.
        return ['hmac-v4']

    def _get_json_response(self, action, params, path='/', verb='GET'):
        """Issue *action* with *params* and parse the JSON response.

        Forces 'ContentType': 'JSON' onto *params*. Returns the decoded
        JSON document on HTTP 200; otherwise logs the status, reason and
        body, then raises ``self.ResponseError``.
        """
        params['ContentType'] = 'JSON'
        response = self.make_request(action, params, path, verb)
        body = response.read()
        if response.status == 200:
            return json.loads(body)
        boto.log.error('%s %s' % (response.status, response.reason))
        boto.log.error('%s' % body)
        raise self.ResponseError(response.status, response.reason, body)

    def get_all_topics(self, next_token=None):
        """
        :type next_token: string
        :param next_token: Token returned by the previous call to
                           this method.
        """
        params = {}
        if next_token:
            params['NextToken'] = next_token
        return self._get_json_response('ListTopics', params)

    def get_topic_attributes(self, topic):
        """
        Get attributes of a Topic

        :type topic: string
        :param topic: The ARN of the topic.
        """
        params = {'TopicArn': topic}
        return self._get_json_response('GetTopicAttributes', params)

    def set_topic_attributes(self, topic, attr_name, attr_value):
        """
        Set an attribute of a Topic

        :type topic: string
        :param topic: The ARN of the topic.

        :type attr_name: string
        :param attr_name: The name of the attribute you want to set.
                          Only a subset of the topic's attributes are mutable.
                          Valid values: Policy | DisplayName

        :type attr_value: string
        :param attr_value: The new value for the attribute.
        """
        params = {'TopicArn': topic,
                  'AttributeName': attr_name,
                  'AttributeValue': attr_value}
        return self._get_json_response('SetTopicAttributes', params)

    def add_permission(self, topic, label, account_ids, actions):
        """
        Adds a statement to a topic's access control policy, granting
        access for the specified AWS accounts to the specified actions.

        :type topic: string
        :param topic: The ARN of the topic.

        :type label: string
        :param label: A unique identifier for the new policy statement.

        :type account_ids: list of strings
        :param account_ids: The AWS account ids of the users who will be
                            give access to the specified actions.

        :type actions: list of strings
        :param actions: The actions you want to allow for each of the
                        specified principal(s).
        """
        params = {'TopicArn': topic,
                  'Label': label}
        self.build_list_params(params, account_ids, 'AWSAccountId.member')
        self.build_list_params(params, actions, 'ActionName.member')
        return self._get_json_response('AddPermission', params)

    def remove_permission(self, topic, label):
        """
        Removes a statement from a topic's access control policy.

        :type topic: string
        :param topic: The ARN of the topic.

        :type label: string
        :param label: A unique identifier for the policy statement
                      to be removed.
        """
        params = {'TopicArn': topic,
                  'Label': label}
        return self._get_json_response('RemovePermission', params)

    def create_topic(self, topic):
        """
        Create a new Topic.

        :type topic: string
        :param topic: The name of the new topic.
        """
        params = {'Name': topic}
        return self._get_json_response('CreateTopic', params)

    def delete_topic(self, topic):
        """
        Delete an existing topic

        :type topic: string
        :param topic: The ARN of the topic
        """
        params = {'TopicArn': topic}
        return self._get_json_response('DeleteTopic', params)

    def publish(self, topic, message, subject=None):
        """
        Publish a message to a Topic.

        :type topic: string
        :param topic: The ARN of the topic.

        :type message: string
        :param message: The message you want to send to the topic.
                        Messages must be UTF-8 encoded strings and
                        be at most 4KB in size.

        :type subject: string
        :param subject: Optional parameter to be used as the "Subject"
                        line of the email notifications.
        """
        params = {'TopicArn': topic,
                  'Message': message}
        if subject:
            params['Subject'] = subject
        return self._get_json_response('Publish', params)

    def subscribe(self, topic, protocol, endpoint):
        """
        Subscribe to a Topic.

        :type topic: string
        :param topic: The ARN of the new topic.

        :type protocol: string
        :param protocol: The protocol used to communicate with
                         the subscriber.  Current choices are:
                         email|email-json|http|https|sqs

        :type endpoint: string
        :param endpoint: The location of the endpoint for
                         the subscriber.
                         * For email, this would be a valid email address
                         * For email-json, this would be a valid email address
                         * For http, this would be a URL beginning with http
                         * For https, this would be a URL beginning with https
                         * For sqs, this would be the ARN of an SQS Queue
        """
        params = {'TopicArn': topic,
                  'Protocol': protocol,
                  'Endpoint': endpoint}
        return self._get_json_response('Subscribe', params)

    def subscribe_sqs_queue(self, topic, queue):
        """
        Subscribe an SQS queue to a topic.

        This is convenience method that handles most of the complexity involved
        in using an SQS queue as an endpoint for an SNS topic.  To achieve this
        the following operations are performed:

        * The correct ARN is constructed for the SQS queue and that ARN is
          then subscribed to the topic.
        * A JSON policy document is contructed that grants permission to
          the SNS topic to send messages to the SQS queue.
        * This JSON policy is then associated with the SQS queue using
          the queue's set_attribute method.  If the queue already has
          a policy associated with it, this process will add a Statement to
          that policy.  If no policy exists, a new policy will be created.

        :type topic: string
        :param topic: The ARN of the new topic.

        :type queue: A boto Queue object
        :param queue: The queue you wish to subscribe to the SNS Topic.
        """
        q_arn = queue.arn
        # Deterministic statement id derived from the (topic, queue) pair,
        # so re-subscribing the same pair does not duplicate the statement.
        sid = hashlib.md5(topic + q_arn).hexdigest()
        sid_exists = False
        resp = self.subscribe(topic, 'sqs', q_arn)
        attr = queue.get_attributes('Policy')
        if 'Policy' in attr:
            policy = json.loads(attr['Policy'])
        else:
            policy = {}
        if 'Version' not in policy:
            policy['Version'] = '2008-10-17'
        if 'Statement' not in policy:
            policy['Statement'] = []
        # See if a Statement with the Sid exists already.
        for s in policy['Statement']:
            if s['Sid'] == sid:
                sid_exists = True
        if not sid_exists:
            statement = {'Action': 'SQS:SendMessage',
                         'Effect': 'Allow',
                         'Principal': {'AWS': '*'},
                         'Resource': q_arn,
                         'Sid': sid,
                         'Condition': {'StringLike': {'aws:SourceArn': topic}}}
            policy['Statement'].append(statement)
        queue.set_attribute('Policy', json.dumps(policy))
        return resp

    def confirm_subscription(self, topic, token,
                             authenticate_on_unsubscribe=False):
        """
        Confirm a pending subscription to a Topic.

        :type topic: string
        :param topic: The ARN of the topic.

        :type token: string
        :param token: Short-lived token sent to and endpoint during
                      the Subscribe operation.

        :type authenticate_on_unsubscribe: bool
        :param authenticate_on_unsubscribe: Optional parameter indicating
                                            that you wish to disable
                                            unauthenticated unsubscription
                                            of the subscription.
        """
        params = {'TopicArn': topic,
                  'Token': token}
        if authenticate_on_unsubscribe:
            params['AuthenticateOnUnsubscribe'] = 'true'
        return self._get_json_response('ConfirmSubscription', params)

    def unsubscribe(self, subscription):
        """
        Allows endpoint owner to delete subscription.
        Confirmation message will be delivered.

        :type subscription: string
        :param subscription: The ARN of the subscription to be deleted.
        """
        params = {'SubscriptionArn': subscription}
        return self._get_json_response('Unsubscribe', params)

    def get_all_subscriptions(self, next_token=None):
        """
        Get list of all subscriptions.

        :type next_token: string
        :param next_token: Token returned by the previous call to
                           this method.
        """
        params = {}
        if next_token:
            params['NextToken'] = next_token
        return self._get_json_response('ListSubscriptions', params)

    def get_all_subscriptions_by_topic(self, topic, next_token=None):
        """
        Get list of all subscriptions to a specific topic.

        :type topic: string
        :param topic: The ARN of the topic for which you wish to
                      find subscriptions.

        :type next_token: string
        :param next_token: Token returned by the previous call to
                           this method.
        """
        params = {'TopicArn': topic}
        if next_token:
            params['NextToken'] = next_token
        return self._get_json_response('ListSubscriptionsByTopic', params)
| |
"""
Define names for built-in types that aren't directly accessible as a builtin.
"""
import sys
# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "__next__" attributes instead.
# Capture each interpreter-internal type by constructing a minimal example
# of it and taking type(...); the helper objects are deleted at the end.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None)         # Same as FunctionType
CodeType = type(_f.__code__)
MappingProxyType = type(type.__dict__)
SimpleNamespace = type(sys.implementation)

def _g():
    yield 1
GeneratorType = type(_g())

# Calling an async function returns a coroutine object.
async def _c(): pass
_c = _c()
CoroutineType = type(_c)
_c.close() # Prevent ResourceWarning

async def _ag():
    yield
_ag = _ag()
AsyncGeneratorType = type(_ag)

class _C:
    def _m(self): pass
MethodType = type(_C()._m)

BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append)     # Same as BuiltinFunctionType

WrapperDescriptorType = type(object.__init__)
MethodWrapperType = type(object().__str__)
MethodDescriptorType = type(str.join)
ClassMethodDescriptorType = type(dict.__dict__['fromkeys'])

ModuleType = type(sys)

# Raise and catch an exception to obtain a live traceback and frame.
try:
    raise TypeError
except TypeError:
    tb = sys.exc_info()[2]
    TracebackType = type(tb)
    FrameType = type(tb.tb_frame)
    tb = None; del tb  # drop the reference cycle through the frame

# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.__code__)
MemberDescriptorType = type(FunctionType.__globals__)

del sys, _f, _g, _C, _c, _ag # Not for export
# Provide a PEP 3115 compliant mechanism for class creation
def new_class(name, bases=(), kwds=None, exec_body=None):
    """Create a class object dynamically using the appropriate metaclass.

    *exec_body*, when given, is called with the prepared namespace so the
    caller can populate the class body before the metaclass is invoked.
    """
    good_bases = resolve_bases(bases)
    meta, namespace, kwds = prepare_class(name, good_bases, kwds)
    if exec_body is not None:
        exec_body(namespace)
    # Record the original bases when PEP 560 resolution replaced any of them.
    if good_bases is not bases:
        namespace['__orig_bases__'] = bases
    return meta(name, good_bases, namespace, **kwds)
def resolve_bases(bases):
    """Resolve MRO entries dynamically as specified by PEP 560.

    Non-type entries that provide ``__mro_entries__`` are replaced by the
    tuple that hook returns; the original *bases* tuple is returned
    unchanged (same object) when nothing needed resolving.
    """
    expanded = list(bases)
    changed = False
    offset = 0  # tracks how much earlier replacements shifted positions
    for idx, entry in enumerate(bases):
        if isinstance(entry, type):
            continue
        if not hasattr(entry, "__mro_entries__"):
            continue
        replacement = entry.__mro_entries__(bases)
        changed = True
        if not isinstance(replacement, tuple):
            raise TypeError("__mro_entries__ must return a tuple")
        expanded[idx + offset:idx + offset + 1] = replacement
        offset += len(replacement) - 1
    return tuple(expanded) if changed else bases
def prepare_class(name, bases=(), kwds=None):
    """Call the __prepare__ method of the appropriate metaclass.

    Returns (metaclass, namespace, kwds) as a 3-tuple

    *metaclass* is the appropriate metaclass
    *namespace* is the prepared class namespace
    *kwds* is an updated copy of the passed in kwds argument with any
    'metaclass' entry removed. If no kwds argument is passed in, this will
    be an empty dict.
    """
    # Work on a copy so the caller's mapping is never mutated.
    kwds = {} if kwds is None else dict(kwds)
    try:
        meta = kwds.pop('metaclass')
    except KeyError:
        # No explicit metaclass: derive it from the first base, or use type.
        meta = type(bases[0]) if bases else type
    if isinstance(meta, type):
        # when meta is a type, we first determine the most-derived metaclass
        # instead of invoking the initial candidate directly
        meta = _calculate_meta(meta, bases)
    if hasattr(meta, '__prepare__'):
        ns = meta.__prepare__(name, bases, **kwds)
    else:
        ns = {}
    return meta, ns, kwds
def _calculate_meta(meta, bases):
"""Calculate the most derived metaclass."""
winner = meta
for base in bases:
base_meta = type(base)
if issubclass(winner, base_meta):
continue
if issubclass(base_meta, winner):
winner = base_meta
continue
# else:
raise TypeError("metaclass conflict: "
"the metaclass of a derived class "
"must be a (non-strict) subclass "
"of the metaclasses of all its bases")
return winner
class DynamicClassAttribute:
    """Route attribute access on a class to __getattr__.

    This is a descriptor, used to define attributes that act differently when
    accessed through an instance and through a class. Instance access remains
    normal, but access to an attribute through a class will be routed to the
    class's __getattr__ method; this is done by raising AttributeError.

    This allows one to have properties active on an instance, and have virtual
    attributes on the class with the same name (see Enum for an example).
    """

    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        # Mirror property(): adopt the getter's docstring when no explicit
        # doc was supplied, and remember whether later getters may replace it.
        self.__doc__ = doc or fget.__doc__
        self.overwrite_doc = doc is None
        # support for abstract methods
        self.__isabstractmethod__ = bool(getattr(fget, '__isabstractmethod__', False))

    def __get__(self, instance, ownerclass=None):
        if instance is None:
            # Class-level access: defer to the class's __getattr__ by
            # raising, unless this is an abstract placeholder.
            if self.__isabstractmethod__:
                return self
            raise AttributeError()
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        return self.fget(instance)

    def __set__(self, instance, value):
        if self.fset is None:
            raise AttributeError("can't set attribute")
        self.fset(instance, value)

    def __delete__(self, instance):
        if self.fdel is None:
            raise AttributeError("can't delete attribute")
        self.fdel(instance)

    def getter(self, fget):
        """Return a copy of this descriptor with a replacement getter."""
        new_doc = (fget.__doc__ if self.overwrite_doc else None) or self.__doc__
        clone = type(self)(fget, self.fset, self.fdel, new_doc)
        clone.overwrite_doc = self.overwrite_doc
        return clone

    def setter(self, fset):
        """Return a copy of this descriptor with a replacement setter."""
        clone = type(self)(self.fget, fset, self.fdel, self.__doc__)
        clone.overwrite_doc = self.overwrite_doc
        return clone

    def deleter(self, fdel):
        """Return a copy of this descriptor with a replacement deleter."""
        clone = type(self)(self.fget, self.fset, fdel, self.__doc__)
        clone.overwrite_doc = self.overwrite_doc
        return clone
class _GeneratorWrapper:
# TODO: Implement this in C.
def __init__(self, gen):
self.__wrapped = gen
self.__isgen = gen.__class__ is GeneratorType
self.__name__ = getattr(gen, '__name__', None)
self.__qualname__ = getattr(gen, '__qualname__', None)
def send(self, val):
return self.__wrapped.send(val)
def throw(self, tp, *rest):
return self.__wrapped.throw(tp, *rest)
def close(self):
return self.__wrapped.close()
@property
def gi_code(self):
return self.__wrapped.gi_code
@property
def gi_frame(self):
return self.__wrapped.gi_frame
@property
def gi_running(self):
return self.__wrapped.gi_running
@property
def gi_yieldfrom(self):
return self.__wrapped.gi_yieldfrom
cr_code = gi_code
cr_frame = gi_frame
cr_running = gi_running
cr_await = gi_yieldfrom
def __next__(self):
return next(self.__wrapped)
def __iter__(self):
if self.__isgen:
return self.__wrapped
return self
__await__ = __iter__
def coroutine(func):
    """Convert regular generator function to a coroutine.

    Coroutine functions (and iterable coroutines) are returned unchanged.
    A plain generator function gets the CO_ITERABLE_COROUTINE flag set on
    its code object, in place. Any other callable is wrapped so that
    generator-like return values (e.g. generators compiled with Cython)
    are adapted on each call.
    """
    if not callable(func):
        raise TypeError('types.coroutine() expects a callable')
    if (func.__class__ is FunctionType and
        getattr(func, '__code__', None).__class__ is CodeType):
        co_flags = func.__code__.co_flags
        # Check if 'func' is a coroutine function.
        # (0x180 == CO_COROUTINE | CO_ITERABLE_COROUTINE)
        if co_flags & 0x180:
            return func
        # Check if 'func' is a generator function.
        # (0x20 == CO_GENERATOR)
        if co_flags & 0x20:
            # TODO: Implement this in C.
            # NOTE(review): this positional CodeType(...) call matches the
            # CPython version this file was written for; newer interpreters
            # insert co_posonlyargcount — confirm the target version.
            co = func.__code__
            func.__code__ = CodeType(
                co.co_argcount, co.co_kwonlyargcount, co.co_nlocals,
                co.co_stacksize,
                co.co_flags | 0x100, # 0x100 == CO_ITERABLE_COROUTINE
                co.co_code,
                co.co_consts, co.co_names, co.co_varnames, co.co_filename,
                co.co_name, co.co_firstlineno, co.co_lnotab, co.co_freevars,
                co.co_cellvars)
            return func
    # The following code is primarily to support functions that
    # return generator-like objects (for instance generators
    # compiled with Cython).
    # Delay functools and _collections_abc import for speeding up types import.
    import functools
    import _collections_abc
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        coro = func(*args, **kwargs)
        if (coro.__class__ is CoroutineType or
            coro.__class__ is GeneratorType and coro.gi_code.co_flags & 0x100):
            # 'coro' is a native coroutine object or an iterable coroutine
            return coro
        if (isinstance(coro, _collections_abc.Generator) and
            not isinstance(coro, _collections_abc.Coroutine)):
            # 'coro' is either a pure Python generator iterator, or it
            # implements collections.abc.Generator (and does not implement
            # collections.abc.Coroutine).
            return _GeneratorWrapper(coro)
        # 'coro' is either an instance of collections.abc.Coroutine or
        # some other object -- pass it through.
        return coro
    return wrapped
__all__ = [n for n in globals() if n[:1] != '_']
| |
"""
Timezone-related classes and functions.
"""
import functools
from contextlib import ContextDecorator
from datetime import datetime, timedelta, timezone, tzinfo
import pytz
from asgiref.local import Local
from django.conf import settings
__all__ = [
'utc', 'get_fixed_timezone',
'get_default_timezone', 'get_default_timezone_name',
'get_current_timezone', 'get_current_timezone_name',
'activate', 'deactivate', 'override',
'localtime', 'now',
'is_aware', 'is_naive', 'make_aware', 'make_naive',
]
# UTC time zone as a tzinfo instance.
utc = pytz.utc

# Base classes used by _is_pytz_zone() to recognize pytz-style zones, which
# require localize()/normalize() handling instead of plain tzinfo semantics.
_PYTZ_BASE_CLASSES = (pytz.tzinfo.BaseTzInfo, pytz._FixedOffset)
# In releases prior to 2018.4, pytz.UTC was not a subclass of BaseTzInfo
if not isinstance(pytz.UTC, pytz._FixedOffset):
    _PYTZ_BASE_CLASSES = _PYTZ_BASE_CLASSES + (type(pytz.UTC),)
def get_fixed_timezone(offset):
    """Return a tzinfo instance with a fixed offset from UTC.

    *offset* is either a number of minutes or a timedelta. The zone's name
    is rendered as a signed HHMM string (e.g. '+0530').
    """
    if isinstance(offset, timedelta):
        # Floor-divide keeps the original (float-producing) arithmetic.
        offset = offset.total_seconds() // 60
    hours, minutes = divmod(abs(offset), 60)
    name = '%s%02d%02d' % ('-' if offset < 0 else '+', hours, minutes)
    return timezone(timedelta(minutes=offset), name)
# In order to avoid accessing settings at compile time,
# wrap the logic in a function and cache the result.
@functools.lru_cache()
def get_default_timezone():
    """
    Return the default time zone as a tzinfo instance.

    This is the time zone defined by settings.TIME_ZONE.
    """
    # Cached for the life of the process: later changes to
    # settings.TIME_ZONE are not picked up unless the cache is cleared.
    return pytz.timezone(settings.TIME_ZONE)

# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
    """Return the name of the default time zone."""
    return _get_timezone_name(get_default_timezone())

# Per-thread/async-task storage for the currently activated time zone.
_active = Local()

def get_current_timezone():
    """Return the currently active time zone as a tzinfo instance."""
    # Note: getattr evaluates its default eagerly, so get_default_timezone()
    # runs even when an override is active (harmless — it is cached).
    return getattr(_active, "value", get_default_timezone())

def get_current_timezone_name():
    """Return the name of the currently active time zone."""
    return _get_timezone_name(get_current_timezone())

def _get_timezone_name(timezone):
    """Return the name of ``timezone``."""
    return str(timezone)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
    """
    Set the time zone for the current thread.

    The ``timezone`` argument must be an instance of a tzinfo subclass or a
    time zone name.
    """
    # tzinfo instances (including pytz zones) are stored as-is; the
    # instance check must therefore come before the string check.
    if isinstance(timezone, tzinfo):
        _active.value = timezone
        return
    if isinstance(timezone, str):
        _active.value = pytz.timezone(timezone)
        return
    raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
    """
    Unset the time zone for the current thread.

    Django will then use the time zone defined by settings.TIME_ZONE.
    """
    # EAFP: drop the override if one is set; a no-op otherwise.
    try:
        del _active.value
    except AttributeError:
        pass
class override(ContextDecorator):
    """
    Temporarily set the time zone for the current thread.

    This is a context manager that uses django.utils.timezone.activate()
    to set the timezone on entry and restores the previously active timezone
    on exit.

    The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is ``None``, Django enables the default
    time zone.
    """
    def __init__(self, timezone):
        self.timezone = timezone

    def __enter__(self):
        # Remember what was active (None when no override was in place),
        # so __exit__ can restore it.
        self.old_timezone = getattr(_active, 'value', None)
        if self.timezone is None:
            deactivate()
        else:
            activate(self.timezone)

    def __exit__(self, exc_type, exc_value, traceback):
        if self.old_timezone is None:
            deactivate()
        else:
            _active.value = self.old_timezone
# Templates
def template_localtime(value, use_tz=None):
    """
    Check if value is a datetime and converts it to local time if necessary.

    If use_tz is provided and is not None, that will force the value to
    be converted (or not), overriding the value of settings.USE_TZ.

    This function is designed for use by the template engine.
    """
    # Guard clauses replace the original single boolean expression; the
    # evaluation order (settings.USE_TZ before the naive check) is kept.
    if not isinstance(value, datetime):
        return value
    convert = settings.USE_TZ if use_tz is None else use_tz
    if not convert:
        return value
    if is_naive(value) or not getattr(value, 'convert_to_local_time', True):
        return value
    return localtime(value)
# Utilities
def localtime(value=None, timezone=None):
    """
    Convert an aware datetime.datetime to local time.

    Only aware datetimes are allowed. When value is omitted, it defaults to
    now().

    Local time is defined by the current time zone, unless another time zone
    is specified.
    """
    if timezone is None:
        timezone = get_current_timezone()
    if value is None:
        value = now()
    # Reject naive datetimes explicitly instead of silently attaching a
    # zone (mirrors astimezone() behavior on Python < 3.6).
    if is_naive(value):
        raise ValueError("localtime() cannot be applied to a naive datetime")
    return value.astimezone(timezone)
def localdate(value=None, timezone=None):
    """
    Convert an aware datetime to local time and return the value's date.

    Only aware datetimes are allowed. When value is omitted, it defaults to
    now().

    Local time is defined by the current time zone, unless another time zone is
    specified.
    """
    # Delegates defaulting and the aware-only validation to localtime().
    return localtime(value, timezone).date()
def now():
    """
    Return an aware or naive datetime.datetime, depending on settings.USE_TZ.
    """
    if settings.USE_TZ:
        return datetime.now(tz=utc)
    # tz=None and no tz argument are equivalent: a naive local datetime.
    return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.

def is_aware(value):
    """
    Determine if a given datetime.datetime is aware.

    The concept is defined in Python's docs:
    https://docs.python.org/library/datetime.html#datetime.tzinfo

    Assuming value.tzinfo is either None or a proper datetime.tzinfo,
    value.utcoffset() implements the appropriate logic.
    """
    # Aware datetimes report a concrete UTC offset; naive ones report None.
    return value.utcoffset() is not None

def is_naive(value):
    """
    Determine if a given datetime.datetime is naive.

    The concept is defined in Python's docs:
    https://docs.python.org/library/datetime.html#datetime.tzinfo

    Assuming value.tzinfo is either None or a proper datetime.tzinfo,
    value.utcoffset() implements the appropriate logic.
    """
    # Exact complement of is_aware().
    return value.utcoffset() is None
def make_aware(value, timezone=None, is_dst=None):
    """Make a naive datetime.datetime in a given time zone aware."""
    if timezone is None:
        timezone = get_current_timezone()
    if _is_pytz_zone(timezone):
        # pytz zones expose localize(), which resolves DST via is_dst.
        return timezone.localize(value, is_dst=is_dst)
    # Check that we won't overwrite the timezone of an aware datetime.
    if is_aware(value):
        raise ValueError(
            "make_aware expects a naive datetime, got %s" % value)
    # Plain tzinfo: attach directly. This may be wrong around DST changes!
    return value.replace(tzinfo=timezone)
def make_naive(value, timezone=None):
    """Make an aware datetime.datetime naive in a given time zone."""
    if timezone is None:
        timezone = get_current_timezone()
    # Emulate the behavior of astimezone() on Python < 3.6.
    if is_naive(value):
        raise ValueError("make_naive() cannot be applied to a naive datetime")
    # Shift into the target zone first, then strip the tzinfo.
    return value.astimezone(timezone).replace(tzinfo=None)
def _is_pytz_zone(tz):
    """Checks if a zone is a pytz zone."""
    # See _PYTZ_BASE_CLASSES above; pytz zones need localize()/normalize()
    # handling rather than plain tzinfo semantics.
    return isinstance(tz, _PYTZ_BASE_CLASSES)
def _datetime_ambiguous_or_imaginary(dt, tz):
    """Return True if *dt* is ambiguous or non-existent in time zone *tz*."""
    if _is_pytz_zone(tz):
        try:
            # With is_dst left unspecified, pytz raises for ambiguous or
            # imaginary local times instead of guessing.
            tz.utcoffset(dt)
        except (pytz.AmbiguousTimeError, pytz.NonExistentTimeError):
            return True
        else:
            return False

    # PEP 495: an ambiguous/imaginary local time yields a different UTC
    # offset depending on the fold attribute.
    return tz.utcoffset(dt.replace(fold=not dt.fold)) != tz.utcoffset(dt)
| |
from __future__ import absolute_import
import pytest
from copy import deepcopy
from .forms import SimpleForm
from webob.multidict import MultiDict
from wtforms import TextField, IntegerField
from wtforms.validators import (EqualTo, Length, NumberRange, AnyOf)
from wtforms_dynamic_fields import WTFormsDynamicFields
""" This test module uses PyTest (py.test command) for its testing.
Testing behavior with different, built-in WTForms validators.
These tests mainly poke the validator argument binding and argument
%field_name% replacement.
"""
@pytest.fixture(scope="module")
def setup(request):
    """ Initiate the basic POST mockup. """
    # Module-scoped base POST data; each test deepcopies it before adding
    # its own fields, so the shared MultiDict is never mutated.
    post = MultiDict()
    post.add(u'first_name',u'John')
    post.add(u'last_name',u'Doe')
    return post
# Below follow the actual tests
def test_validator_equalto_error(setup):
    """ Test EqualTo validator.

    No set - Error situation.
    Fields Mobile and Handy are not equal.
    """
    post = deepcopy(setup)
    post.add(u'mobile', '123456')
    post.add(u'handy', '654321')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('mobile','Mobile', TextField)
    dynamic_form.add_validator('mobile', EqualTo, 'handy', message='Please fill in the exact same data as handy.')
    dynamic_form.add_field('handy','Handy', TextField)
    dynamic_form.add_validator('handy', EqualTo, 'mobile', message='Please fill in the exact same data as mobile.')
    form = dynamic_form.process(SimpleForm, post)
    # validate() is called exactly once (a redundant discarded call was
    # removed); it populates form.errors used by the assertions below.
    assert form.validate() == False
    assert form.errors['mobile'] == ['Please fill in the exact same data as handy.']
    assert form.errors['handy'] == ['Please fill in the exact same data as mobile.']
    assert form.mobile() == '<input id="mobile" name="mobile" type="text" value="123456">'
    assert form.handy() == '<input id="handy" name="handy" type="text" value="654321">'
def test_validator_equalto_correct(setup):
    """ Test EqualTo validator.

    No set - No error situation.
    Fields Mobile and Handy are equal.
    """
    post = deepcopy(setup)
    post.add(u'mobile', '123456')
    post.add(u'handy', '123456')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('mobile','Mobile', TextField)
    dynamic_form.add_validator('mobile', EqualTo, 'handy', message='Please fill in the exact same data as handy.')
    dynamic_form.add_field('handy','Handy', TextField)
    dynamic_form.add_validator('handy', EqualTo, 'mobile', message='Please fill in the exact same data as mobile.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == True
    assert form.mobile() == '<input id="mobile" name="mobile" type="text" value="123456">'
    assert form.handy() == '<input id="handy" name="handy" type="text" value="123456">'
def test_validator_equalto_error_multiple(setup):
    """ Test EqualTo validator.

    Multiple sets - Error situation.
    Note that only mobile_2 and handy_2 are incorrect.
    """
    post = deepcopy(setup)
    post.add(u'mobile_1', '123456')
    post.add(u'handy_1', '123456')
    post.add(u'mobile_2', '456789')
    post.add(u'handy_2', '987654')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('mobile','Mobile', TextField)
    dynamic_form.add_validator('mobile', EqualTo, '%handy%', message='Please fill in the exact same data as %handy%.')
    dynamic_form.add_field('handy','Handy', TextField)
    dynamic_form.add_validator('handy', EqualTo, '%mobile%', message='Please fill in the exact same data as %mobile%.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == False
    assert form.errors['mobile_2'] == ['Please fill in the exact same data as handy_2.']
    assert form.errors['handy_2'] == ['Please fill in the exact same data as mobile_2.']
    assert form.mobile_1() == '<input id="mobile_1" name="mobile_1" type="text" value="123456">'
    assert form.handy_1() == '<input id="handy_1" name="handy_1" type="text" value="123456">'
    assert form.mobile_2() == '<input id="mobile_2" name="mobile_2" type="text" value="456789">'
    assert form.handy_2() == '<input id="handy_2" name="handy_2" type="text" value="987654">'
def test_validator_length_error(setup):
    """ Test Length validator.

    No set - Error situation.
    Field middle_name is too short.
    """
    post = deepcopy(setup)
    post.add(u'middle_name', 'foo')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('middle_name','Middle Name', TextField)
    dynamic_form.add_validator('middle_name', Length, min=4, max=10, message='Please enter length between 4 and 10 characters.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == False
    assert form.errors['middle_name'] == ['Please enter length between 4 and 10 characters.']
    assert form.middle_name() == '<input id="middle_name" name="middle_name" type="text" value="foo">'
def test_validator_length_correct(setup):
    """ Test Length validator.

    No set - No error situation.
    Field middle_name is of correct length.
    """
    post = deepcopy(setup)
    post.add(u'middle_name', 'foobar')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('middle_name','Middle Name', TextField)
    dynamic_form.add_validator('middle_name', Length, min=4, max=10, message='Please enter length between 4 and 10 characters.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == True
    assert form.middle_name() == '<input id="middle_name" name="middle_name" type="text" value="foobar">'
def test_validator_length_error_multiple(setup):
    """ Test Length validator.

    Multiple sets - Error situation.
    Note that only middle_name_1 is correct.
    """
    post = deepcopy(setup)
    post.add(u'middle_name_1', 'foobar')
    post.add(u'middle_name_2', 'foo')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('middle_name','Middle Name', TextField)
    dynamic_form.add_validator('middle_name', Length, min=4, max=10, message='Please enter length between 4 and 10 characters.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == False
    assert form.errors['middle_name_2'] == ['Please enter length between 4 and 10 characters.']
    assert form.middle_name_1() == '<input id="middle_name_1" name="middle_name_1" type="text" value="foobar">'
    assert form.middle_name_2() == '<input id="middle_name_2" name="middle_name_2" type="text" value="foo">'
def test_validator_numberrange_error(setup):
    """ Test NumberRange validator.

    No set - Error situation.
    Field age is outside the range.
    """
    post = deepcopy(setup)
    post.add(u'age', '20')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('age','Age', IntegerField)
    dynamic_form.add_validator('age', NumberRange, min=30, max=40, message='Please enter an age between %(min)s to %(max)s.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == False
    assert form.errors['age'] == ['Please enter an age between 30 to 40.']
    assert form.age() == '<input id="age" name="age" type="text" value="20">'
def test_validator_numberrange_success(setup):
    """ Test NumberRange validator.

    No set - No error situation.
    Field age is within range.
    """
    post = deepcopy(setup)
    post.add(u'age', '32')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('age','Age', IntegerField)
    dynamic_form.add_validator('age', NumberRange, min=30, max=40, message='Please enter an age between %(min)s to %(max)s.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == True
    assert form.age() == '<input id="age" name="age" type="text" value="32">'
def test_validator_numberrange_error_multiple(setup):
    """ Test NumberRange validator.

    Sets - Error situation.
    Note, only age_3 is within range.
    """
    post = deepcopy(setup)
    post.add(u'age_1', '4')
    post.add(u'age_2', '12')
    post.add(u'age_3', '30')
    post.add(u'age_4', '42')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('age','Age', IntegerField)
    dynamic_form.add_validator('age', NumberRange, min=30, max=40, message='Please enter an age between %(min)s to %(max)s.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == False
    assert form.errors['age_1'] == ['Please enter an age between 30 to 40.']
    assert form.errors['age_2'] == ['Please enter an age between 30 to 40.']
    assert form.errors['age_4'] == ['Please enter an age between 30 to 40.']
    assert form.age_1() == '<input id="age_1" name="age_1" type="text" value="4">'
    assert form.age_2() == '<input id="age_2" name="age_2" type="text" value="12">'
    assert form.age_3() == '<input id="age_3" name="age_3" type="text" value="30">'
    assert form.age_4() == '<input id="age_4" name="age_4" type="text" value="42">'
def test_validator_anyof_error(setup):
    """ Test AnyOf validator.

    No set - Error situation.
    Field hobby has an invalid selection.
    """
    post = deepcopy(setup)
    post.add(u'hobby', 'photography')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('hobby','Hobby', TextField)
    dynamic_form.add_validator('hobby', AnyOf, ['cylcing','swimming','hacking'], message='Please enter only allowed hobbies.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == False
    assert form.errors['hobby'] == ['Please enter only allowed hobbies.']
    assert form.hobby() == '<input id="hobby" name="hobby" type="text" value="photography">'
def test_validator_anyof_success(setup):
    """ Test AnyOf validator.

    No set - No error situation.
    Field hobby has a valid selection.
    """
    post = deepcopy(setup)
    post.add(u'hobby', 'swimming')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('hobby','Hobby', TextField)
    dynamic_form.add_validator('hobby', AnyOf, ['cylcing','swimming','hacking'], message='Please enter only allowed hobbies.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == True
    assert form.hobby() == '<input id="hobby" name="hobby" type="text" value="swimming">'
def test_validator_anyof_error_multiple(setup):
    """ Test AnyOf validator.

    Sets - Error situation.
    Note, only hobby_3 has a valid hobby.
    """
    post = deepcopy(setup)
    post.add(u'hobby_1', 'sleeping')
    post.add(u'hobby_2', 'eating')
    post.add(u'hobby_3', 'swimming')
    post.add(u'hobby_4', 'gaming')
    dynamic_form = WTFormsDynamicFields()
    dynamic_form.add_field('hobby','Hobby', TextField)
    dynamic_form.add_validator('hobby', AnyOf, ['cylcing','swimming','hacking'], message='Please enter only allowed hobbies.')
    form = dynamic_form.process(SimpleForm, post)
    # Single validate() call (redundant discarded call removed).
    assert form.validate() == False
    assert form.errors['hobby_1'] == ['Please enter only allowed hobbies.']
    assert form.errors['hobby_2'] == ['Please enter only allowed hobbies.']
    assert form.errors['hobby_4'] == ['Please enter only allowed hobbies.']
    assert form.hobby_1() == '<input id="hobby_1" name="hobby_1" type="text" value="sleeping">'
    assert form.hobby_2() == '<input id="hobby_2" name="hobby_2" type="text" value="eating">'
    assert form.hobby_3() == '<input id="hobby_3" name="hobby_3" type="text" value="swimming">'
    assert form.hobby_4() == '<input id="hobby_4" name="hobby_4" type="text" value="gaming">'
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
qplotutils.chart.view
---------------------
Base widget that provides the view for all charts including axis, legend and zooming and panning capabilities.
"""
import logging
import math
import numpy as np
from qtpy.QtCore import Signal, Qt, QPointF, QRectF, QSizeF
from qtpy.QtGui import (
QPen,
QBrush,
QColor,
QPainter,
QPainterPath,
QFontMetrics,
QFont,
QPicture,
QTransform,
)
from qtpy.QtWidgets import (
QGraphicsItem,
QStyleOptionGraphicsItem,
QGraphicsWidget,
QGraphicsView,
QFrame,
QGraphicsScene,
QSizePolicy,
QGraphicsGridLayout,
QAction,
)
from qplotutils import QPlotUtilsException
from . import LOG_LEVEL
from .items import ChartItem, ChartItemFlags
from .utils import makePen
from .. import CONFIG
__author__ = "Philipp Baust"
__copyright__ = "Copyright 2019, Philipp Baust"
__credits__ = []
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Philipp Baust"
__email__ = "philipp.baust@gmail.com"
__status__ = "Development"
_log = logging.getLogger(__name__)
_log.setLevel(LOG_LEVEL)
class Style(object):
    """ Borg for global styling.

    All instances share one state dict, so a styling value set through any
    instance is visible through every other instance.

    TODO: Not yet implemented
    """
    __shared_state = {}

    def __init__(self):
        self.__dict__ = self.__shared_state
        # NOTE(review): every instantiation resets grid_color on the shared
        # state — confirm this is intended for the Borg pattern here.
        self.grid_color = QColor(90, 90, 90, 255)
class ChartLegend(ChartItem):
    """ Legend for chart views.

    Renders one row per registered chart item: a color swatch followed by
    the item's label. Drawing is recorded into a cached QPicture and
    replayed in paint().
    """

    def __init__(self):
        """ Displays the chart item in a legend table. """
        super(
            ChartLegend, self
        ).__init__() # Pycharm / pyLint inspection error. Please ignore
        # User-movable; ignores the view's zoom/pan transforms.
        self.setFlags(
            QGraphicsItem.ItemIsMovable | QGraphicsItem.ItemIgnoresTransformations
        )
        self._picture = None # cached QPicture, rebuilt whenever entries change
        self._bRect = QRectF(0, 0, 200, 50) # recomputed from entries
        self._entries = [] # chart items currently shown in the legend
        self.font = QFont("Helvetica [Cronyx]", 10, QFont.Normal)
        self.fontFlags = Qt.TextDontClip | Qt.AlignLeft | Qt.AlignVCenter
        # Hidden until explicitly enabled (e.g. via the view's legend action).
        self.setVisible(False)

    def addEntry(self, chart_item):
        """ Slot. Adds an entry for the given chart item to the legend.

        :param chart_item:
        """
        _log.debug("Add entry chart item: {}".format(chart_item.label))
        self._entries.append(chart_item)
        self._updateBoundingRect()
        self._updatePicture()
        # self.setVisible(True)

    def removeEntry(self, chart_item):
        """ Slot. Removes the entry for the given chart item to the legend.

        :param chart_item: chart item such as line chart item, e.g.
        """
        _log.debug("Remove chart item entry: {}".format(chart_item.label))
        self._entries.remove(chart_item)
        self._updateBoundingRect()
        self._updatePicture()

    def _updateBoundingRect(self):
        # Width follows the widest label plus swatch/padding;
        # height is 20 px per entry row plus margins.
        metrics = QFontMetrics(self.font)
        runWidth = 0
        for entry in self._entries:
            # (text, color, width, tick) = entry
            if entry.label is not None:
                w = metrics.width(entry.label)
                if w > runWidth:
                    runWidth = w
        self._bRect.setWidth(runWidth + 40)
        self._bRect.setHeight(len(self._entries) * 20 + 8)

    def _updatePicture(self):
        # Re-record the cached picture from scratch.
        self._picture = QPicture()
        painter = QPainter(self._picture)
        self._generatePicture(painter)
        painter.end()

    def _generatePicture(self, p=QPainter()):
        # Background panel with a light border.
        p.setPen(QPen(QColor("#aaaaaa")))
        p.setBrush(QBrush(QColor(255, 255, 255, 80), Qt.SolidPattern))
        p.drawRoundedRect(self._bRect, 2, 2)
        # One swatch + label row per entry.
        for k, entry in enumerate(self._entries):
            # (text, color, width, tick) = entry
            p.setBrush(QBrush(entry.color, Qt.SolidPattern))
            p.setPen(QPen(Qt.transparent))
            p.drawRect(6, 8 + k * 20, 11, 11)
            p.setBrush(QBrush(Qt.transparent))
            p.setPen(QPen(QColor("#FFFFFF")))
            tickRect = QRectF(24, 8 + k * 20, self._bRect.width() - 24, 11)
            p.drawText(tickRect, self.fontFlags, "{}".format(entry.label))

    def boundingRect(self):
        return self._bRect

    def shape(self):
        path = QPainterPath()
        path.addRect(self._bRect)
        return path

    def paint(self, p=QPainter(), o=QStyleOptionGraphicsItem(), widget=None):
        # NOTE(review): the default arguments instantiate Qt objects at class
        # definition time; Qt always passes real arguments so the defaults
        # appear unused — confirm before relying on them.
        if self._picture is None:
            return
        # Replay the cached drawing commands.
        self._picture.play(p)

    def __del__(self):
        _log.debug("Finalizing: {}".format(self))
class ChartView(QGraphicsView):
""" Widget that display chart items.
"""
#: point of origin upper left corner, x axis to the right, y axis to the bottom
DEFAULT_ORIENTATION = QTransform(1, 0, 0, 1, 0, 0)
#: point of origin lower left corner, x axis to the right, y axis to the top
CARTESIAN = QTransform(1, 0, 0, -1, 0, 0)
#: point of origin lower right corner, x axis to the top, y axis to the left
AUTOSAR = QTransform(0, -1, -1, 0, 0, 0)
#: Reference corner for map keys (e.g. legend)
TOP_LEFT = "top_left"
TOP_RIGHT = "top_right"
BOTTOM_LEFT = "bottom_left"
BOTTOM_RIGHT = "bottom_right"
    def __init__(self, parent=None, orientation=DEFAULT_ORIENTATION):
        """ Builds the scene, central chart widget, legend and actions.

        :param parent: optional parent widget.
        :param orientation: coordinate-system transform; one of the class
            constants (DEFAULT_ORIENTATION, CARTESIAN, AUTOSAR).
        """
        super(ChartView, self).__init__(parent)
        self.setFocusPolicy(Qt.StrongFocus)
        self.setFrameShape(QFrame.NoFrame)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.setTransformationAnchor(QGraphicsView.NoAnchor)
        self.setResizeAnchor(QGraphicsView.AnchorViewCenter)
        self.setViewportUpdateMode(QGraphicsView.MinimalViewportUpdate)
        self.setRenderHints(QPainter.SmoothPixmapTransform | QPainter.Antialiasing)
        self.setContextMenuPolicy(Qt.CustomContextMenu)
        # self.customContextMenuRequested.connect(self.on_context_menu)
        self.setAcceptDrops(False)
        scene = QGraphicsScene()
        self.setScene(scene)
        # Central widget hosts the plot area inside the scene.
        self.centralWidget = ChartWidget()
        self.scene().addItem(self.centralWidget)
        b_rect = QRectF(0, 0, self.size().width() - 2, self.size().height() - 2)
        self.centralWidget.setGeometry(b_rect)
        self.setBackgroundBrush(QBrush(Qt.black, Qt.SolidPattern))
        self.centralWidget.area.getRootItem().setTransform(orientation)
        # Corner-anchored overlay items (legend etc.); see __layout_map_keys().
        self._map_keys = []
        # Legend
        self._make_legend()
        # Toggle chart legend
        self.legendAction = QAction("Legend", self)
        self.legendAction.setCheckable(True)
        self.legendAction.triggered.connect(self.__toggle_legend)
        # Force aspect ratio
        self.apect1by1Action = QAction("Aspect ratio 1:1", self)
        self.apect1by1Action.setCheckable(True)
        self.apect1by1Action.toggled.connect(self.__toggle_apect1by1)
        self._dbg_box_color = Qt.blue
def __toggle_legend(self, checked):
self._legend.setVisible(checked)
    def resizeEvent(self, event):
        # Re-fit the central widget to the new view size; the event object
        # itself is not inspected.
        # NOTE(review): super().resizeEvent(event) is not called — confirm
        # this is intentional.
        self.__relayout()

    def __relayout(self):
        # Resize the central widget to (almost) the view size, refresh the
        # axes and re-anchor the corner overlay items.
        b_rect = QRectF(0, 0, self.size().width() - 3, self.size().height() - 3)
        self.centralWidget.setGeometry(b_rect)
        # self.autoRange()
        self.centralWidget.area.axisChange()
        self.__layout_map_keys()
    def __layout_map_keys(self):
        """ Anchors each registered map key to its configured view corner. """
        a = self.centralWidget.area.size()
        b = self.centralWidget.area.pos()
        for r in self._map_keys:
            c, dx, dy, item = r["corner"], r["x"], r["y"], r["Item"]
            if c == self.TOP_RIGHT:
                # TODO: Does not work until the window is resized...
                x = b.x() + a.width() - item.boundingRect().width() - dx
                y = b.y() + dy
            elif c == self.BOTTOM_LEFT:
                x = b.x() + dx
                y = b.y() + a.height() - item.boundingRect().height() - dy
            elif c == self.BOTTOM_RIGHT:
                # NOTE(review): x is measured from the full view width here,
                # not from the plot area like the other corners — confirm.
                x = self.width() - item.boundingRect().width() - dx
                y = b.y() + a.height() - item.boundingRect().height() - dy
            else:
                # Fallback (TOP_LEFT): plain offsets from the origin.
                x = dx
                y = dy
            item.setPos(x, y)
    def showEvent(self, event):
        """Qt show hook: sync axes and overlay positions once visible."""
        self.centralWidget.area.axisChange()
        self.__layout_map_keys()

    def setCoordinatesOrientation(self, orientation=DEFAULT_ORIENTATION):
        """Apply a coordinate-orientation transform (e.g. flipped y axis).

        :param orientation: QTransform describing the axis orientation.
        """
        self.centralWidget.area.getRootItem().setTransform(orientation)
        self.centralWidget.area.axisChange()

    def setVisibleRange(self, rect):
        """Show exactly the data-coordinate range *rect*."""
        self.centralWidget.area.setRange(rect)
def addItem(self, item=ChartItem()):
item.setParentItem(self.centralWidget.area.getRootItem())
self.centralWidget.area.visibleRangeChange.connect(item.visibleRangeChanged)
# self.connect(self.centralWidget.area, SIGNAL("visibleRangeChange"), item.visibleRangeChanged)
if not item.chartItemFlags & ChartItemFlags.FLAG_NO_LABEL:
self.legend.addEntry(item)
    def removeItem(self, item):
        """Detach *item* from the chart and remove its legend entry (if any)."""
        if self.legend and not item.chartItemFlags & ChartItemFlags.FLAG_NO_LABEL:
            self.legend.removeEntry(item)
        # Detaching from the root item removes it from the scene graph.
        item.setParentItem(None)
def _make_legend(self):
legend = ChartLegend()
self.scene().addItem(legend)
# parent_w = self.size().width()
# legend.setPos(parent_w - legend.boundingRect().width() - 15, 40)
# return legend
self._map_keys.append(
{"corner": self.TOP_RIGHT, "x": 10, "y": 10, "Item": legend}
)
    def autoRange(self):
        """Zoom so that every auto-ranging item is fully visible."""
        self.centralWidget.area.autoRange()

    def setRange(self, rect):
        """Show exactly the data-coordinate range *rect*."""
        self.centralWidget.area.setRange(rect)

    def __toggle_apect1by1(self, checked):
        """Slot for ``apect1by1Action``: force or release a 1:1 aspect ratio."""
        _log.debug("Aspect 1:1")
        if checked:
            self.centralWidget.area.setAspectRatio(1.0)
        else:
            # None removes the aspect-ratio constraint.
            self.centralWidget.area.setAspectRatio(None)
    @property
    def aspectRatio(self):
        # Current fixed aspect ratio, or None when unconstrained.
        return self.centralWidget.area.aspectRatio

    def setAspectRatio(self, value):
        """Fix the plot's aspect ratio (None removes the constraint)."""
        self.centralWidget.area.setAspectRatio(value)

    def setLegendVisible(self, visible, corner=TOP_RIGHT):
        """Show or hide the legend, optionally moving it to *corner*."""
        if visible:
            self._map_keys[0]["corner"] = corner
            self.__layout_map_keys()
        self._map_keys[0]["Item"].setVisible(visible)

    @property
    def legend(self):
        # The legend is always the first registered map-key item.
        return self._map_keys[0]["Item"]

    def add_chart_key(self, key_item, corner):
        """Add an extra overlay (map key) item pinned to *corner*."""
        self.scene().addItem(key_item)
        self._map_keys.append({"corner": corner, "x": 10, "y": 10, "Item": key_item})
        self.__layout_map_keys()
# @property
# def legend(self):
# return self._legend.isVisible()
#
# @legend.setter
# def legend(self, value):
# # TODO: legend should track its position by itself and relative to parent
# parent_w = self.size().width()
# self._legend.setPos(parent_w - self._legend.boundingRect().width() - 15, 40)
# self._legend.setVisible(value)
    def setMaxVisibleRange(self, rect):
        """Limit pan/zoom so the visible range never exceeds *rect*."""
        self.centralWidget.area.setMaxVisibleRange(rect)

    @property
    def title(self):
        # Chart title text (delegated to the central widget).
        return self.centralWidget.title

    @title.setter
    def title(self, value):
        self.centralWidget.title = value
        # The title row changes the layout geometry -> reposition overlays.
        self.__layout_map_keys()

    @property
    def horizontalLabel(self):
        # Label text below the horizontal (bottom) axis.
        return self.centralWidget.horizontalLabel

    @horizontalLabel.setter
    def horizontalLabel(self, value):
        self.centralWidget.horizontalLabel = value
        self.__layout_map_keys()

    @property
    def verticalLabel(self):
        # Label text left of the vertical axis.
        return self.centralWidget.verticalLabel

    @verticalLabel.setter
    def verticalLabel(self, value):
        self.centralWidget.verticalLabel = value
        self.__layout_map_keys()

    def addSecondaryHorizontalAxis(self, axis):
        """Attach a second horizontal axis (shown at the top of the plot)."""
        self.centralWidget.addSecondaryHorizontalAxis(axis)
        self.__relayout()

    def addSecondaryVerticalAxis(self, axis):
        """Attach a second vertical axis (shown right of the plot)."""
        self.centralWidget.addSecondaryVerticalAxis(axis)
        # NOTE(review): only overlay keys are re-laid-out here, while the
        # horizontal variant does a full __relayout() -- confirm asymmetry.
        self.__layout_map_keys()
    def __repr__(self):
        return "<ChartView>"

    def __del__(self):
        # Debug aid to spot lifetime issues of Qt wrapper objects.
        _log.debug("Finalizing: {}".format(self))
class ChartWidget(QGraphicsWidget):
    """ Provides the base layout and adds the axis to bottom and left side.

    Grid layout cells:
      row 0: title (spans all columns, collapsed while hidden);
      row 1: reserved for an optional secondary horizontal axis;
      row 2: vertical label | vertical axis | chart area | optional secondary
             vertical axis;
      row 3: main horizontal axis;  row 4: optional horizontal label.

    .. note:: Is instantiated and connected by the parent chart view.

    :param parent: the Chart view
    """

    def __init__(self, parent=None):
        super(ChartWidget, self).__init__(parent)
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setLayout(QGraphicsGridLayout())
        self.layout().setContentsMargins(0, 0, 0, 0)
        self.layout().setHorizontalSpacing(-1)
        self.layout().setVerticalSpacing(-1)
        # optional title (hidden and collapsed until a title is assigned)
        self.title_widget = ChartLabel(self)
        self.title_widget.font = QFont("Helvetica [Cronyx]", 14, QFont.Bold)
        self.layout().addItem(self.title_widget, 0, 0, 1, 4)
        self.layout().setRowFixedHeight(0, 0)
        self.title_widget.setVisible(False)
        # optional left axis label
        self.vertical_axis_label = VerticalChartLabel(self)
        self.layout().addItem(self.vertical_axis_label, 2, 0, 1, 1)
        self.layout().setColumnFixedWidth(0, 0)
        self.vertical_axis_label.setVisible(False)
        self.main_vertical_axis = VerticalAxis(self)
        self.layout().addItem(self.main_vertical_axis, 2, 1, 1, 1)
        self.layout().setColumnFixedWidth(1, 60)
        # canvas for the items
        self.area = ChartArea(self)
        self.layout().addItem(self.area, 2, 2, 1, 1)
        self.main_horizontal_axis = HorizontalAxis(self)
        self.layout().addItem(self.main_horizontal_axis, 3, 2, 1, 1)
        self.layout().setRowFixedHeight(3, 30)
        # optional bottom axis label
        self.horizontal_axis_label = ChartLabel(self)
        self.layout().addItem(self.horizontal_axis_label, 4, 2, 1, 1)
        self.layout().setRowFixedHeight(4, 0)
        self.horizontal_axis_label.setVisible(False)
        # Keep both main axes in sync with pan/zoom of the chart area.
        self.area.vAxisChange.connect(self.main_vertical_axis.axisChange)
        self.area.hAxisChange.connect(self.main_horizontal_axis.axisChange)
        self._dbg_box_color = Qt.green  # debug outline color used in paint()

    @property
    def title(self):
        # Current title text.
        return self.title_widget.label

    @title.setter
    def title(self, value):
        # None hides and collapses the title row; any other value shows it.
        if value is None:
            self.title_widget.setVisible(False)
            self.layout().setRowFixedHeight(0, 0)
        else:
            self.title_widget.label = value
            self.title_widget.setVisible(True)
            self.layout().setRowFixedHeight(0, 20)

    @property
    def verticalLabel(self):
        # Text of the left-hand axis label.
        return self.vertical_axis_label.label

    @verticalLabel.setter
    def verticalLabel(self, value):
        # None hides and collapses the label column; otherwise show it.
        if value is None:
            self.vertical_axis_label.setVisible(False)
            self.layout().setColumnFixedWidth(0, 0)
        else:
            self.vertical_axis_label.label = value
            self.vertical_axis_label.setVisible(True)
            self.layout().setColumnFixedWidth(0, 20)

    @property
    def horizontalLabel(self):
        # Text of the bottom axis label.
        return self.horizontal_axis_label.label

    @horizontalLabel.setter
    def horizontalLabel(self, value):
        # None hides and collapses the label row; otherwise show it.
        if value is None:
            self.horizontal_axis_label.setVisible(False)
            self.layout().setRowFixedHeight(4, 0)
        else:
            self.horizontal_axis_label.label = value
            self.horizontal_axis_label.setVisible(True)
            self.layout().setRowFixedHeight(4, 20)

    def addSecondaryHorizontalAxis(self, axis):
        """ Adds a second horizontal axis on top to the plot.

        :param axis: secondary Axis
        :return:
        """
        axis.setParent(self)
        self.layout().addItem(axis, 1, 2, 1, 1)
        self.layout().setRowFixedHeight(1, 30)
        self.area.hAxisChange.connect(axis.axisChange)

    def addSecondaryVerticalAxis(self, axis):
        """ Adds a second vertical axis on top to the plot.

        :param axis: secondary Axis
        :return:
        """
        axis.setParent(self)
        self.layout().addItem(axis, 2, 3, 1, 1)
        # self.layout().setColumnFixedWidth(3, 6)
        self.area.vAxisChange.connect(axis.axisChange)

    def boundingRect(self):
        b_rect = QRectF(0, 0, self.size().width(), self.size().height())
        return b_rect

    def paint(self, p=QPainter(), o=QStyleOptionGraphicsItem(), widget=None):
        # The widget only draws a debug outline; children paint the content.
        if CONFIG.debug:
            # b_rect = QRectF(0, 0, self.size().width(), self.size().height())
            p.setPen(QPen(self._dbg_box_color))
            p.drawRect(self.boundingRect())

    def __repr__(self):
        return "<ChartWidget>"

    def __del__(self):
        _log.debug("Finalizing: {}".format(self))

    def wheelEvent(self, e):
        # Swallow wheel events so ancestors do not scroll; zooming is
        # handled by the chart area itself.
        e.accept()  # but do nothing
        _log.debug("Wheel on axis is ignored")
class ChartLabel(QGraphicsWidget):
    """Static text label rendered via a cached QPicture.

    :param label: initial label text.
    :param parent: optional parent graphics item.
    """

    def __init__(self, label="", parent=None):
        super(ChartLabel, self).__init__(parent)
        self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
        self.setFlags(
            QGraphicsItem.ItemClipsChildrenToShape
        )  # | QGraphicsItem.ItemIsFocusable)
        self.font = QFont("Helvetica [Cronyx]", 11, QFont.Normal)
        self.font_flags = Qt.TextDontClip | Qt.AlignHCenter | Qt.AlignVCenter
        self.color = QColor(110, 110, 110, 255)
        self.label = label
        # BUG FIX: ``paint`` reads ``self._picture`` and, in debug mode,
        # ``self._dbg_box_color``; neither was initialized here, so the very
        # first paint raised AttributeError.  Initialize both (the debug color
        # matches the axis default used elsewhere in this file).
        self._picture = None
        self._dbg_box_color = Qt.yellow

    def paint(self, p=QPainter(), o=QStyleOptionGraphicsItem(), widget=None):
        """Replay the cached picture, (re)building it lazily when stale."""
        p.setRenderHint(QPainter.Antialiasing, False)
        if self._picture is None:
            self._refreshPicture()
        self._picture.play(p)
        if CONFIG.debug:
            p.setPen(QPen(self._dbg_box_color))
            p.drawRect(self.boundingRect())

    def _refreshPicture(self):
        """ Repaints the picture that is played in the paint method. """
        self._picture = QPicture()
        painter = QPainter(self._picture)
        self._generatePicture(painter)
        painter.end()

    def resizeEvent(self, event):
        # Geometry changed -> the cached picture is stale; rebuild it.
        _log.debug("Resize Event")
        self._refreshPicture()

    def _generatePicture(self, p=QPainter()):
        """Render the label text into *p* (records into the QPicture)."""
        p.setBrush(QBrush(Qt.transparent))
        p.setPen(QPen(self.color))
        p.setFont(self.font)
        p.drawText(self.boundingRect(), self.font_flags, "{}".format(self.label))

    def __repr__(self):
        return "<ChartLabel>"

    def __del__(self):
        _log.debug("Finalizing: {}".format(self))
class VerticalChartLabel(ChartLabel):
    """Chart label rendered rotated by 90 degrees for the vertical axis."""

    def __init__(self, label="", parent=None):
        super(VerticalChartLabel, self).__init__(label, parent)

    def _generatePicture(self, p=QPainter()):
        """Render the label text rotated -90 degrees (reads bottom-to-top)."""
        r = self.boundingRect()
        p.rotate(-90)
        # Bounding rect expressed in the rotated coordinate system
        # (width/height swap roles).
        rr = QRectF(-r.height(), 0, r.height(), r.width())
        if CONFIG.debug:
            p.setPen(QPen(Qt.red))
            p.drawRect(rr)
        p.setBrush(QBrush(Qt.transparent))
        p.setPen(QPen(self.color))
        p.setFont(self.font)
        p.drawText(rr, self.font_flags, "{}".format(self.label))
        # Restore the painter rotation for subsequent drawing.
        p.rotate(90)
class ChartAxis(QGraphicsWidget):
    """ Base implementation for all chart axes.

    Renders ticks and gridlines into a cached QPicture which is replayed in
    ``paint`` and invalidated on resize or when the chart-area transform
    changes.

    :param parent: a chart widget.
    """

    def __init__(self, parent=None):
        super(ChartAxis, self).__init__(parent)
        self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
        self.setFlags(
            QGraphicsItem.ItemClipsChildrenToShape
        )  # | QGraphicsItem.ItemIsFocusable)
        self.font = QFont("Helvetica [Cronyx]", 9, QFont.Normal)
        self.flags = Qt.TextDontClip | Qt.AlignRight | Qt.AlignVCenter
        self.gridColor = QColor(80, 80, 80, 255)
        self.tickFormat = "{0:G}"   # format spec applied to tick values
        self._areaTransform = None  # last transform received from the area
        self._picture = None        # cached rendering, rebuilt lazily
        self.setZValue(-1)          # draw behind the chart items
        self._dbg_box_color = Qt.yellow

    def boundingRect(self):
        return QRectF(QPointF(0, 0), self.size())

    def paint(self, p=QPainter(), o=QStyleOptionGraphicsItem(), widget=None):
        """Replay the cached picture, rebuilding it if invalidated."""
        p.setRenderHint(QPainter.Antialiasing, False)
        if self._picture is None:
            self._refreshPicture()
        self._picture.play(p)
        if CONFIG.debug:
            p.setPen(QPen(self._dbg_box_color))
            p.drawRect(self.boundingRect())

    def _refreshPicture(self):
        """ Repaints the picture that is played in the paint method. """
        self._picture = QPicture()
        painter = QPainter(self._picture)
        self._generatePicture(painter)
        painter.end()

    def resizeEvent(self, event):
        # Geometry changed -> cached picture is stale.
        _log.debug("Resize Event")
        self._refreshPicture()

    def axisChange(self, transform):
        """Slot: the chart area's transform changed; re-render the axis.

        :param transform: the root item's QTransform.
        """
        # _log.debug("Axis change: {}".format(transform))
        self._areaTransform = transform
        self._refreshPicture()
        self.update()

    def _generatePicture(self, p=QPainter()):
        # Placeholder rendering; subclasses override with real tick drawing.
        p.setPen(QPen(Qt.green))
        p.drawRect(self.boundingRect())

    def calcTicks(self, shift, scaling, displayRange, maxGridSpace=80, minGridSpace=40):
        """ Calculates the axis ticks.

        The ticks are calculated along the logarithm of the base 10 of the displayed value range.
        The resulting exponent for the ticks is then scaled with respect to the preferred number of
        ticks and the value range.

        In case the value range would cause more ticks than previously determined, the exponent is
        incremented by 1. Otherwise, if the exponent results in too few ticks, the tick distance is
        halved.

        :param shift: offset from point of origin along the current axis (m31 / m32 from transform)
        :param scaling: scaling of scene (m11 / m12 / m21 / m22 from transform)
        :param displayRange: range of visible pixels
        :param maxGridSpace: maximum space between gridlines
        :param minGridSpace: minimum space between gridlines
        :return: list of ticks (tuple of position and label) and the required tick width
        """
        # first lets see how many ticks can be placed on the axis
        minNumberOfGridLines = displayRange / float(maxGridSpace)
        maxNumberOfGridLines = displayRange / float(minGridSpace)
        # Calculate the up most and lowest value on axis
        upperValue = -shift / scaling
        lowerValue = (displayRange - shift) / scaling
        valueRange = abs(upperValue - lowerValue)
        # _log.debug("Value range: {}".format(valueRange))
        if valueRange == 0:
            _log.debug("Value range is 0")
            log10Exponent = 0
        else:
            log10Exponent = math.floor(math.log10(valueRange)) - 1
        # Fulfill the minimum gridline constraint
        while valueRange / 10 ** log10Exponent > maxNumberOfGridLines:
            log10Exponent += 1
        # fulfill the max gridlines constraint
        tickDistance = 10 ** log10Exponent
        while valueRange / tickDistance < minNumberOfGridLines:
            tickDistance *= 0.5
        # _log.debug("Tick info: log10 {}".format(log10Exponent))
        # Integer tick indices covering the visible interval; ``d`` walks
        # from the upper towards the lower value (direction depends on sign
        # of the scaling).
        first_pos_idx = int(upperValue / tickDistance)
        last_pos_idx = int(lowerValue / tickDistance)
        if first_pos_idx < last_pos_idx:
            d = 1
            last_pos_idx += 1
        else:
            d = -1
            last_pos_idx -= 1
        required_tick_width = 0
        metrics = QFontMetrics(self.font)
        ticks = []
        for k in range(first_pos_idx, last_pos_idx, d):
            pos = round(k * tickDistance * scaling + shift)
            value = k * tickDistance
            tickString = self.tickFormat.format(value)
            ticks.append((pos, tickString))
            # Track the widest label (+5 px padding) for layout sizing.
            cur_tick_width = metrics.width(tickString) + 5
            if cur_tick_width > required_tick_width:
                required_tick_width = cur_tick_width
        return ticks, required_tick_width

    def __repr__(self):
        return "<ChartAxis>"

    def __del__(self):
        _log.debug("Finalizing: {}".format(self))
class VerticalAxis(ChartAxis):
    """ Vertical chart axis. """

    def boundingRect(self):
        # Spans the full parent width so gridlines can reach across the plot.
        parent = self.parentWidget().size()
        s = self.size()
        b_rect = QRectF(0, 0, parent.width(), s.height())
        return b_rect

    def _generatePicture(self, p=QPainter()):
        """Draw the axis line, tick labels and horizontal gridlines."""
        p.setBrush(Qt.transparent)
        p.setPen(makePen(self.gridColor))
        p.setFont(self.font)
        if self._areaTransform is None:
            # No transform received yet -- nothing to draw.
            return
        parent_w = (
            self.parentWidget().area.size().width() + self.size().width() - 2
        )  # self.parentWidget().size().width()
        parent_h = self.parentWidget().size().height()
        translation = self._areaTransform.m32()
        scaling = (
            self._areaTransform.m12() + self._areaTransform.m22()
        )  # only for rotations along 90 degrees
        if scaling == 0:
            _log.debug("????")
            return
        ticks, run_width = self.calcTicks(translation, scaling, parent_h)
        # Reserve enough column width for the widest tick label.
        self.parentWidget().layout().setColumnFixedWidth(1, run_width + 10)
        p.drawLine(self.size().width(), 0, self.size().width(), self.size().height())
        for pos, tickString in ticks:
            if 10 < pos < self.size().height():
                p.drawLine(run_width + 6, round(pos), parent_w, round(pos))
                tickRect = QRectF(0, pos - 4, run_width + 2, 10)
                p.drawText(tickRect, self.flags, tickString)
class HorizontalAxis(ChartAxis):
    """ Horizontal chart axis. """

    def __init__(self, parent=None):
        """
        :param parent: parent chart widget
        :return:
        """
        super(HorizontalAxis, self).__init__(parent)
        # Center labels horizontally under their ticks.
        self.flags = Qt.TextDontClip | Qt.AlignCenter | Qt.AlignVCenter

    def boundingRect(self):
        # Extends upward over the plot area so vertical gridlines can be
        # drawn, and leftward over the vertical axis column.
        parent = self.parentWidget().area.size()
        v_axis = self.parentWidget().main_vertical_axis.size()
        s = self.size()
        b_rect = QRectF(
            -v_axis.width(),
            -parent.height(),
            s.width() + v_axis.width(),
            parent.height() + s.height(),
        )
        return b_rect

    def _generatePicture(self, p=QPainter()):
        """Draw the axis line, tick labels and vertical gridlines."""
        p.setBrush(Qt.transparent)
        pen = QPen(QBrush(self.gridColor), 1.0)
        pen.setCosmetic(True)  # keep 1 px line width regardless of transforms
        p.setPen(pen)
        p.setFont(self.font)
        if self._areaTransform is None:
            return
        parent_h = self.parentWidget().area.size().height() - 2
        shift = self._areaTransform.m31()
        scaling = self._areaTransform.m11() + self._areaTransform.m21()
        displayRange = self.size().width()
        ticks, run_width = self.calcTicks(
            shift, scaling, displayRange, maxGridSpace=100, minGridSpace=80
        )
        rw = run_width / 2.0  # half label width, used to center tick rects
        p.drawLine(0, 0, self.size().width(), 0)
        for pos, tickString in ticks:
            if 0 < pos < self.size().width() - 30:
                p.drawLine(round(pos), 5, round(pos), -parent_h)
                tickRect = QRectF(pos - rw, 8, run_width, 10)
                p.drawText(tickRect, self.flags, tickString)
class SecondaryHorizontalAxis(HorizontalAxis):
    """ Horizontal axis with a different tick scale.

    This is useful if e.g. your data is sampled on its own timescale but could also be
    represented in UTC time. And you want to see both abscissa values.
    """

    def __init__(self, main_axis_values, secondary_axis_values, parent=None):
        """ Due to free zooming and ranging on the

        :param main_axis_values: values on the main axis.
        :param secondary_axis_values: corresponding value on the main axis.
        :param parent: parent item
        :raises QPlotUtilsException: if the two value sequences differ in length.
        """
        super(SecondaryHorizontalAxis, self).__init__(parent)
        # self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed))
        self.font = QFont("Helvetica [Cronyx]", 9, QFont.Normal)
        self.tickFormat = "{0:1.4G}"
        self.main_axis_values = main_axis_values
        self.secondary_axis_values = secondary_axis_values
        # CONSISTENCY: SecondaryVerticalAxis validates this; a mismatch would
        # otherwise surface later as an IndexError inside calcTicks.
        if len(main_axis_values) != len(secondary_axis_values):
            raise QPlotUtilsException("list length must be equal")
        self._dbg_box_color = Qt.magenta

    def boundingRect(self):
        # Extends down over the plot area (for gridlines) and left over the
        # vertical axis column.
        area = self.parentWidget().area.size()
        v_axis = self.parentWidget().main_vertical_axis.size()
        s = self.size()
        b_rect = QRectF(
            -v_axis.width(), 0, s.width() + v_axis.width(), area.height() + s.height()
        )
        return b_rect

    def _generatePicture(self, p=QPainter()):
        """Draw the axis line, secondary tick labels and dotted gridlines."""
        p.setBrush(Qt.transparent)
        pen = QPen(QBrush(self.gridColor), 1.0)
        pen.setCosmetic(True)
        p.setPen(pen)
        p.setFont(self.font)
        p.drawLine(0, self.size().height(), self.size().width(), self.size().height())
        # Gridlines of the secondary axis are dotted and tinted to
        # distinguish them from the main grid.
        pen = QPen(QBrush(QColor(188, 136, 184, 255)), 1.0, style=Qt.DotLine)
        pen.setCosmetic(True)
        p.setPen(pen)
        if self._areaTransform is None:
            return
        parent_h = self.parentWidget().area.size().height()
        shift = self._areaTransform.m31()
        scaling = self._areaTransform.m11() + self._areaTransform.m21()
        displayRange = self.size().width()
        ticks, run_width = self.calcTicks(
            shift, scaling, displayRange, maxGridSpace=100, minGridSpace=80
        )
        rw = run_width / 2.0  # half label width for centering
        for pos, tickString in ticks:
            if 0 < pos < self.size().width() - 30:
                p.drawLine(
                    round(pos),
                    self.size().height() - 5,
                    round(pos),
                    parent_h + self.size().height(),
                )
                tickRect = QRectF(pos - rw, self.size().height() - 18, run_width, 10)
                p.drawText(tickRect, self.flags, tickString)

    def calcTicks(self, shift, scaling, displayRange, maxGridSpace=80, minGridSpace=40):
        """ Calculate ticks from the value-mapping tables.

        Tick positions are taken from ``main_axis_values`` inside the visible
        range and labelled with the corresponding ``secondary_axis_values``
        entry; candidates closer than *minGridSpace* pixels to an already
        placed tick are dropped.

        :param shift: offset from point of origin along the current axis (m31 / m32 from transform)
        :param scaling: scaling of scene (m11 / m12 / m21 / m22 from transform)
        :param displayRange: range of visible pixels
        :param maxGridSpace: maximum space between gridlines
        :param minGridSpace: minimum space between gridlines
        :return: list of ticks (tuple of position and label) and the required tick width
        """
        # first lets see how many ticks can be placed on the axis
        minNumberOfGridLines = displayRange / float(maxGridSpace)
        maxNumberOfGridLines = displayRange / float(minGridSpace)
        if maxNumberOfGridLines == 0:
            return [], 0
        # Visible value interval on the main axis.
        lowerValue = -shift / scaling
        upperValue = (displayRange - shift) / scaling
        idx, = np.where(
            np.logical_and(
                self.main_axis_values <= upperValue, self.main_axis_values >= lowerValue
            )
        )
        required_tick_width = 0
        metrics = QFontMetrics(self.font)
        # BUG FIX: ``np.float`` was removed in NumPy 1.24; the builtin
        # ``float`` is the documented replacement (the alias always meant it).
        positions = np.ones(int(maxNumberOfGridLines) + 5, float) * np.inf
        c = 0
        labels = []
        for k in idx:
            v = self.main_axis_values[k]
            pos = round(v * scaling + shift)
            value = round(self.secondary_axis_values[k], 10)
            tickString = self.tickFormat.format(value)
            if np.min(np.abs(positions - pos)) < minGridSpace:
                # Too close to an already placed tick -- skip to avoid overlap.
                continue
            positions[c] = pos
            c += 1
            labels.append(tickString)
            cur_tick_width = metrics.width(tickString)
            if cur_tick_width > required_tick_width:
                required_tick_width = cur_tick_width
        positions = positions[:c]
        # Return a real list (not a one-shot zip iterator) so the result
        # matches the base-class contract and can be iterated repeatedly.
        return list(zip(positions, labels)), required_tick_width
class SecondaryVerticalAxis(VerticalAxis):
    """ Vertical axis with a different tick scale.

    Tick positions come from ``main_axis_values``; labels come from the
    corresponding entries of ``secondary_axis_values``.
    """

    def __init__(self, main_axis_values, secondary_axis_values, parent=None):
        """ Due to free zooming and ranging on the

        :param main_axis_values: values on the main axis.
        :param secondary_axis_values: corresponding value on the main axis.
        :param parent: parent item
        :raises QPlotUtilsException: if the two value sequences differ in length.
        """
        super(SecondaryVerticalAxis, self).__init__(parent)
        self.font = QFont("Helvetica [Cronyx]", 9, QFont.Normal)
        self.flags = Qt.TextDontClip | Qt.AlignLeft | Qt.AlignVCenter
        self.tickFormat = "{0:1.4G}"
        self.main_axis_values = main_axis_values
        self.secondary_axis_values = secondary_axis_values
        if len(main_axis_values) != len(secondary_axis_values):
            raise QPlotUtilsException("list length must be equal")
        self._dbg_box_color = Qt.magenta

    def boundingRect(self):
        # Slightly larger than the own cell so labels/gridlines are not clipped.
        parent = self.parentWidget().size()
        s = self.size()
        b_rect = QRectF(-10, 0, parent.width(), s.height() + 20)
        return b_rect

    def _generatePicture(self, p=QPainter()):
        """Draw the axis line, secondary tick labels and dotted gridlines."""
        pen = QPen(QBrush(self.gridColor), 1.0)
        pen.setCosmetic(True)
        p.setPen(pen)
        p.setBrush(Qt.transparent)
        p.setFont(self.font)
        p.drawLine(0, 0, 0, self.size().height())
        if self._areaTransform is None:
            return
        parent_w = self.parentWidget().area.size().width() - 2
        shift = self._areaTransform.m32()
        scaling = (
            self._areaTransform.m12() + self._areaTransform.m22()
        )  # only for rotations along 90 degrees
        displayRange = self.size().height()
        if scaling == 0:
            _log.debug("Scaling is 0")
            return
        ticks, run_width = self.calcTicks(shift, scaling, displayRange)
        # Collapse the axis column when there is nothing to show.
        if run_width == 0:
            self.parentWidget().layout().setColumnFixedWidth(3, 0)
        else:
            self.parentWidget().layout().setColumnFixedWidth(3, run_width + 14)
        # Dotted, tinted gridlines distinguish the secondary grid.
        pen = QPen(QBrush(QColor(188, 136, 184, 255)), 1.0, style=Qt.DotLine)
        pen.setCosmetic(True)
        p.setPen(pen)
        for pos, tickString in ticks:
            # if 10 < pos < self.size().height():
            p.drawLine(-parent_w, round(pos), 6, round(pos))
            tickRect = QRectF(10, pos - 4, run_width + 2, 10)
            p.drawText(tickRect, self.flags, tickString)

    def calcTicks(self, shift, scaling, displayRange, maxGridSpace=80, minGridSpace=40):
        """ Calculate ticks from the value-mapping tables.

        Same contract as the base class: returns (list of (position, label)
        tuples, required label width). Positions are taken from
        ``main_axis_values`` within the visible range; labels from
        ``secondary_axis_values``.

        :param shift: offset from point of origin along the current axis (m31 / m32 from transform)
        :param scaling: scaling of scene (m11 / m12 / m21 / m22 from transform)
        :param displayRange: range of visible pixels
        :param maxGridSpace: maximum space between gridlines
        :param minGridSpace: minimum space between gridlines
        :return: list of ticks (tuple of position and label) and the required tick width
        """
        # first lets see how many ticks can be placed on the axis
        minNumberOfGridLines = displayRange / float(maxGridSpace)
        maxNumberOfGridLines = displayRange / float(minGridSpace)
        if maxNumberOfGridLines == 0:
            return [], 0
        # Visible value interval on the axis.
        upperValue = -shift / scaling
        lowerValue = (displayRange - shift) / scaling
        idx, = np.where(
            np.logical_and(
                self.main_axis_values <= upperValue, self.main_axis_values >= lowerValue
            )
        )
        required_tick_width = 0
        metrics = QFontMetrics(self.font)
        # BUG FIX: ``np.float`` was removed in NumPy 1.24; use the builtin.
        positions = np.ones(int(maxNumberOfGridLines) + 5, float) * np.inf
        c = 0
        labels = []
        for k in idx:
            v = self.main_axis_values[k]
            pos = round(v * scaling + shift)
            value = round(self.secondary_axis_values[k], 10)
            tickString = self.tickFormat.format(value)
            if np.min(np.abs(positions - pos)) < minGridSpace:
                # Too close to an already placed tick -- skip it.
                continue
            positions[c] = pos
            c += 1
            labels.append(tickString)
            cur_tick_width = metrics.width(tickString)
            if cur_tick_width > required_tick_width:
                required_tick_width = cur_tick_width
        positions = positions[:c]
        # Return a real list so callers may iterate more than once.
        return list(zip(positions, labels)), required_tick_width
class ScaleBox(QGraphicsItem):
    def __init__(self, parent=None):
        """ Overlay that is visible when a zooming operation is in progress to give the user
        feedback about the region that is zoomed in to.

        :param parent: Optional parent widget
        """
        super(ScaleBox, self).__init__(parent)
        # ROBUSTNESS: start with valid (empty) corner points instead of None --
        # ``boundingRect`` would raise TypeError if Qt painted the item before
        # the drag handler assigned the corners.
        self._topLeft = QPointF()
        self._bottomRight = QPointF()

    def boundingRect(self):
        return QRectF(self._topLeft, self._bottomRight)

    def paint(self, p=QPainter(), o=QStyleOptionGraphicsItem(), widget=None):
        """Draw the semi-transparent yellow rubber band."""
        pen = QPen(Qt.yellow, 2.0, Qt.DotLine)
        pen.setCosmetic(True)  # constant 2 px outline regardless of zoom
        brush = QBrush(QColor(255, 255, 0, 127), Qt.SolidPattern)
        p.setBrush(brush)
        p.setPen(pen)
        # Inset slightly so the dotted outline stays inside the bounding rect.
        r = self.boundingRect().adjusted(1, 1, -2, -2)
        p.drawRect(r)
class ChartArea(QGraphicsWidget):
    # Used to update axis,
    # Emitted with the root item's QTransform whenever the view mapping changes.
    hAxisChange = Signal(object)
    vAxisChange = Signal(object)
    # Notifies interested parties
    # Emitted with the new visible QRectF after pan/zoom/range changes.
    visibleRangeChange = Signal(object)
    def __init__(self, parent=None):
        """ Hosts all items that are placed on the chart basically the visible area of
        the chart.

        Handles pan and zoom and updates the axis accordingly.

        .. note:: Instantiated along with the ChartView

        :param parent: ChartWidget
        """
        super(ChartArea, self).__init__(parent)
        self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
        self.setFlags(
            QGraphicsItem.ItemClipsChildrenToShape | QGraphicsItem.ItemIsFocusable
        )
        self.__rootItem = ChartItem(self)  # invisible root item
        self.__initZoomPrepare = False  # CTRL-press seen, drag not yet started
        self.__initZoom = False         # scale-box drag in progress
        self.__mouseMode = None         # PAN_MODE / ZOOM_BOX_MODE / None
        self.__visibleRange = None      # currently visible data-coordinate rect
        self.__scaleBox = None          # rubber-band item while zoom-boxing
        self.__maxVisibleRange = None   # optional pan/zoom boundary rect
        self.__aspectRatio = None       # optional fixed aspect ratio
        self._dbg_box_color = Qt.red
    @property
    def aspectRatio(self):
        # Fixed aspect ratio, or None when the user may zoom freely.
        return self.__aspectRatio

    def setAspectRatio(self, value):
        """Set a fixed aspect ratio (None removes the constraint)."""
        self.__aspectRatio = value
        if value is not None:
            # Apply the new constraint immediately.
            self.adjustRange()

    def setMaxVisibleRange(self, rect):
        """Restrict panning/zooming to *rect* (stored normalized)."""
        self.__maxVisibleRange = rect.normalized()

    def getRootItem(self):
        # All chart items are parented under this invisible root item.
        return self.__rootItem

    def boundingRect(self):
        # Override
        b_rect = QRectF(0, 0, self.size().width() - 1, self.size().height() - 1)
        return b_rect
    def paint(self, p=QPainter(), o=QStyleOptionGraphicsItem(), widget=None):
        # Override; the area itself only draws a debug outline -- the child
        # chart items paint the actual content.
        if CONFIG.debug:  # _log.getEffectiveLevel() < logging.INFO:
            p.setPen(QPen(self._dbg_box_color))
            p.drawRect(self.boundingRect())

    def resizeEvent(self, event):
        _log.debug("Received resize")
        if self.__visibleRange is None:
            # First resize: derive the visible range from the current transform.
            self._calcVisibleRange()
        self.axisChange()
        # self.adjustRange()

    def showEvent(self, event):
        _log.debug("Show event")
        self.adjustRange()
    # Mouse interaction modes: plain drag pans, CTRL+drag draws a zoom box.
    PAN_MODE, ZOOM_BOX_MODE = range(2)

    def mousePressEvent(self, event):
        """Select the interaction mode based on the keyboard modifiers."""
        _log.debug("Mouse press event")
        # self.__mouseMode = ChartArea.PAN_MODE
        if Qt.ControlModifier == event.modifiers():
            _log.debug("CTRL key pressed")
            self.__mouseMode = ChartArea.ZOOM_BOX_MODE
            # Part 1 of 2: initialize Scale Box, but that event could be a dbl click
            self.__initZoomPrepare = True
        # elif Qt.ShiftModifier == event.modifiers():
        #     self.__mouseMode = ChartArea.PAN_MODE
        #
        # else:
        #     # _log.debug("Calling defaults")
        #     super(ChartArea, self).mousePressEvent(event)
        else:
            self.__mouseMode = ChartArea.PAN_MODE
            # super(ChartArea, self).mousePressEvent(event)
    def mouseReleaseEvent(self, event):
        """Finish a zoom-box drag: map the box to data coords and zoom in."""
        if self.__mouseMode == ChartArea.ZOOM_BOX_MODE and self.__initZoom:
            r = self.__scaleBox.boundingRect().normalized()
            _log.debug("SB: {},{} {},{}".format(r.x(), r.y(), r.width(), r.height()))
            _log.debug(r)
            invTransform, invertible = self.__rootItem.transform().inverted()
            if not invertible:
                # Degenerate transform -- abort the zoom and reset drag state.
                self.__initZoomPrepare = False
                self.__initZoom = False
                self.__mouseMode = None
                return
            self.__visibleRange = invTransform.mapRect(r)
            self.adjustRange()
            self.scene().removeItem(self.__scaleBox)
            _log.debug("Emitting Bounds Changed from: mouseReleaseEvent(...)")
            self.visibleRangeChange.emit(self.__visibleRange)
        # Any release ends the current interaction.
        self.__initZoomPrepare = False
        self.__initZoom = False
        self.__mouseMode = None
    def mouseMoveEvent(self, event):
        """Drag handler: grow the zoom box or pan, depending on the mode."""
        if self.__mouseMode == ChartArea.ZOOM_BOX_MODE and self.__initZoom:
            # _log.debug("Zoom box ")
            self.__scaleBox._bottomRight = event.pos()
            self.scene().update()
        elif self.__mouseMode == ChartArea.ZOOM_BOX_MODE and self.__initZoomPrepare:
            # Part 1 of 2: initialize Scale Box
            # _log.debug("Init zoombox")
            self.__initZoom = True
            self.__scaleBox = ScaleBox(self)
            self.__scaleBox._topLeft = event.pos()
            self.__scaleBox._bottomRight = event.pos()
            self.__initZoomPrepare = False
            # set focus to receive key events during scale box drag in order
            # to cancel by pressing escape
            self.setFocus()
        if self.__mouseMode == ChartArea.PAN_MODE:
            self.__pan(event)
            self.visibleRangeChange.emit(self.__visibleRange)
    def __pan(self, event):
        """Translate the root item by the mouse delta, clamped to bounds."""
        pos = event.pos()
        lastPos = event.lastPos()
        dif = pos - lastPos
        t = self.__rootItem.transform()
        # Same transform with the translation advanced by the drag delta.
        n = QTransform(
            t.m11(), t.m12(), t.m21(), t.m22(), t.m31() + dif.x(), t.m32() + dif.y()
        )
        # Limit to boundaries
        if self.__maxVisibleRange is not None:
            r = self.boundingRect()
            invTransform, invertible = n.inverted()
            if not invertible:
                _log.error("Transform cannot be inverted.")
                return
            visibleRange = invTransform.mapRect(r)
            # Reject the whole pan step if it would leave the allowed range.
            if (
                visibleRange.top() < self.__maxVisibleRange.top()
                or visibleRange.bottom() > self.__maxVisibleRange.bottom()
                or visibleRange.left() < self.__maxVisibleRange.left()
                or visibleRange.right() > self.__maxVisibleRange.right()
            ):
                return
        # self._logTransform(n, desc="Pan")
        self.__rootItem.setTransform(n)
        self._calcVisibleRange()
        self.axisChange()
    def _calcVisibleRange(self):
        """Recompute the visible data rect from the current root transform."""
        r = self.boundingRect()
        invTransform, invertible = self.__rootItem.transform().inverted()
        if not invertible:
            _log.error("Transform cannot be inverted.")
            return
        self.__visibleRange = invTransform.mapRect(r)

    def keyPressEvent(self, event):
        _log.debug("Key event")
        # ESC cancels a scale-box zoom that is in progress.
        if event.key() == Qt.Key_Escape and self.__initZoom:
            _log.debug("Canceling scalebox zoom")
            self.scene().removeItem(self.__scaleBox)
            self.__initZoom = False
            self.update()
    def wheelEvent(self, event):
        """Zoom in/out around the cursor, keeping it fixed on screen."""
        _log.debug("Wheel on area")
        t = self.__rootItem.transform()
        inv_t, invertible = t.inverted()
        if not invertible:
            _log.error("Transformation not invertible!")
            return
        if self.__visibleRange is None:
            # _log.debug("No visible range")
            self._calcVisibleRange()
            # return
        # Calculate coords under mouse
        coords = inv_t.map(event.pos())
        _log.debug("Coords: {}, {}".format(coords.x(), coords.y()))
        bbox = self.__visibleRange
        _log.debug("Visible Range: {}".format(bbox))
        # Percentages in visible range, hopefully this is rotation invariant
        # NOTE(review): divides by bbox width/height -- a zero-sized visible
        # range would raise ZeroDivisionError; confirm it cannot occur here.
        pct_left = (coords.x() - bbox.left()) / bbox.width()
        pct_top = (coords.y() - bbox.top()) / bbox.height()
        _log.debug(
            "Percentage from x/y: {:2.2f}/{:2.2f}".format(
                pct_left * 100.0, pct_top * 100.0
            )
        )
        if not 0 <= pct_left <= 1.0 or not 0 <= pct_top <= 1.0:
            _log.error("Percentages OUT of bounds...")
        # Wheel down -> zoom out (enlarge range), wheel up -> zoom in.
        if event.delta() < 0:
            zoom_factor = 1.2
        else:
            zoom_factor = 1 / 1.2
        new_width = bbox.width() * zoom_factor
        # deltaWidth = new_width - bbox.width()
        new_height = bbox.height() * zoom_factor
        # deltaHeight = new_height - bbox.height()
        _log.debug("New sizes: {}, {}".format(new_width, new_height))
        # _log.debug("Size change: {}, {}".format(deltaWidth, deltaHeight))
        # Anchor the cursor's data point at the same relative position.
        newLeft = coords.x() - new_width * pct_left
        newTop = coords.y() - new_height * pct_top
        newBbox = QRectF(
            QPointF(newLeft, newTop), QSizeF(new_width, new_height)
        ).normalized()
        _log.debug("New Visible Range: {}".format(newBbox))
        self.__visibleRange = newBbox
        self.adjustRange()
        self.visibleRangeChange.emit(self.__visibleRange)
@classmethod
def _logTransform(cls, t, desc=None):
""" Dumps the QTransform values, used for development.
:param t:
:param desc:
:return:
"""
if desc is None:
_log.debug("[")
else:
_log.debug("{} = [".format(desc))
_log.debug("{:2.2f},\t{:2.2f},\t{:2.2f};".format(t.m11(), t.m12(), t.m13()))
_log.debug("{:2.2f},\t{:2.2f},\t{:2.2f};".format(t.m21(), t.m22(), t.m23()))
_log.debug("{:2.2f},\t{:2.2f},\t{:2.2f}".format(t.m31(), t.m32(), t.m33()))
_log.debug("]")
    def mouseDoubleClickEvent(self, event):
        # Double click resets the view to show all items.
        self.autoRange()
        # self.visibleRangeChange.emit(self.__visibleRange)

    def axisChange(self):
        """Broadcast the root transform on both axis-change signals."""
        t = self.__rootItem.transform()
        self.vAxisChange.emit(t)
        self.hAxisChange.emit(t)
    def autoRange(self):
        """Fit the visible range to the united bounding box of all items.

        Items flagged ``FLAG_NO_AUTO_RANGE`` are skipped; items ignoring
        transformations contribute only a tiny rect around their position.
        """
        children = self.__rootItem.childItems()
        if len(children) == 0:
            return
        bbox = None
        for c in children:
            if int(c.flags()) & int(QGraphicsItem.ItemIgnoresTransformations):
                # Untransformable items (e.g. fixed-size markers) contribute
                # a near-point rect so they do not distort the range.
                rect = QRectF(-0.5e-8, -0.5e-8, 1e-8, 1e-8)
            else:
                rect = c.boundingRect()
            if c.chartItemFlags & ChartItemFlags.FLAG_NO_AUTO_RANGE:
                continue
            if bbox is None:
                bbox = rect.normalized()
                bbox.moveCenter(bbox.center() + c.pos())
            else:
                other = rect.normalized()
                other.moveCenter(other.center() + c.pos())
                bbox = bbox.united(other)
        _log.debug("bbox: {}".format(bbox))
        if bbox is None:
            return
        if bbox.height() == 0:
            # Degenerate (flat) data -- give the box some vertical extent.
            bbox.adjust(0, -0.5, 0, 0.5)
        h_pad = bbox.height() * 0.005  # add 0.5 % to the visible range.
        w_pad = bbox.width() * 0.005
        bbox.adjust(-w_pad, -h_pad, w_pad, h_pad)
        self.__visibleRange = bbox
        self.adjustRange()
        self.visibleRangeChange.emit(self.__visibleRange)
    def adjustRange(self):
        """Clamp the visible range to the allowed bounds and rebuild the
        root item's transform so that the visible range maps onto the
        widget area.

        Reads self.__visibleRange / self.__maxVisibleRange / self.__aspectRatio
        and replaces the root item's transform; emits the axis-change signals
        via axisChange().
        """
        if self.__visibleRange is None:
            return
        if self.__maxVisibleRange:
            # TODO: panning against the bounds feels jerky because the range
            # is clamped edge-by-edge on every event.
            _log.debug("Max visible: {}".format(self.__maxVisibleRange))
            if self.__visibleRange.top() < self.__maxVisibleRange.top():
                self.__visibleRange.setTop(self.__maxVisibleRange.top())
            if self.__visibleRange.bottom() > self.__maxVisibleRange.bottom():
                self.__visibleRange.setBottom(self.__maxVisibleRange.bottom())
            if self.__visibleRange.left() < self.__maxVisibleRange.left():
                self.__visibleRange.setLeft(self.__maxVisibleRange.left())
            if self.__visibleRange.right() > self.__maxVisibleRange.right():
                self.__visibleRange.setRight(self.__maxVisibleRange.right())
        area = self.size()
        bbox = self.__visibleRange
        _log.debug(bbox)
        def sign(v):
            """Signum of *v* (Python has no built-in sign function).

            :param v: numeric value.
            :return: -1.0, 0.0 or 1.0.
            """
            if v == 0.0:
                return 0.0
            elif v > 0.0:
                return 1.0
            else:
                return -1.0
        t = self.__rootItem.transform()
        _log.debug(
            "Area Aspect: {} / {} = 1:{}".format(
                area.width(), area.height(), area.height() / area.width()
            )
        )
        _log.debug(
            "View Aspect: {} / {} = 1:{}".format(
                bbox.width(), bbox.height(), bbox.width() / bbox.height()
            )
        )
        # Enforce fixed aspect ratio; only the sign of the existing transform
        # entries is reused, the scale is recomputed from scratch below.
        if self.__aspectRatio is not None:
            if t.m12() != 0:
                # Rotated 90 degrees: width and height swap roles.
                bbox.setWidth(
                    self.__aspectRatio * area.height() / area.width() * bbox.height()
                )
            else:
                bbox.setWidth(
                    self.__aspectRatio * area.width() / area.height() * bbox.height()
                )
        s11 = (area.width()) / bbox.width() * sign(t.m11())
        s12 = (area.height()) / bbox.width() * sign(t.m12())
        s21 = (area.width()) / bbox.height() * sign(t.m21())
        s22 = (area.height()) / bbox.height() * sign(t.m22())
        _log.info("{}, {}; {}, {}".format(s11, s12, s21, s22))
        # The following shift works for not transformed, flipped y, flipped y and rotated 90 degrees
        # and yes this still is ugly....
        # NOTE(review): the dy else-branch mixes s22 and s12 terms; looks
        # intentional for the rotated case, but confirm against a 90-degree
        # rotated chart before touching it.
        if s11 > 0 or s21 > 0:
            dx = -bbox.left() * s11
        else:
            dx = -bbox.bottom() * s21
        if s22 > 0 or s12 > 0:
            dy = -bbox.top() * s22
        else:
            dy = -bbox.bottom() * s22 + -bbox.right() * s12
        # Update and apply the new transform to the chart items.
        n = QTransform()
        n.setMatrix(s11, s12, 0, s21, s22, 0, dx, dy, 1)
        self.__rootItem.setTransform(n)
        self.axisChange()
def setRange(self, bbox):
r = bbox.normalized()
_log.debug("Set range")
_log.debug("bbox: {}".format(r))
self.__visibleRange = r
self.adjustRange()
    def __repr__(self):
        # Fixed tag; no distinguishing public state is exposed here.
        return "<ChartArea>"
    def __del__(self):
        # Development aid: trace finalization to spot leaked chart areas.
        _log.debug("Finalizing: {}".format(self))
| |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
This example illustrates the use of `NeuTraReparam` to run neural transport HMC [1]
on a toy model that draws from a banana-shaped bivariate distribution [2]. We first
train an autoguide by using `AutoNormalizingFlow` that learns a transformation from
a simple latent space (isotropic gaussian) to the more complex geometry of the
posterior. Subsequently, we use `NeuTraReparam` to run HMC and draw samples from this
simplified "warped" posterior. Finally, we use our learnt transformation to transform
these samples back to the original space. For comparison, we also draw samples from
a NeuTra-reparametrized model that uses a much simpler `AutoDiagonalNormal` guide.
References:
----------
[1] Hoffman, M., Sountsov, P., Dillon, J. V., Langmore, I., Tran, D., and Vasudevan,
S. Neutra-lizing bad geometry in hamiltonian monte carlo using neural transport.
arXiv preprint arXiv:1903.03704, 2019.
[2] Wang Z., Broccardo M., and Song J. Hamiltonian Monte Carlo Methods for Subset
Simulation in Reliability Analysis. arXiv preprint arXiv:1706.01435, 2018.
"""
import argparse
import logging
import os
from functools import partial
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from matplotlib.gridspec import GridSpec
from torch.distributions.utils import broadcast_all
import pyro
import pyro.distributions as dist
from pyro import optim, poutine
from pyro.distributions import constraints
from pyro.distributions.transforms import block_autoregressive, iterated
from pyro.infer import MCMC, NUTS, SVI, Trace_ELBO
from pyro.infer.autoguide import AutoDiagonalNormal, AutoNormalizingFlow
from pyro.infer.reparam import NeuTraReparam
logging.basicConfig(format="%(message)s", level=logging.INFO)
class BananaShaped(dist.TorchDistribution):
    """Banana-shaped bivariate distribution, a correlated 2-D Gaussian
    pushed through a curvature warp (Wang et al., 2018).

    :param a: positive scale parameter.
    :param b: curvature parameter.
    :param rho: correlation of the underlying Gaussian (default 0.9).
    """

    arg_constraints = {"a": constraints.positive, "b": constraints.real}
    support = constraints.real_vector

    def __init__(self, a, b, rho=0.9):
        self.a, self.b, self.rho = broadcast_all(a, b, rho)
        # Base distribution in the untransformed (u0, u1) space.
        self.mvn = dist.MultivariateNormal(
            torch.tensor([0.0, 0.0]),
            covariance_matrix=torch.tensor([[1.0, self.rho], [self.rho, 1.0]]),
        )
        super().__init__(event_shape=(2,))

    def sample(self, sample_shape=()):
        """Draw from the base Gaussian, then apply the banana warp."""
        base = self.mvn.sample(sample_shape)
        first, second = base[..., 0], base[..., 1]
        x = self.a * first
        y = second / self.a + self.b * (first ** 2 + self.a ** 2)
        return torch.stack([x, y], -1)

    def log_prob(self, x):
        """Density via the inverse warp; its Jacobian determinant is
        a * (1/a) = 1, so no correction term is needed."""
        first, second = x[..., 0], x[..., 1]
        u0 = first / self.a
        u1 = (second - self.b * (u0 ** 2 + self.a ** 2)) * self.a
        return self.mvn.log_prob(torch.stack([u0, u1], dim=-1))
def model(a, b, rho=0.9):
    """Toy model: one 2-D sample site "x" with a banana-shaped prior."""
    pyro.sample("x", BananaShaped(a, b, rho))
def fit_guide(guide, args):
    """Train *guide* against the global model with SVI and Adam.

    Clears the param store first; logs the ELBO every 500 steps.
    Step count, learning rate and model parameters come from *args*.
    """
    pyro.clear_param_store()
    optimizer = optim.Adam({"lr": args.learning_rate})
    svi = SVI(model, guide, optimizer, Trace_ELBO())
    for step in range(args.num_steps):
        loss = svi.step(args.param_a, args.param_b)
        if step % 500 == 0:
            logging.info("[{}]Elbo loss = {:.2f}".format(step, loss))
def run_hmc(args, model):
    """Draw posterior samples from *model* with NUTS and print a summary.

    :return: the finished MCMC object (samples accessible via get_samples()).
    """
    kernel = NUTS(model)
    mcmc = MCMC(kernel, num_samples=args.num_samples, warmup_steps=args.num_warmup)
    mcmc.run(args.param_a, args.param_b)
    mcmc.summary()
    return mcmc
def _plot_density(ax, x1, x2, p, xlim, ylim, title):
    """Filled contour plot of the reference density *p* with shared labels/limits."""
    ax.contourf(x1, x2, p, cmap="OrRd")
    ax.set(xlabel="x0", ylabel="x1", xlim=xlim, ylim=ylim, title=title)


def _plot_warped(ax, zs, title):
    """Scatter plot of warped (latent-space) posterior samples."""
    sns.scatterplot(zs[:, 0], zs[:, 1], alpha=0.2, ax=ax)
    ax.set(xlabel="x0", ylabel="x1", title=title)


def main(args):
    """Run the full NeuTra demo and save the eight-panel figure to neutra.pdf.

    Panels: reference density; vanilla HMC; DiagNormal autoguide, its NeuTra
    HMC warped and transformed samples; BNAF autoguide, its NeuTra HMC warped
    and transformed samples.  The repetitive contour/label plotting is
    factored into _plot_density/_plot_warped; behavior is unchanged.
    """
    pyro.set_rng_seed(args.rng_seed)
    fig = plt.figure(figsize=(8, 16), constrained_layout=True)
    gs = GridSpec(4, 2, figure=fig)
    ax1 = fig.add_subplot(gs[0, 0])
    ax2 = fig.add_subplot(gs[0, 1])
    ax3 = fig.add_subplot(gs[1, 0])
    ax4 = fig.add_subplot(gs[2, 0])
    ax5 = fig.add_subplot(gs[3, 0])
    ax6 = fig.add_subplot(gs[1, 1])
    ax7 = fig.add_subplot(gs[2, 1])
    ax8 = fig.add_subplot(gs[3, 1])
    xlim = tuple(int(x) for x in args.x_lim.strip().split(","))
    ylim = tuple(int(x) for x in args.y_lim.strip().split(","))
    assert len(xlim) == 2
    assert len(ylim) == 2

    # 1. Plot the BananaShaped reference density on a grid.
    x1, x2 = torch.meshgrid([torch.linspace(*xlim, 100), torch.linspace(*ylim, 100)])
    d = BananaShaped(args.param_a, args.param_b)
    p = torch.exp(d.log_prob(torch.stack([x1, x2], dim=-1)))
    _plot_density(ax1, x1, x2, p, xlim, ylim,
                  "BananaShaped distribution: \nlog density")

    # 2. Run vanilla HMC.
    logging.info("\nDrawing samples using vanilla HMC ...")
    mcmc = run_hmc(args, model)
    vanilla_samples = mcmc.get_samples()["x"].cpu().numpy()
    _plot_density(ax2, x1, x2, p, xlim, ylim, "Posterior \n(vanilla HMC)")
    sns.kdeplot(vanilla_samples[:, 0], vanilla_samples[:, 1], ax=ax2)

    # 3(a). Fit a diagonal normal autoguide.
    logging.info("\nFitting a DiagNormal autoguide ...")
    guide = AutoDiagonalNormal(model, init_scale=0.05)
    fit_guide(guide, args)
    with pyro.plate("N", args.num_samples):
        guide_samples = guide()["x"].detach().cpu().numpy()
    _plot_density(ax3, x1, x2, p, xlim, ylim, "Posterior \n(DiagNormal autoguide)")
    sns.kdeplot(guide_samples[:, 0], guide_samples[:, 1], ax=ax3)

    # 3(b). Draw samples using NeuTra HMC (DiagNormal transport).
    logging.info("\nDrawing samples using DiagNormal autoguide + NeuTra HMC ...")
    neutra = NeuTraReparam(guide.requires_grad_(False))
    neutra_model = poutine.reparam(model, config=lambda _: neutra)
    mcmc = run_hmc(args, neutra_model)
    zs = mcmc.get_samples()["x_shared_latent"]
    _plot_warped(ax4, zs, "Posterior (warped) samples \n(DiagNormal + NeuTra HMC)")
    samples = neutra.transform_sample(zs)
    samples = samples["x"].cpu().numpy()
    _plot_density(ax5, x1, x2, p, xlim, ylim,
                  "Posterior (transformed) \n(DiagNormal + NeuTra HMC)")
    sns.kdeplot(samples[:, 0], samples[:, 1], ax=ax5)

    # 4(a). Fit a BNAF autoguide.
    logging.info("\nFitting a BNAF autoguide ...")
    guide = AutoNormalizingFlow(
        model, partial(iterated, args.num_flows, block_autoregressive)
    )
    fit_guide(guide, args)
    with pyro.plate("N", args.num_samples):
        guide_samples = guide()["x"].detach().cpu().numpy()
    _plot_density(ax6, x1, x2, p, xlim, ylim, "Posterior \n(BNAF autoguide)")
    sns.kdeplot(guide_samples[:, 0], guide_samples[:, 1], ax=ax6)

    # 4(b). Draw samples using NeuTra HMC (BNAF transport).
    logging.info("\nDrawing samples using BNAF autoguide + NeuTra HMC ...")
    neutra = NeuTraReparam(guide.requires_grad_(False))
    neutra_model = poutine.reparam(model, config=lambda _: neutra)
    mcmc = run_hmc(args, neutra_model)
    zs = mcmc.get_samples()["x_shared_latent"]
    _plot_warped(ax7, zs, "Posterior (warped) samples \n(BNAF + NeuTra HMC)")
    samples = neutra.transform_sample(zs)
    samples = samples["x"].cpu().numpy()
    _plot_density(ax8, x1, x2, p, xlim, ylim,
                  "Posterior (transformed) \n(BNAF + NeuTra HMC)")
    sns.kdeplot(samples[:, 0], samples[:, 1], ax=ax8)

    plt.savefig(os.path.join(os.path.dirname(__file__), "neutra.pdf"))
# CLI entry point: parse demo parameters and run the NeuTra example.
if __name__ == "__main__":
    assert pyro.__version__.startswith("1.7.0")
    parser = argparse.ArgumentParser(
        description="Example illustrating NeuTra Reparametrizer"
    )
    parser.add_argument(
        "-n", "--num-steps", default=10000, type=int, help="number of SVI steps"
    )
    parser.add_argument(
        "-lr",
        "--learning-rate",
        default=1e-2,
        type=float,
        help="learning rate for the Adam optimizer",
    )
    parser.add_argument("--rng-seed", default=1, type=int, help="RNG seed")
    parser.add_argument(
        "--num-warmup", default=500, type=int, help="number of warmup steps for NUTS"
    )
    parser.add_argument(
        "--num-samples",
        default=1000,
        type=int,
        help="number of samples to be drawn from NUTS",
    )
    parser.add_argument(
        "--param-a",
        default=1.15,
        type=float,
        help="parameter `a` of BananaShaped distribution",
    )
    parser.add_argument(
        "--param-b",
        default=1.0,
        type=float,
        help="parameter `b` of BananaShaped distribution",
    )
    parser.add_argument(
        "--num-flows", default=1, type=int, help="number of flows in the BNAF autoguide"
    )
    parser.add_argument(
        "--x-lim", default="-3,3", type=str, help="x limits for the plots"
    )
    parser.add_argument(
        "--y-lim", default="0,8", type=str, help="y limits for the plots"
    )
    args = parser.parse_args()
    main(args)
| |
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function
from chainer.utils import argument
from chainer.utils import type_check
# Resolve cuDNN handles once at import time; whether the cuDNN fast path is
# actually taken is decided per call via chainer.should_use_cudnn(...).
if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    libcudnn = cuda.cuda.cudnn
    _sampler_type = libcudnn.CUDNN_SAMPLER_BILINEAR
class SpatialTransformerSampler(function.Function):

    """Differentiable bilinear image sampler (Spatial Transformer Networks).

    GPU paths dispatch to cuDNN's spatialTfSampler* when available
    (cuDNN >= 5000); otherwise the generic NumPy/CuPy implementation in
    ``_forward``/``_backward`` is used.
    """

    def check_type_forward(self, in_types):
        """Expect (x, grid): float32 4-D arrays, grid with 2 channels and
        the same batch size as x."""
        n_in = in_types.size()
        type_check.expect(2 == n_in)
        x_type = in_types[0]
        grid_type = in_types[1]
        type_check.expect(
            x_type.dtype == numpy.float32,
            grid_type.dtype == numpy.float32,
            x_type.ndim == 4,
            grid_type.ndim == 4,
            grid_type.shape[1] == 2,
            x_type.shape[0] == grid_type.shape[0],
        )

    def forward_cpu(self, inputs):
        # CPU always uses the generic implementation.
        return self._forward(inputs)

    def forward_gpu(self, inputs):
        """GPU forward: cuDNN fast path when enabled, else generic fallback."""
        if not chainer.should_use_cudnn('>=auto', 5000):
            return self._forward(inputs)
        x, grid = inputs
        # Output keeps x's (B, C) and takes the grid's spatial extent.
        out_shape = x.shape[:2] + grid.shape[2:]
        y = cuda.cupy.empty(out_shape, dtype=x.dtype)
        shape = numpy.array(out_shape, dtype=numpy.int32)
        x = cuda.cupy.ascontiguousarray(x)
        # cuDNN expects the grid channel-last: (B, out_H, out_W, 2).
        grid_t = cuda.cupy.transpose(grid, (0, 2, 3, 1))
        grid_t = cuda.cupy.ascontiguousarray(grid_t)
        handle = cudnn.get_handle()
        x_desc = cudnn.create_tensor_descriptor(x)
        y_desc = cudnn.create_tensor_descriptor(y)
        # Kept on self so backward_gpu can reuse the same descriptor.
        self.st_desc =\
            cuda.cupy.cudnn.create_spatial_transformer_descriptor(
                _sampler_type, grid.dtype, len(shape), shape.ctypes.data)
        one = numpy.array(1, dtype=x.dtype).ctypes
        zero = numpy.array(0, dtype=x.dtype).ctypes
        libcudnn.spatialTfSamplerForward(
            handle, self.st_desc.value, one.data,
            x_desc.value, x.data.ptr, grid_t.data.ptr, zero.data,
            y_desc.value, y.data.ptr)
        return y,

    def _forward(self, inputs):
        """Generic bilinear sampling; works for both NumPy and CuPy arrays.

        Grid coordinates are in [-1, 1]; locations outside the image sample
        from a one-pixel zero padding.
        """
        x, grid = inputs
        xp = backend.get_array_module(x)
        B, C, H, W = x.shape
        _, _, out_H, out_W = grid.shape
        # Flatten the spatial axes of the grid: (B, 2, out_H*out_W).
        grid = grid.reshape(grid.shape[:2] + (-1,))
        u = grid[:, 0]
        v = grid[:, 1]
        # Pad the image so that pixels locating outside of the original
        # image's size can be sampled.
        x_pad = xp.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)), mode='constant')
        # Rescale coordinates from [-1, 1] to [0, width or height - 1],
        # and adjust them to the padded image.
        u = (u + 1) * (W - 1) / 2 + 1
        v = (v + 1) * (H - 1) / 2 + 1
        u_clipped = u.clip(0, W + 1)
        v_clipped = v.clip(0, H + 1)
        # indices of the 2x2 pixel neighborhood surrounding the coordinates
        u0 = xp.floor(u_clipped).astype(numpy.int32)
        u0 = u0.clip(0, W)
        u1 = u0 + 1
        v0 = xp.floor(v_clipped).astype(numpy.int32)
        v0 = v0.clip(0, H)
        v1 = v0 + 1
        # bilinear interpolation weights of the four neighboring pixels
        w1 = (u1 - u_clipped) * (v1 - v_clipped)
        w2 = (u_clipped - u0) * (v1 - v_clipped)
        w3 = (u1 - u_clipped) * (v_clipped - v0)
        w4 = (u_clipped - u0) * (v_clipped - v0)
        w1 = w1.astype(x_pad.dtype, copy=False)
        w2 = w2.astype(x_pad.dtype, copy=False)
        w3 = w3.astype(x_pad.dtype, copy=False)
        w4 = w4.astype(x_pad.dtype, copy=False)
        # Gather the four neighbors per batch element (fancy indexing moves
        # the channel axis last, hence the reshape/transpose at the end).
        x_indexed_1 = xp.concatenate([xp.expand_dims(
            x_pad[b, :, v0[b], u0[b]], axis=0) for b in range(B)], axis=0)
        x_indexed_2 = xp.concatenate([xp.expand_dims(
            x_pad[b, :, v0[b], u1[b]], axis=0) for b in range(B)], axis=0)
        x_indexed_3 = xp.concatenate([xp.expand_dims(
            x_pad[b, :, v1[b], u0[b]], axis=0) for b in range(B)], axis=0)
        x_indexed_4 = xp.concatenate([xp.expand_dims(
            x_pad[b, :, v1[b], u1[b]], axis=0) for b in range(B)], axis=0)
        y = w1[:, :, None] * x_indexed_1
        y += w2[:, :, None] * x_indexed_2
        y += w3[:, :, None] * x_indexed_3
        y += w4[:, :, None] * x_indexed_4
        y = y.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
        return y,

    def backward_cpu(self, inputs, grad_outputs):
        # CPU always uses the generic implementation.
        return self._backward(inputs, grad_outputs)

    def backward_gpu(self, inputs, grad_outputs):
        """GPU backward: cuDNN fast path (reusing self.st_desc from forward),
        else generic fallback."""
        if not chainer.should_use_cudnn('>=auto', 5000):
            return self._backward(inputs, grad_outputs)
        x, grid = inputs
        gy, = grad_outputs
        grid_t = cuda.cupy.transpose(grid, (0, 2, 3, 1))
        grid_t = cuda.cupy.ascontiguousarray(grid_t)
        x = cuda.cupy.ascontiguousarray(x)
        gy = cuda.cupy.ascontiguousarray(gy)
        gx = cuda.cupy.empty_like(x)
        ggrid_t = cuda.cupy.empty_like(grid_t)
        handle = cudnn.get_handle()
        x_desc = cudnn.create_tensor_descriptor(x)
        dx_desc = cudnn.create_tensor_descriptor(gx)
        dy_desc = cudnn.create_tensor_descriptor(gy)
        one = numpy.array(1, dtype=x.dtype).ctypes
        zero = numpy.array(0, dtype=x.dtype).ctypes
        libcudnn.spatialTfSamplerBackward(
            handle, self.st_desc.value,
            one.data,
            x_desc.value, x.data.ptr,
            zero.data,
            dx_desc.value, gx.data.ptr,
            one.data,
            dy_desc.value, gy.data.ptr,
            grid_t.data.ptr, zero.data, ggrid_t.data.ptr)
        # Back to channel-first layout: (B, 2, out_H, out_W).
        ggrid = cuda.cupy.transpose(ggrid_t, axes=(0, 3, 1, 2))
        return gx, ggrid

    def _backward(self, inputs, grad_outputs):
        """Generic backward: analytic gradients of bilinear sampling w.r.t.
        both the input image (gx) and the sampling grid (ggrid)."""
        x, grid = inputs
        xp = backend.get_array_module(x)
        gy, = grad_outputs
        B, C, H, W = x.shape
        _, _, out_H, out_W = grid.shape
        grid = grid.reshape(grid.shape[:2] + (-1,))
        u = grid[:, 0]
        v = grid[:, 1]
        # Pad the image so that points locating outside of the original
        # image's size can be sampled.
        x_pad = xp.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)), mode='constant')
        # Rescale coordinates from [-1, 1] to [0, width or height - 1],
        # and adjust them to the padded image.
        u = (u + 1) * (W - 1) / 2 + 1
        v = (v + 1) * (H - 1) / 2 + 1
        u_clipped = u.clip(0, W + 1)
        v_clipped = v.clip(0, H + 1)
        # indices of the 2x2 pixel neighborhood surrounding the coordinates
        u0 = xp.floor(u_clipped).astype(numpy.int32)
        u0 = u0.clip(0, W)
        u1 = u0 + 1
        v0 = xp.floor(v_clipped).astype(numpy.int32)
        v0 = v0.clip(0, H)
        v1 = v0 + 1
        # per-axis interpolation weights (factors of the forward's w1..w4)
        wu0 = u_clipped - u0
        wu1 = u1 - u_clipped
        wv0 = v_clipped - v0
        wv1 = v1 - v_clipped
        wu0 = wu0.astype(gy.dtype, copy=False)
        wu1 = wu1.astype(gy.dtype, copy=False)
        wv0 = wv0.astype(gy.dtype, copy=False)
        wv1 = wv1.astype(gy.dtype, copy=False)
        # --- gu, gv: derivative of the bilinear output w.r.t. u and v.
        x_indexed_1 = xp.concatenate([xp.expand_dims(
            x_pad[b, :, v0[b], u0[b]], axis=0) for b in range(B)], axis=0)
        x_indexed_2 = xp.concatenate([xp.expand_dims(
            x_pad[b, :, v0[b], u1[b]], axis=0) for b in range(B)], axis=0)
        x_indexed_3 = xp.concatenate([xp.expand_dims(
            x_pad[b, :, v1[b], u0[b]], axis=0) for b in range(B)], axis=0)
        x_indexed_4 = xp.concatenate([xp.expand_dims(
            x_pad[b, :, v1[b], u1[b]], axis=0) for b in range(B)], axis=0)
        gu = -wv1[:, :, None] * x_indexed_1
        gu += wv1[:, :, None] * x_indexed_2
        gu -= wv0[:, :, None] * x_indexed_3
        gu += wv0[:, :, None] * x_indexed_4
        gv = -wu1[:, :, None] * x_indexed_1
        gv -= wu0[:, :, None] * x_indexed_2
        gv += wu1[:, :, None] * x_indexed_3
        gv += wu0[:, :, None] * x_indexed_4
        gu = gu.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
        gv = gv.reshape(B, out_H, out_W, C).transpose(0, 3, 1, 2)
        gu *= gy
        gv *= gy
        gu = xp.sum(gu, axis=1)
        gv = xp.sum(gv, axis=1)
        # Offsets scaling of the coordinates ((W-1)/2 chain-rule factor from
        # the [-1, 1] rescale) and zero the gradient where coords were clipped.
        u_reshaped = u.reshape(gu.shape)
        v_reshaped = v.reshape(gv.shape)
        gu = gu / 2. * (W - 1) * (u_reshaped > 0) * (u_reshaped < (W + 1))
        gv = gv / 2. * (H - 1) * (v_reshaped > 0) * (v_reshaped < (H + 1))
        ggrid = xp.concatenate((gu[:, None], gv[:, None]), axis=1)
        # --- gx: scatter-add each output gradient back to its four neighbors.
        if xp is numpy:
            scatter_add = numpy.add.at
        else:
            scatter_add = cuda.cupyx.scatter_add
        gx = xp.zeros_like(x_pad)
        gy = gy.reshape(B, C, -1)
        for b in range(B):
            scatter_add(gx[b], (slice(None), v0[b], u0[b]),
                        gy[b] * wu1[b] * wv1[b])
            scatter_add(gx[b], (slice(None), v0[b], u1[b]),
                        gy[b] * wu0[b] * wv1[b])
            scatter_add(gx[b], (slice(None), v1[b], u0[b]),
                        gy[b] * wu1[b] * wv0[b])
            scatter_add(gx[b], (slice(None), v1[b], u1[b]),
                        gy[b] * wu0[b] * wv0[b])
        # Drop the one-pixel zero padding added above.
        gx = gx[:, :, 1:-1, 1:-1]
        return gx, ggrid
def spatial_transformer_sampler(x, grid, **kwargs):
    """2D Spatial Transformer sampler.

    This is a differentiable image sampler. With a set of sampling points
    ``grid`` and an input feature map ``x``, this produces a sampled output
    feature map.

    This function currently only supports bilinear interpolation as a sampling
    kernel.

    When coordinates in ``grid`` is outside range :math:`[-1, 1]`, values are
    sampled from a zero padded input image.

    Notation: here is a notation for dimensionalities.

    - :math:`n` is the batch size.
    - :math:`c_I` is the number of the input channels.
    - :math:`h` and :math:`w` are the height and width of the input image,
      respectively.
    - :math:`h_O` and :math:`w_O` are the height and width of the output
      image.

    See detail in the following paper: `Spatial Transformer Networks \
    <https://arxiv.org/abs/1506.02025>`_.

    .. note::

        cuDNN supports SpatialTransformerSampler from version 5.0.0.

    Args:
        x (~chainer.Variable): Input variable of shape :math:`(n, c_I, h, w)`.
        grid (~chainer.Variable): Coordinate variable of shape
            :math:`(n, 2, h_O, w_O)`. Each coordinate defines the spatial
            location in the input where a sampling kernel is applied to get
            the value at a particular pixel in the output.
            ``grid[idx, :, i, j]`` corresponds to the coordinate that is used
            to sample the values for an output pixel at location
            :math:`(i, j)`.
            In the second dimension, the first coordinate corresponds to the
            location along the horizontal axis, and the second coordinate
            corresponds to the location along the vertical axis.
            The coordinate :math:`(-1, -1)` corresponds to the upper-left
            corner of the input image.

    Returns:
        ~chainer.Variable: Output feature map of shape \
            :math:`(n, c_I, h_O, w_O)`.

    """
    # The use_cudnn kwarg was removed; reject it with a helpful message.
    if kwargs:
        argument.check_unexpected_kwargs(
            kwargs, use_cudnn="The argument \"use_cudnn\" is not "
            "supported anymore. "
            "Use chainer.using_config('use_cudnn', value) "
            "context where value can be `always`, `never`, or `auto`.")
        argument.assert_kwargs_empty(kwargs)
    return SpatialTransformerSampler()(x, grid)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
from builtins import object
from hashlib import sha1
from future.utils import raise_from
from pants.build_graph.build_graph import sort_targets
from pants.build_graph.target import Target
from pants.invalidation.build_invalidator import CacheKey
from pants.util.dirutil import relative_symlink, safe_delete, safe_mkdir, safe_rmtree
from pants.util.memo import memoized_method
class VersionedTargetSet(object):
  """Represents a list of targets, a corresponding CacheKey, and a flag determining whether the
  list of targets is currently valid.

  When invalidating a single target, this can be used to represent that target as a singleton.
  When checking the artifact cache, this can also be used to represent a list of targets that are
  built together into a single artifact.
  """

  class IllegalResultsDir(Exception):
    """Indicate a problem interacting with a versioned target results directory."""

  @staticmethod
  def from_versioned_targets(versioned_targets):
    """Combine VersionedTargets sharing a single CacheManager into one set.

    :API: public
    """
    first_target = versioned_targets[0]
    cache_manager = first_target._cache_manager
    # Quick sanity check; all the versioned targets should have the same cache manager.
    # TODO(ryan): the way VersionedTargets store their own links to a single CacheManager instance
    # feels hacky; see if there's a cleaner way for callers to handle awareness of the CacheManager.
    for versioned_target in versioned_targets:
      if versioned_target._cache_manager != cache_manager:
        raise ValueError("Attempting to combine versioned targets {} and {} with different"
                         " CacheManager instances: {} and {}".format(first_target, versioned_target,
                                                                     cache_manager,
                                                                     versioned_target._cache_manager))
    return VersionedTargetSet(cache_manager, versioned_targets)

  def __init__(self, cache_manager, versioned_targets):
    """Compute the combined cache key and validity for the given VersionedTargets."""
    self._cache_manager = cache_manager
    self.versioned_targets = versioned_targets
    self.targets = [vt.target for vt in versioned_targets]
    # The following line is a no-op if cache_key was set in the VersionedTarget __init__ method.
    self.cache_key = CacheKey.combine_cache_keys([vt.cache_key for vt in versioned_targets])
    # NB: previous_cache_key may be None on the first build of a target.
    self.previous_cache_key = cache_manager.previous_key(self.cache_key)
    # Valid iff the combined key is unchanged since the last successful build.
    self.valid = self.previous_cache_key == self.cache_key
    if cache_manager.invalidation_report:
      cache_manager.invalidation_report.add_vts(cache_manager.task_name,
                                                self.targets,
                                                self.cache_key,
                                                self.valid,
                                                phase='init')
    self._results_dir = None
    self._current_results_dir = None
    self._previous_results_dir = None
    # True if the results_dir for this VT was created incrementally via clone of the
    # previous results_dir.
    self.is_incremental = False

  @property
  def cacheable(self):
    """Indicates whether artifacts associated with this target set should be cached.

    :return: `True` if this target set's associated artifacts can be cached.
    :rtype: bool
    """
    return self._cache_manager.cacheable(self.cache_key)

  def update(self):
    """Mark this set as successfully processed (delegates to the cache manager)."""
    self._cache_manager.update(self)

  def force_invalidate(self):
    # Note: This method isn't exposed as Public because the api is not yet
    # finalized, however it is currently used by Square for plugins. There is
    # an open OSS issue to finalize this API. Please take care when changing
    # until https://github.com/pantsbuild/pants/issues/2532 is resolved.
    self._cache_manager.force_invalidate(self)

  @property
  def has_results_dir(self):
    return self._results_dir is not None

  @property
  def has_previous_results_dir(self):
    return self._previous_results_dir is not None and os.path.isdir(self._previous_results_dir)

  @property
  def results_dir(self):
    """The directory that stores results for these targets.

    The results_dir is represented by a stable symlink to the current_results_dir: consumers
    should generally prefer to access the stable directory.
    """
    if self._results_dir is None:
      raise ValueError('No results_dir was created for {}'.format(self))
    return self._results_dir

  @property
  def current_results_dir(self):
    """A unique directory that stores results for this version of these targets.
    """
    if self._current_results_dir is None:
      raise ValueError('No results_dir was created for {}'.format(self))
    return self._current_results_dir

  @property
  def previous_results_dir(self):
    """The directory that stores results for the previous version of these targets.

    Only valid if is_incremental is true.

    TODO: Exposing old results is a bit of an abstraction leak, because ill-behaved Tasks could
    mutate them.
    """
    if not self.has_previous_results_dir:
      raise ValueError('There is no previous_results_dir for: {}'.format(self))
    return self._previous_results_dir

  def ensure_legal(self):
    """Return True as long as the state does not break any internal contracts."""
    # Do our best to provide complete feedback, it's easy to imagine the frustration of flipping between error states.
    if self._results_dir:
      errors = ''
      if not os.path.islink(self._results_dir):
        errors += '\nThe results_dir is no longer a symlink:\n\t* {}'.format(self._results_dir)
      if not os.path.isdir(self._current_results_dir):
        errors += '\nThe current_results_dir directory was not found\n\t* {}'.format(self._current_results_dir)
      if errors:
        raise self.IllegalResultsDir(
          '\nThe results_dirs state should not be manually cleaned or recreated by tasks.\n{}'.format(errors)
        )
    return True

  def live_dirs(self):
    """Yields directories that must exist for this VersionedTarget to function."""
    # The only caller of this function is the workdir cleaning pipeline. It is not clear that the previous_results_dir
    # should be returned for that purpose. And, by the time this is called, the contents have already been copied.
    if self.has_results_dir:
      yield self.results_dir
      yield self.current_results_dir
    if self.has_previous_results_dir:
      yield self.previous_results_dir

  @memoized_method
  def _target_to_vt(self):
    # Cached lookup from Target to its VersionedTarget within this set.
    return {vt.target: vt for vt in self.versioned_targets}

  def __repr__(self):
    return 'VTS({}, {})'.format(','.join(target.address.spec for target in self.targets),
                                'valid' if self.valid else 'invalid')
class VersionedTarget(VersionedTargetSet):
  """This class represents a singleton VersionedTargetSet.

  :API: public
  """

  def __init__(self, cache_manager, target, cache_key):
    """
    :API: public
    """
    if not isinstance(target, Target):
      raise ValueError("The target {} must be an instance of Target but is not.".format(target.id))
    self.target = target
    self.cache_key = cache_key
    # Must come after the assignments above, as they are used in the parent's __init__.
    super(VersionedTarget, self).__init__(cache_manager, [self])
    self.id = target.id

  @property
  def cacheable(self):
    """Indicates whether artifacts associated with this target should be cached.

    :return: `True` if this target's associated artifacts can be cached.
    :rtype: bool
    """
    # A target can opt out of caching via no_cache even if the key is cacheable.
    return super(VersionedTarget, self).cacheable and not self.target.no_cache

  def create_results_dir(self):
    """Ensure that the empty results directory and a stable symlink exist for these versioned targets."""
    self._current_results_dir = self._cache_manager._results_dir_path(self.cache_key, stable=False)
    self._results_dir = self._cache_manager._results_dir_path(self.cache_key, stable=True)
    if not self.valid:
      # Clean the workspace for invalid vts.
      safe_mkdir(self._current_results_dir, clean=True)
      relative_symlink(self._current_results_dir, self._results_dir)
    self.ensure_legal()

  def copy_previous_results(self):
    """Use the latest valid results_dir as the starting contents of the current results_dir.

    Should be called after the cache is checked, since previous_results are not useful if there is
    a cached artifact.
    """
    # TODO(mateo): This should probably be managed by the task, which manages the rest of the
    # incremental support.
    if not self.previous_cache_key:
      return None
    previous_path = self._cache_manager._results_dir_path(self.previous_cache_key, stable=False)
    if os.path.isdir(previous_path):
      self.is_incremental = True
      safe_rmtree(self._current_results_dir)
      shutil.copytree(previous_path, self._current_results_dir)
    # Ensure the destination exists even when there was nothing to copy.
    safe_mkdir(self._current_results_dir)
    relative_symlink(self._current_results_dir, self.results_dir)
    # Set the self._previous last, so that it is only True after the copy completed.
    self._previous_results_dir = previous_path

  def __repr__(self):
    return 'VT({}, {})'.format(self.target.id, 'valid' if self.valid else 'invalid')
class InvalidationCheck(object):
  """The result of calling check() on a CacheManager.

  Each member is a list of VersionedTargetSet objects. Sorting of the targets depends
  on how you order the InvalidationCheck from the InvalidationCacheManager.

  Tasks may need to perform no, some or all operations on either of these, depending on how they
  are implemented.
  """

  def __init__(self, all_vts, invalid_vts):
    """
    :API: public
    """
    # All the targets, valid and invalid.
    self.all_vts = all_vts
    # Just the invalid targets.
    self.invalid_vts = invalid_vts
class InvalidationCacheManager(object):
"""Manages cache checks, updates and invalidation keeping track of basic change
and invalidation statistics.
Note that this is distinct from the ArtifactCache concept, and should probably be renamed.
"""
class CacheValidationError(Exception):
"""Indicates a problem accessing the cache."""
_STABLE_DIR_NAME = 'current'
def __init__(self,
results_dir_root,
cache_key_generator,
build_invalidator,
invalidate_dependents,
fingerprint_strategy=None,
invalidation_report=None,
task_name=None,
task_version=None,
artifact_write_callback=lambda _: None):
"""
:API: public
"""
self._cache_key_generator = cache_key_generator
self._task_name = task_name or 'UNKNOWN'
self._task_version = task_version or 'Unknown_0'
self._invalidate_dependents = invalidate_dependents
self._invalidator = build_invalidator
self._fingerprint_strategy = fingerprint_strategy
self._artifact_write_callback = artifact_write_callback
self.invalidation_report = invalidation_report
# Create the task-versioned prefix of the results dir, and a stable symlink to it
# (useful when debugging).
task_version_sha = sha1(self._task_version.encode('utf-8')).hexdigest()[:12]
self._results_dir_prefix = os.path.join(results_dir_root,
task_version_sha)
safe_mkdir(self._results_dir_prefix)
stable_prefix = os.path.join(results_dir_root, self._STABLE_DIR_NAME)
safe_delete(stable_prefix)
relative_symlink(self._results_dir_prefix, stable_prefix)
def update(self, vts):
"""Mark a changed or invalidated VersionedTargetSet as successfully processed."""
for vt in vts.versioned_targets:
vt.ensure_legal()
if not vt.valid:
self._invalidator.update(vt.cache_key)
vt.valid = True
self._artifact_write_callback(vt)
if not vts.valid:
vts.ensure_legal()
self._invalidator.update(vts.cache_key)
vts.valid = True
self._artifact_write_callback(vts)
def force_invalidate(self, vts):
"""Force invalidation of a VersionedTargetSet."""
for vt in vts.versioned_targets:
self._invalidator.force_invalidate(vt.cache_key)
vt.valid = False
self._invalidator.force_invalidate(vts.cache_key)
vts.valid = False
def check(self,
targets,
topological_order=False):
"""Checks whether each of the targets has changed and invalidates it if so.
Returns a list of VersionedTargetSet objects (either valid or invalid). The returned sets
'cover' the input targets, with one caveat: if the FingerprintStrategy
opted out of fingerprinting a target because it doesn't contribute to invalidation, then that
target will be excluded from all_vts and invalid_vts.
Callers can inspect these vts and rebuild the invalid ones, for example.
"""
all_vts = self.wrap_targets(targets, topological_order=topological_order)
invalid_vts = [vt for vt in all_vts if not vt.valid]
return InvalidationCheck(all_vts, invalid_vts)
@property
def task_name(self):
return self._task_name
  def _results_dir_path(self, key, stable):
    """Return a results directory path for the given key.

    :param key: A CacheKey to generate an id for.
    :param stable: True to use a stable subdirectory, false to use a portion of the cache key to
      generate a path unique to the key.
    """
    # TODO: Shorten cache_key hashes in general?
    # NOTE(review): sha1() requires bytes on Python 3 -- this assumes key.hash is
    # already bytes (or that we run under Python 2); confirm against CacheKey.
    return os.path.join(
      self._results_dir_prefix,
      key.id,
      self._STABLE_DIR_NAME if stable else sha1(key.hash).hexdigest()[:12]
    )
def wrap_targets(self, targets, topological_order=False):
"""Wrap targets and their computed cache keys in VersionedTargets.
If the FingerprintStrategy opted out of providing a fingerprint for a target, that target will not
have an associated VersionedTarget returned.
Returns a list of VersionedTargets, each representing one input target.
"""
def vt_iter():
if topological_order:
target_set = set(targets)
sorted_targets = [t for t in reversed(sort_targets(targets)) if t in target_set]
else:
sorted_targets = sorted(targets)
for target in sorted_targets:
target_key = self._key_for(target)
if target_key is not None:
yield VersionedTarget(self, target, target_key)
return list(vt_iter())
  def cacheable(self, cache_key):
    """Indicates whether artifacts associated with the given `cache_key` should be cached.

    :return: `True` if the `cache_key` represents a cacheable set of target artifacts.
    :rtype: bool
    """
    # The decision is delegated entirely to the underlying build invalidator.
    return self._invalidator.cacheable(cache_key)
  def previous_key(self, cache_key):
    """Delegate to the build invalidator to look up the previous key for `cache_key`."""
    return self._invalidator.previous_key(cache_key)
def _key_for(self, target):
try:
return self._cache_key_generator.key_for_target(target,
transitive=self._invalidate_dependents,
fingerprint_strategy=self._fingerprint_strategy)
except Exception as e:
# This is a catch-all for problems we haven't caught up with and given a better diagnostic.
# TODO(Eric Ayers): If you see this exception, add a fix to catch the problem earlier.
new_exception = self.CacheValidationError("Problem validating target {} in {}: {}"
.format(target.id, target.address.spec_path, e))
raise_from(self.CacheValidationError(new_exception), e)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers for working with signatures in tf.saved_model.save."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.saved_model import revived_types
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training.tracking import base
from tensorflow.python.util import compat
from tensorflow.python.util import nest
DEFAULT_SIGNATURE_ATTR = "_default_save_signature"
SIGNATURE_ATTRIBUTE_NAME = "signatures"
def _get_signature(function):
  """Resolve `function` to a ConcreteFunction usable as a signature, or None."""
  wrapped = isinstance(function, (defun.Function, def_function.Function))
  if wrapped and function.input_signature is not None:
    # A tf.function with a declared input signature resolves to one concrete form.
    function = function.get_concrete_function()
  if isinstance(function, defun.ConcreteFunction):
    return function
  return None
def _valid_signature(concrete_function):
  """Returns whether concrete function can be converted to a signature."""
  # Functions without outputs don't make sense as signatures: there is no way to
  # run an Operation with no outputs as a SignatureDef in the 1.x style.
  if not concrete_function.outputs:
    return False
  try:
    # Probe normalization; it raises ValueError for output structures that
    # cannot be expressed as a signature.
    _normalize_outputs(concrete_function.structured_outputs, "unused", "unused")
  except ValueError:
    return False
  return True
def find_function_to_export(saveable_view):
  """Function to export, None if no suitable function was found.

  Prefers an explicit default save signature on the root object; otherwise, if
  exactly one attached function qualifies as a signature, returns that one.
  """
  # If the user did not specify signatures, check the root object for a function
  # that can be made into a signature.
  functions = saveable_view.list_functions(saveable_view.root)
  signature = functions.get(DEFAULT_SIGNATURE_ATTR, None)
  if signature is not None:
    return signature
  # TODO(andresp): Discuss removing this behaviour. It can lead to WTFs when a
  # user decides to annotate more functions with tf.function and suddenly
  # serving that model way later in the process stops working.
  possible_signatures = []
  for function in functions.values():
    concrete = _get_signature(function)
    if concrete is not None and _valid_signature(concrete):
      possible_signatures.append(concrete)
  if len(possible_signatures) == 1:
    # The lone candidate was already resolved to a ConcreteFunction and
    # validated above; previously it was redundantly re-resolved via
    # _get_signature and re-checked via _valid_signature here.
    return possible_signatures[0]
  return None
def canonicalize_signatures(signatures):
  """Converts `signatures` into a dictionary of concrete functions.

  Accepts None, a single function, or a mapping of key -> function, and returns
  a dict mapping signature keys to ConcreteFunctions that each return a flat
  dictionary of Tensors (the 1.x SignatureDef format).
  """
  if signatures is None:
    return {}
  if not isinstance(signatures, collections.Mapping):
    # A bare function was passed; serve it under the default signature key.
    signatures = {
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signatures}
  concrete_signatures = {}
  for signature_key, function in signatures.items():
    signature_function = _get_signature(function)
    if signature_function is None:
      raise ValueError(
          ("Expected a TensorFlow function to generate a signature for, but "
           "got {}. Only `tf.functions` with an input signature or "
           "concrete functions can be used as a signature.").format(function))
    # Re-wrap the function so that it returns a dictionary of Tensors. This
    # matches the format of 1.x-style signatures.
    # pylint: disable=cell-var-from-loop
    @def_function.function
    def signature_wrapper(**kwargs):
      structured_outputs = signature_function(**kwargs)
      return _normalize_outputs(
          structured_outputs, signature_function.name, signature_key)
    # TODO(b/123902469): Use ConcreteFunction.structured_inputs once their names
    # always match keyword arguments.
    # Build a keyword -> TensorSpec mapping so the wrapper can be traced with
    # named inputs matching the original function's argument keywords.
    tensor_spec_signature = {}
    for keyword, tensor in zip(
        signature_function._arg_keywords,  # pylint: disable=protected-access
        signature_function.inputs):
      keyword = compat.as_str(keyword)
      tensor_spec_signature[keyword] = tensor_spec.TensorSpec.from_tensor(
          tensor, name=keyword)
    final_concrete = signature_wrapper.get_concrete_function(
        **tensor_spec_signature)
    # pylint: disable=protected-access
    if len(final_concrete._arg_keywords) == 1:
      # If there is only one input to the signature, a very common case, then
      # ordering is unambiguous and we can let people pass a positional
      # argument. Since SignatureDefs are unordered (protobuf "map") multiple
      # arguments means we need to be keyword-only.
      final_concrete._num_positional_args = 1
    else:
      final_concrete._num_positional_args = 0
    # pylint: enable=protected-access
    concrete_signatures[signature_key] = final_concrete
    # pylint: enable=cell-var-from-loop
  return concrete_signatures
def _is_flat(sequence):
  """Returns True if `sequence` has no nested structure (per tf.nest)."""
  flattened = nest.flatten(sequence)
  try:
    # A flat sequence has the same structure as its own flattening.
    nest.assert_same_structure(flattened, sequence)
  except (ValueError, TypeError):
    return False
  return True
def _normalize_outputs(outputs, function_name, signature_key):
  """Construct an output dictionary from unnormalized function outputs."""
  if isinstance(outputs, collections.Mapping):
    # Dictionary outputs pass through unchanged, but every value must be a Tensor.
    for key, value in outputs.items():
      if isinstance(value, ops.Tensor):
        continue
      raise ValueError(
          ("Got a dictionary containing non-Tensor value {} for key {} "
           "in the output of the function {} used to generate a SavedModel "
           "signature. Dictionaries outputs for functions used as signatures "
           "should have one Tensor output per string key.")
          .format(value, key, compat.as_str_any(function_name)))
    return outputs
  original_outputs = outputs
  if not isinstance(outputs, collections.Sequence):
    outputs = [outputs]
  if not _is_flat(outputs):
    raise ValueError(
        ("Got non-flat outputs '{}' from '{}' for SavedModel "
         "signature '{}'. Signatures have one Tensor per output, so "
         "to have predictable names Python functions used to generate "
         "these signatures should avoid outputting Tensors in nested "
         "structures.")
        .format(original_outputs, function_name, signature_key))
  # Name non-dictionary outputs positionally: output_0, output_1, ...
  return dict(("output_{}".format(index), output)
              for index, output in enumerate(outputs))
# _SignatureMap is immutable to ensure that users do not expect changes to be
# reflected in the SavedModel. Using public APIs, tf.saved_model.load() is the
# only way to create a _SignatureMap and there is no way to modify it. So we can
# safely ignore/overwrite ".signatures" attributes attached to objects being
# saved if they contain a _SignatureMap. A ".signatures" attribute containing
# any other type (e.g. a regular dict) will raise an exception asking the user
# to first "del obj.signatures" if they want it overwritten.
class _SignatureMap(collections.Mapping, base.Trackable):
  """A collection of SavedModel signatures."""
  def __init__(self):
    # Backing store; mutated only via the private _add_signature API.
    self._signatures = {}
  def _add_signature(self, name, concrete_function):
    """Adds a signature to the _SignatureMap."""
    # Ideally this object would be immutable, but restore is streaming so we do
    # need a private API for adding new signatures to an existing object.
    self._signatures[name] = concrete_function
  def __getitem__(self, key):
    return self._signatures[key]
  def __iter__(self):
    return iter(self._signatures)
  def __len__(self):
    return len(self._signatures)
  def __repr__(self):
    return "_SignatureMap({})".format(self._signatures)
  def _list_functions_for_serialization(self):
    # Only tf.functions and ConcreteFunctions are serializable as functions.
    return {
        key: value for key, value in self.items()
        if isinstance(value, (def_function.Function, defun.ConcreteFunction))
    }
# Teach the loader to revive ".signatures" attributes as _SignatureMap objects,
# re-populating them via the private _add_signature setter.
revived_types.register_revived_type(
    "signature_map",
    lambda obj: isinstance(obj, _SignatureMap),
    versions=[revived_types.VersionedTypeRegistration(
        # Standard dependencies are enough to reconstruct the trackable
        # items in dictionaries, so we don't need to save any extra information.
        object_factory=lambda proto: _SignatureMap(),
        version=1,
        min_producer_version=1,
        min_consumer_version=1,
        setter=_SignatureMap._add_signature  # pylint: disable=protected-access
    )])
def create_signature_map(signatures):
  """Creates an object containing `signatures`.

  Expects `signatures` to be the output of canonicalize_signatures: a mapping of
  keys to ConcreteFunctions with dictionary outputs and consistent positional-arg
  bookkeeping.
  """
  signature_map = _SignatureMap()
  for name, func in signatures.items():
    # This true of any signature that came from canonicalize_signatures. Here as
    # a sanity check on saving; crashing on load (e.g. in _add_signature) would
    # be more problematic in case future export changes violated these
    # assertions.
    assert isinstance(func, defun.ConcreteFunction)
    assert isinstance(func.structured_outputs, collections.Mapping)
    # pylint: disable=protected-access
    if len(func._arg_keywords) == 1:
      assert 1 == func._num_positional_args
    else:
      assert 0 == func._num_positional_args
    signature_map._add_signature(name, func)
    # pylint: enable=protected-access
  return signature_map
def validate_saveable_view(saveable_view):
  """Performs signature-related sanity checks on `saveable_view`."""
  for name, dep in saveable_view.list_dependencies(
      saveable_view.root):
    if name == SIGNATURE_ATTRIBUTE_NAME:
      # Only a _SignatureMap (produced by tf.saved_model.load) may occupy the
      # reserved ".signatures" attribute, since save overwrites that attribute
      # (see the _SignatureMap module comment). Fail loudly for anything else.
      if not isinstance(dep, _SignatureMap):
        raise ValueError(
            ("Exporting an object {} which has an attribute named "
             "'{signatures}'. This is a reserved attribute used to store "
             "SavedModel signatures in objects which come from "
             "`tf.saved_model.load`. Delete this attribute "
             "(e.g. 'del obj.{signatures}') before saving if this shadowing is "
             "acceptable.").format(
                 saveable_view.root,
                 signatures=SIGNATURE_ATTRIBUTE_NAME))
      # Only the root's direct '.signatures' dependency needs checking.
      break
| |
from grid.models import Grid
from django.contrib.auth.models import Group, User, Permission
from package.models import Category, PackageExample, Package
from grid.models import Element, Feature, GridPackage
from core.tests import datautil
def load():
    """Populate the test database with fixture data: a category, packages,
    grids, grid memberships, features, users, groups and package examples.

    Uses get_or_create throughout so repeated invocations are idempotent, and
    resets the DB sequences at the end so later inserts get fresh pks.
    Improvement over the original: the heavily duplicated Package / User /
    GridPackage creation blocks are factored into local helpers.
    """
    category, created = Category.objects.get_or_create(
        pk=1,
        slug='apps',
        title='App',
        description='Small components used to build projects.',
    )

    def make_package(pk, title, slug, participants, repo_url, repo_description):
        # All fixture packages share the same zeroed counters and empty pypi_url.
        package, _ = Package.objects.get_or_create(
            pk=pk,
            category=category,
            repo_watchers=0,
            title=title,
            pypi_url='',
            participants=participants,
            pypi_downloads=0,
            repo_url=repo_url,
            repo_forks=0,
            slug=slug,
            repo_description=repo_description,
        )
        return package

    package1 = make_package(
        1, 'Testability', 'testability', 'malcomt,jacobian',
        'https://github.com/pydanny/django-la-facebook',
        'Increase your testing ability with this steroid free supplement.')
    package2 = make_package(
        2, 'Supertester', 'supertester', 'thetestman',
        'https://github.com/pydanny/django-uni-form',
        'Test everything under the sun with one command!')
    package3 = make_package(
        3, 'Serious Testing', 'serious-testing', 'pydanny',
        'https://github.com/opencomparison/opencomparison',
        'Make testing as painless as waxing your legs.')
    # package4 deliberately has no grid affiliation.
    make_package(
        4, 'Another Test', 'another-test', 'pydanny',
        'https://github.com/djangopackages/djangopackages',
        'Yet another test package, with no grid affiliation.')

    grid1, created = Grid.objects.get_or_create(
        pk=1,
        description='A grid for testing.',
        title='Testing',
        is_locked=False,
        slug='testing',
    )
    grid2, created = Grid.objects.get_or_create(
        pk=2,
        description='Another grid for testing.',
        title='Another Testing',
        is_locked=False,
        slug='another-testing',
    )

    # (pk, package, grid) memberships; pks 1 and 2 intentionally attach the
    # same package to the same grid twice.
    gridpackages = {}
    for pk, package, grid in [(1, package1, grid1),
                              (2, package1, grid1),
                              (3, package3, grid1),
                              (4, package3, grid2),
                              (5, package2, grid1)]:
        gridpackages[pk], _ = GridPackage.objects.get_or_create(
            pk=pk,
            package=package,
            grid=grid,
        )

    feature1, created = Feature.objects.get_or_create(
        pk=1,
        title='Has tests?',
        grid=grid1,
        description='Does this package come with tests?',
    )
    Feature.objects.get_or_create(
        pk=2,
        title='Coolness?',
        grid=grid1,
        description='Is this package cool?',
    )
    Element.objects.get_or_create(
        pk=1,
        text='Yes',
        feature=feature1,
        grid_package=gridpackages[1],
    )

    group1, created = Group.objects.get_or_create(
        pk=1,
        name='Moderators',
    )
    group1.permissions.clear()
    group1.permissions.set([
        Permission.objects.get(codename='delete_gridpackage'),
        Permission.objects.get(codename='delete_feature')
    ])

    def make_user(pk, username, password, is_superuser=False, is_staff=False,
                  groups=None, extra=None):
        # `extra` carries the legacy stored-password-hash kwargs some users were
        # created with; set_password() below overwrites the stored hash anyway.
        kwargs = dict(
            pk=pk,
            username=username,
            first_name='',
            last_name='',
            is_active=True,
            is_superuser=is_superuser,
            is_staff=is_staff,
            last_login='2010-01-01 12:00:00',
            email='',
            date_joined='2010-01-01 12:00:00',
        )
        if extra:
            kwargs.update(extra)
        user, _ = User.objects.get_or_create(**kwargs)
        if groups:
            user.groups.set(groups)
        user.set_password(password)
        user.save()
        return user

    # Each user's plaintext password equals their username.
    user1 = make_user(1, 'user', 'user')
    user2 = make_user(
        2, 'cleaner', 'cleaner', groups=[group1],
        extra={'password': 'pbkdf2_sha256$36000$Hp59Lym7JZyI$GVsyeRLCloSj4xI/1F5qf9dIZ2KF/ApMZFun7tiAxuc='})
    make_user(
        3, 'staff', 'staff', is_staff=True,
        extra={'password': 'pbkdf2_sha256$36000$4Ytv7EOqXyNl$Wsnq1GncbyYDUQ5ieQIEBCsoolNWLcApXChKYS5Us4I='})
    make_user(
        4, 'admin', 'admin', is_superuser=True, is_staff=True,
        extra={'password': 'pbkdf2_sha256$36000$HizLkJV9vzk4$++1pBxJlH/uqIn5Qx0jugTH1b3U5SyZTaqnm+kSk7pQ='})

    PackageExample.objects.get_or_create(
        pk=1,
        package=package1,
        url='http://www.example.com/',
        active=True,
        title='www.example.com',
    )
    PackageExample.objects.get_or_create(
        pk=2,
        package=package1,
        url=u'http://my.example.com/',
        active=True,
        title=u'my.example.com',
        created_by=user1,
    )
    PackageExample.objects.get_or_create(
        pk=3,
        package=package1,
        url=u'http://other.example.com/',
        active=True,
        title=u'other.example.com',
        created_by=user2,
    )
    datautil.reset_sequences(Grid, Group, User, Permission, Category, PackageExample,
                             Package, Element, Feature, GridPackage)
| |
# -*- coding: utf-8 -*-
from datetime import datetime
from bs4 import BeautifulSoup
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST, require_safe
from reportlab.lib import pagesizes
from reportlab.pdfgen.canvas import Canvas as PdfCanvas
from apps.lan.models import Attendee, LAN
from apps.seating.models import Seat, Seating
@require_safe
def main(request):
    """Redirect to the single upcoming LAN's seating if unambiguous, else the LAN list."""
    upcoming = LAN.objects.filter(end_date__gt=datetime.now()).order_by('-start_date')
    if upcoming.count() != 1:
        return redirect('seating_lan_list')
    return redirect('seating_details', lan_id=upcoming[0].id)
@require_safe
def lan_list(request):
    """List upcoming and previous LANs for seating selection."""
    context = {
        'upcoming_lans': LAN.objects.filter(end_date__gte=datetime.now()).order_by('start_date'),
        'previous_lans': LAN.objects.filter(end_date__lt=datetime.now()).order_by('-start_date'),
    }
    return render(request, 'seating/lan_list.html', context)
@require_safe
def main_filtered(request, lan_id):
    """Render the seating overview page for a single LAN."""
    lan = get_object_or_404(LAN, pk=lan_id)
    context = {}
    seating = Seating.objects.filter(lan=lan)
    # NOTE(review): 'seatings' holds *all* seatings, not just this LAN's; other
    # views (e.g. seating_details) filter by lan -- confirm this is intended.
    seatings = Seating.objects.all()
    context['seatings'] = seatings
    context['seating'] = seating
    context['active'] = 'all'
    context['lan'] = lan
    breadcrumbs = (
        (lan, lan.get_absolute_url()),
        (_(u'Seating'), ''),
    )
    context['breadcrumbs'] = breadcrumbs
    return render(request, 'seating/seating.html', context)
@require_safe
def seating_details(request, lan_id, seating_id=None, seat_id=None):
    """Render a seating map, decorating the layout markup with per-seat status."""
    lan = get_object_or_404(LAN, pk=lan_id)
    seatings = Seating.objects.filter(lan=lan)
    if not seating_id:
        # No seating chosen: fall back to the LAN's first seating, if any.
        if seatings:
            seating = seatings[0]
            return redirect(seating)
        else:
            return render(request, 'seating/seating.html')
    seating = get_object_or_404(Seating, pk=seating_id, lan=lan)
    seats = seating.get_total_seats()
    # The layout template is markup where each <a> tag represents one seat slot,
    # numbered in document order starting at 1.
    dom = BeautifulSoup(seating.layout.template, 'html.parser')
    seat_counter = 0
    for tag in dom.find_all('a'):
        seat_counter += 1
        seat_qs = seats.filter(placement=seat_counter)
        if not seat_qs.exists():
            # Layout slot with no matching Seat row: leave it undecorated.
            continue
        seat = seat_qs[0]
        children = tag.find_all('rect')
        children[0]['seat-number'] = seat.pk
        children[0]['seat-display'] = seat.placement
        if not seat.user:
            children[0]['class'] = ' seating-node-free'
            children[0]['status'] = 'free'
        else:
            if seat.user == request.user:
                children[0]['class'] = ' seating-node-self'
                children[0]['status'] = 'mine'
            else:
                children[0]['class'] = ' seating-node-occupied'
                children[0]['status'] = 'occupied'
                children[0]['seat-user'] = unicode(seat.user.username)
            # Separate title element for chrome support
            title = dom.new_tag('title')
            title.string = unicode(seat.user.username)
            tag.append(title)
    dom.encode('utf-8')
    context = {}
    context['lan'] = lan
    context['seatings'] = seatings
    context['seating'] = seating
    context['seat'] = seat_id
    if request.user.is_authenticated:
        context['user_ticket_types'] = seating.ticket_types.filter(ticket__user=request.user)
    context['hide_sidebar'] = True
    # NOTE(review): this stores the bound __str__ method, not its result; Django
    # templates call callables on render, but dom.__str__() would be clearer -- confirm.
    context['template'] = dom.__str__
    context['breadcrumbs'] = (
        (lan, lan.get_absolute_url()),
        (_(u'Seating'), ''),
    )
    return render(request, 'seating/seating.html', context)
@require_POST
@login_required()
def take_seat(request, seating_id):
    """Reserve a seat for the logged-in user, releasing any seat they already
    hold in this LAN.

    Requires an open seating, a paid attendee record or a valid ticket, and --
    when the seating restricts ticket types -- a ticket of an allowed type.
    """
    seating = get_object_or_404(Seating, pk=seating_id)
    lan = seating.lan
    if not seating.is_open():
        messages.error(request, _(u'The seating is closed.'))
        return redirect(seating)
    seat_id = get_post_seat_id(request, seating)
    if not seat_id:
        return redirect(seating)
    seat = get_object_or_404(Seat, pk=seat_id)
    # Collect users already seated in any seating belonging to this LAN.
    siblings = list(Seating.objects.filter(lan=lan))
    occupied = seating.get_user_registered()
    for sibling in siblings:
        occupied = occupied + sibling.get_user_registered()
    try:
        attendee = Attendee.objects.get(user=request.user, lan=lan)
    except ObjectDoesNotExist:
        attendee = None
    if (attendee and attendee.has_paid) or lan.has_ticket(request.user):
        # Bug fix: `not seating.ticket_types` was always False because a related
        # manager is always truthy, so unrestricted seatings still demanded a
        # matching ticket type. Use .exists() to test for actual restrictions.
        if not seating.ticket_types.exists() or (lan.has_ticket(request.user) and lan.has_ticket(request.user).ticket_type in seating.ticket_types.all()):
            if not seat.user:
                if request.user in occupied:
                    # Free any seat the user already holds in this LAN.
                    old_seats = Seat.objects.filter(user=request.user)
                    for os in old_seats:
                        if os.seating.lan == lan:
                            os.user = None
                            os.save()
                seat.user = request.user
                seat.save()
                messages.success(request, _(u'You have reserved your seat.'))
            else:
                messages.error(request, _(u'That seat is already taken.'))
        else:
            messages.warning(request, _(u'Your ticket does not work in this seating area.'))
    else:
        messages.warning(request, _(u'You need a ticket before reserving a seat.'))
        return redirect(lan)
    return redirect(seating)
@require_POST
@login_required()
def leave_seat(request, seating_id):
    """Release the given seat if it belongs to the requesting user."""
    seating = get_object_or_404(Seating, pk=seating_id)
    # Guard clauses: closed seating or missing/invalid seat id abort early.
    if not seating.is_open():
        messages.error(request, _(u'The seating is closed.'))
        return redirect(seating)
    seat_id = get_post_seat_id(request, seating)
    if not seat_id:
        return redirect(seating)
    seat = get_object_or_404(Seat, pk=seat_id)
    if seat.user != request.user:
        messages.error(request, _(u'This is not your seat.'))
        return redirect(seating)
    seat.user = None
    seat.save()
    messages.success(request, _(u'You have unreserved your seat.'))
    return redirect(seating)
def get_post_seat_id(request, seating):
    """Extract and validate the 'seat' id from POST data.

    Returns the seat id as an int, or None (after flashing an error message)
    when the value is missing or not numeric.
    """
    raw_value = request.POST.get('seat')
    if not raw_value:
        messages.error(request, _(u'No seat was specified.'))
        return None
    try:
        return int(raw_value)
    except ValueError:
        messages.error(request, _(u'Illegal seat.'))
        return None
@require_safe
@permission_required('seating.export_seating')
def seating_list(request, seating_id):
    """Export a seating's occupant list as a PDF: one line per seat, laid out
    in two columns per A4 page."""
    seating = get_object_or_404(Seating, pk=seating_id)
    lan = get_object_or_404(LAN, id=seating.lan.id)
    seats = list(Seat.objects.filter(seating=seating).order_by('placement'))
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=' + seating.title + '.pdf'
    page_size = pagesizes.A4
    page_width, page_height = page_size
    page_width_center = page_width / 2
    p = PdfCanvas(response, pagesize=page_size)
    # Layout state: text starts below the page header and walks down a column.
    cursor_top = page_height - 120
    left_col_offset = 50
    right_col_offset = 300
    # 1 is left, 2 is right
    page_side = 1
    cursor = cursor_top
    new_page = True
    x_offset = left_col_offset
    page_num = 1
    for seat in seats:
        if cursor < 70:
            # Flip to right side or new page
            cursor = cursor_top
            if page_side == 1:
                page_side = 2
                x_offset = right_col_offset
            else:
                page_side = 1
                x_offset = left_col_offset
                new_page = True
                page_num += 1
                p.showPage()
        if new_page:
            # Draw the page header: LAN title, seating title, and page number.
            new_page = False
            p.setFont('Times-Roman', 25)
            p.drawCentredString(page_width_center, page_height - 60, lan.title)
            p.setFont('Times-Roman', 20)
            p.drawCentredString(page_width_center, page_height - 90, seating.title)
            p.setFont('Helvetica', 14)
            p.drawCentredString(page_width_center, 40, '{0} {1}'.format(_(u'Page'), page_num))
        # For seat text
        p.setFont('Helvetica', 14)
        occupant = unicode(seat.user) if seat.user else ''
        p.drawString(x_offset, cursor, u'{0} {1}: {2}'.format(_(u'Seat'), seat.placement, occupant))
        cursor -= 20
    p.showPage()
    p.save()
    return response
@require_safe
@permission_required('seating.export_seating')
def seating_map(request, seating_id):
    """Export per-seat placards as a PDF, two seats per A4 page (top and
    bottom halves)."""
    seating = get_object_or_404(Seating, pk=seating_id)
    lan = get_object_or_404(LAN, id=seating.lan.id)
    seats = list(Seat.objects.filter(seating=seating).order_by('placement'))
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=' + seating.title + '.pdf'
    page_size = pagesizes.A4
    page_width, page_height = page_size
    page_width_center = page_width / 2
    page_height_center = page_height / 2
    p = PdfCanvas(response, pagesize=page_size)
    # new_page toggles between the top half (fresh page) and the bottom half.
    new_page = True
    y_offset = page_height_center
    for seat in seats:
        p.setFont('Times-Roman', 40)
        p.drawCentredString(page_width_center, y_offset + 310, lan.title)
        p.setFont('Times-Roman', 35)
        text = _(u'%(seating)s, seat %(seat)d') % {'seating': seating.title, 'seat': seat.placement}
        p.drawCentredString(page_width_center, y_offset + 250, text)
        if seat.user:
            p.setFont('Helvetica', 40)
            occupant = unicode(seat.user)
        else:
            p.setFont('Helvetica', 40)
            occupant = _(u'(Available)')
        p.drawCentredString(page_width_center, y_offset + 150, occupant)
        if new_page:
            # Top half drawn; the next seat goes on the bottom half of this page.
            new_page = False
            y_offset = 0
        else:
            # Bottom half done; emit the page and start a fresh one.
            new_page = True
            y_offset = page_height_center
            p.showPage()
    p.showPage()
    p.save()
    return response
| |
from __future__ import unicode_literals
import re
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
from django.db.models.lookups import Lookup
from django.db.models.sql.expressions import SQLEvaluator
from django.utils import six
gis_lookups = {}
class GISLookup(Lookup):
    """Base class for GeoDjango lookups; SQL generation is delegated to the
    backend operator registered under each subclass's `lookup_name`."""
    sql_template = None
    transform_func = None
    distance = False
    @classmethod
    def _check_geo_field(cls, opts, lookup):
        """
        Utility for checking the given lookup with the given model options.
        The lookup is a string either specifying the geographic field, e.g.
        'point, 'the_geom', or a related lookup on a geographic field like
        'address__point'.
        If a GeometryField exists according to the given lookup on the model
        options, it will be returned. Otherwise returns None.
        """
        from django.contrib.gis.db.models.fields import GeometryField
        # This takes into account the situation where the lookup is a
        # lookup to a related geographic field, e.g., 'address__point'.
        field_list = lookup.split(LOOKUP_SEP)
        # Reversing so list operates like a queue of related lookups,
        # and popping the top lookup.
        field_list.reverse()
        fld_name = field_list.pop()
        try:
            geo_fld = opts.get_field(fld_name)
            # If the field list is still around, then it means that the
            # lookup was for a geometry field across a relationship --
            # thus we keep on getting the related model options and the
            # model field associated with the next field in the list
            # until there's no more left.
            while len(field_list):
                opts = geo_fld.rel.to._meta
                geo_fld = opts.get_field(field_list.pop())
        except (FieldDoesNotExist, AttributeError):
            return False
        # Finally, make sure we got a Geographic field and return.
        if isinstance(geo_fld, GeometryField):
            return geo_fld
        else:
            return False
    def get_db_prep_lookup(self, value, connection):
        # get_db_prep_lookup is called by process_rhs from super class
        if isinstance(value, (tuple, list)):
            # First param is assumed to be the geometric object
            params = [connection.ops.Adapter(value[0])] + list(value)[1:]
        else:
            params = [connection.ops.Adapter(value)]
        return ('%s', params)
    def process_rhs(self, qn, connection):
        """Process the right-hand side, resolving geometry placeholders and
        F-expression SRIDs before SQL generation."""
        rhs, rhs_params = super(GISLookup, self).process_rhs(qn, connection)
        geom = self.rhs
        if isinstance(self.rhs, SQLEvaluator):
            # Make sure the F Expression destination field exists, and
            # set an `srid` attribute with the same as that of the
            # destination.
            geo_fld = self._check_geo_field(self.rhs.opts, self.rhs.expression.name)
            if not geo_fld:
                raise ValueError('No geographic field found in expression.')
            self.rhs.srid = geo_fld.srid
        elif isinstance(self.rhs, (list, tuple)):
            # The geometry is the first element; the rest are lookup parameters.
            geom = self.rhs[0]
        rhs = connection.ops.get_geom_placeholder(self.lhs.source, geom)
        return rhs, rhs_params
    def as_sql(self, qn, connection):
        # Delegate final SQL rendering to the backend's registered operator.
        lhs_sql, sql_params = self.process_lhs(qn, connection)
        rhs_sql, rhs_params = self.process_rhs(qn, connection)
        sql_params.extend(rhs_params)
        template_params = {'lhs': lhs_sql, 'rhs': rhs_sql}
        backend_op = connection.ops.gis_operators[self.lookup_name]
        return backend_op.as_sql(connection, self, template_params, sql_params)
# ------------------
# Geometry operators
# ------------------
# Each class below names one bounding-box operator; registration in the
# module-level `gis_lookups` dict makes the lookup resolvable by name.
class OverlapsLeftLookup(GISLookup):
    """
    The overlaps_left operator returns true if A's bounding box overlaps or is to the
    left of B's bounding box.
    """
    lookup_name = 'overlaps_left'
gis_lookups['overlaps_left'] = OverlapsLeftLookup
class OverlapsRightLookup(GISLookup):
    """
    The 'overlaps_right' operator returns true if A's bounding box overlaps or is to the
    right of B's bounding box.
    """
    lookup_name = 'overlaps_right'
gis_lookups['overlaps_right'] = OverlapsRightLookup
class OverlapsBelowLookup(GISLookup):
    """
    The 'overlaps_below' operator returns true if A's bounding box overlaps or is below
    B's bounding box.
    """
    lookup_name = 'overlaps_below'
gis_lookups['overlaps_below'] = OverlapsBelowLookup
class OverlapsAboveLookup(GISLookup):
    """
    The 'overlaps_above' operator returns true if A's bounding box overlaps or is above
    B's bounding box.
    """
    lookup_name = 'overlaps_above'
gis_lookups['overlaps_above'] = OverlapsAboveLookup
class LeftLookup(GISLookup):
    """
    The 'left' operator returns true if A's bounding box is strictly to the left
    of B's bounding box.
    """
    lookup_name = 'left'
gis_lookups['left'] = LeftLookup
class RightLookup(GISLookup):
    """
    The 'right' operator returns true if A's bounding box is strictly to the right
    of B's bounding box.
    """
    lookup_name = 'right'
gis_lookups['right'] = RightLookup
class StrictlyBelowLookup(GISLookup):
    """
    The 'strictly_below' operator returns true if A's bounding box is strictly below B's
    bounding box.
    """
    lookup_name = 'strictly_below'
gis_lookups['strictly_below'] = StrictlyBelowLookup
class StrictlyAboveLookup(GISLookup):
    """
    The 'strictly_above' operator returns true if A's bounding box is strictly above B's
    bounding box.
    """
    lookup_name = 'strictly_above'
gis_lookups['strictly_above'] = StrictlyAboveLookup
class SameAsLookup(GISLookup):
    """
    The "~=" operator is the "same as" operator. It tests actual geometric
    equality of two features. So if A and B are the same feature,
    vertex-by-vertex, the operator returns true.
    """
    lookup_name = 'same_as'
gis_lookups['same_as'] = SameAsLookup
class ExactLookup(SameAsLookup):
    # Alias of same_as
    lookup_name = 'exact'
gis_lookups['exact'] = ExactLookup
class BBContainsLookup(GISLookup):
    """
    The 'bbcontains' operator returns true if A's bounding box completely contains
    B's bounding box.
    """
    lookup_name = 'bbcontains'
gis_lookups['bbcontains'] = BBContainsLookup
class BBOverlapsLookup(GISLookup):
    """
    The 'bboverlaps' operator returns true if A's bounding box overlaps B's bounding box.
    """
    lookup_name = 'bboverlaps'
gis_lookups['bboverlaps'] = BBOverlapsLookup
class ContainedLookup(GISLookup):
    """
    The 'contained' operator returns true if A's bounding box is completely contained
    by B's bounding box.
    """
    lookup_name = 'contained'
gis_lookups['contained'] = ContainedLookup
# ------------------
# Geometry functions
# ------------------
class ContainsLookup(GISLookup):
lookup_name = 'contains'
gis_lookups['contains'] = ContainsLookup
class ContainsProperlyLookup(GISLookup):
lookup_name = 'contains_properly'
gis_lookups['contains_properly'] = ContainsProperlyLookup
class CoveredByLookup(GISLookup):
lookup_name = 'coveredby'
gis_lookups['coveredby'] = CoveredByLookup
class CoversLookup(GISLookup):
lookup_name = 'covers'
gis_lookups['covers'] = CoversLookup
class CrossesLookup(GISLookup):
lookup_name = 'crosses'
gis_lookups['crosses'] = CrossesLookup
class DisjointLookup(GISLookup):
    """Spatial lookup for the geometry 'disjoint' predicate."""
    lookup_name = 'disjoint'


gis_lookups[DisjointLookup.lookup_name] = DisjointLookup
class EqualsLookup(GISLookup):
    """Spatial lookup for the geometry 'equals' predicate."""
    lookup_name = 'equals'


gis_lookups[EqualsLookup.lookup_name] = EqualsLookup
class IntersectsLookup(GISLookup):
    """Spatial lookup for the geometry 'intersects' predicate."""
    lookup_name = 'intersects'


gis_lookups[IntersectsLookup.lookup_name] = IntersectsLookup
class OverlapsLookup(GISLookup):
    """Spatial lookup for the geometry 'overlaps' predicate."""
    lookup_name = 'overlaps'


gis_lookups[OverlapsLookup.lookup_name] = OverlapsLookup
class RelateLookup(GISLookup):
    """
    Spatial lookup for the 'relate' operation: compares two geometries
    against a DE-9IM intersection-matrix pattern supplied by the caller.
    """
    lookup_name = 'relate'
    # The extra trailing placeholder receives the intersection pattern.
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
    # A valid DE-9IM pattern is exactly nine characters from {0, 1, 2, T, F, *}.
    pattern_regex = re.compile(r'^[012TF\*]{9}$')

    def get_db_prep_lookup(self, value, connection):
        # `value` must be a (geometry, pattern) two-tuple.
        if len(value) != 2:
            raise ValueError('relate must be passed a two-tuple')
        # Check the pattern argument: prefer the backend operator's own
        # validator when it defines one, otherwise fall back to the
        # generic DE-9IM regex above.
        backend_op = connection.ops.gis_operators[self.lookup_name]
        if hasattr(backend_op, 'check_relate_argument'):
            backend_op.check_relate_argument(value[1])
        else:
            pattern = value[1]
            if not isinstance(pattern, six.string_types) or not self.pattern_regex.match(pattern):
                raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        return super(RelateLookup, self).get_db_prep_lookup(value, connection)
gis_lookups['relate'] = RelateLookup
class TouchesLookup(GISLookup):
    """Spatial lookup for the geometry 'touches' predicate."""
    lookup_name = 'touches'


gis_lookups[TouchesLookup.lookup_name] = TouchesLookup
class WithinLookup(GISLookup):
    """Spatial lookup for the geometry 'within' predicate."""
    lookup_name = 'within'


gis_lookups[WithinLookup.lookup_name] = WithinLookup
class DistanceLookupBase(GISLookup):
    """
    Shared base for the distance lookups (dwithin, distance_gt/gte/lt/lte):
    compares the computed distance between two geometries to a given value.
    """
    distance = True
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s'

    def get_db_prep_lookup(self, value, connection):
        # Expected tuple/list form: (geometry, distance) with an optional
        # third element (e.g. a backend-specific option) -- TODO confirm
        # third-element semantics against connection.ops.get_distance.
        if isinstance(value, (tuple, list)):
            if not 2 <= len(value) <= 3:
                raise ValueError("2 or 3-element tuple required for '%s' lookup." % self.lookup_name)
            params = [connection.ops.Adapter(value[0])]
            # Getting the distance parameter in the units of the field.
            params += connection.ops.get_distance(self.lhs.output_field, value[1:], self.lookup_name)
            return ('%s', params)
        else:
            return super(DistanceLookupBase, self).get_db_prep_lookup(value, connection)
class DWithinLookup(DistanceLookupBase):
    """Distance lookup for the 'dwithin' predicate."""
    lookup_name = 'dwithin'
    # The distance tolerance is passed as a third function argument.
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'


gis_lookups[DWithinLookup.lookup_name] = DWithinLookup
class DistanceGTLookup(DistanceLookupBase):
    """Distance-comparison lookup registered as 'distance_gt'."""
    lookup_name = 'distance_gt'


gis_lookups[DistanceGTLookup.lookup_name] = DistanceGTLookup
class DistanceGTELookup(DistanceLookupBase):
    """Distance-comparison lookup registered as 'distance_gte'."""
    lookup_name = 'distance_gte'


gis_lookups[DistanceGTELookup.lookup_name] = DistanceGTELookup
class DistanceLTLookup(DistanceLookupBase):
    """Distance-comparison lookup registered as 'distance_lt'."""
    lookup_name = 'distance_lt'


gis_lookups[DistanceLTLookup.lookup_name] = DistanceLTLookup
class DistanceLTELookup(DistanceLookupBase):
    """Distance-comparison lookup registered as 'distance_lte'."""
    lookup_name = 'distance_lte'


gis_lookups[DistanceLTELookup.lookup_name] = DistanceLTELookup
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import versionutils
from nova import objects
from nova.objects import fields as obj_fields
from nova.scheduler.filters import image_props_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestImagePropsFilter(test.NoDBTestCase):
    """
    Tests for the ImagePropertiesFilter scheduler filter: hosts pass when
    their advertised (arch, hv_type, vm_mode) capabilities and hypervisor
    version satisfy the image's requested properties.
    """

    def setUp(self):
        super(TestImagePropsFilter, self).setUp()
        self.filt_cls = image_props_filter.ImagePropertiesFilter()

    def test_image_properties_filter_passes_same_inst_props_and_version(self):
        """Host matching arch/hv/vm_mode and version range passes."""
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_architecture=obj_fields.Architecture.X86_64,
                img_hv_type=obj_fields.HVType.KVM,
                hw_vm_mode=obj_fields.VMMode.HVM,
                img_hv_requested_version='>=6.0,<6.2'))
        spec_obj = objects.RequestSpec(image=img_props)
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.KVM,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_uses_default_conf_value(self):
        """With no image arch, the configured default (x86_64) is applied,
        so an AARCH64-only host fails."""
        self.flags(image_properties_default_architecture='x86_64',
                   group='filter_scheduler')
        img_props = objects.ImageMeta(properties=objects.ImageMetaProps())
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.AARCH64,
                obj_fields.HVType.KVM,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_fails_different_inst_props(self):
        """Host whose arch/hv_type differ from the image's fails."""
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_architecture=obj_fields.Architecture.ARMV7,
                img_hv_type=obj_fields.HVType.QEMU,
                hw_vm_mode=obj_fields.VMMode.HVM))
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.KVM,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_fails_different_hyper_version(self):
        """Host hypervisor version outside the requested range fails."""
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_architecture=obj_fields.Architecture.X86_64,
                img_hv_type=obj_fields.HVType.KVM,
                hw_vm_mode=obj_fields.VMMode.HVM,
                img_hv_requested_version='>=6.2'))
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'enabled': True,
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.KVM,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_passes_partial_inst_props(self):
        """Image specifying only arch and vm_mode still matches a host
        whose capability tuple agrees on those fields."""
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_architecture=obj_fields.Architecture.X86_64,
                hw_vm_mode=obj_fields.VMMode.HVM))
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.KVM,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_fails_partial_inst_props(self):
        """Partial image props still fail when a specified field (vm_mode)
        disagrees with the host capabilities."""
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_architecture=obj_fields.Architecture.X86_64,
                hw_vm_mode=obj_fields.VMMode.HVM))
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.XEN,
                obj_fields.VMMode.XEN)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_passes_without_inst_props(self):
        """A request with no image at all passes any host."""
        spec_obj = objects.RequestSpec(image=None)
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.KVM,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_fails_without_host_props(self):
        """Host advertising no supported_instances fails an image that
        specifies instance properties."""
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_architecture=obj_fields.Architecture.X86_64,
                img_hv_type=obj_fields.HVType.KVM,
                hw_vm_mode=obj_fields.VMMode.HVM))
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'enabled': True,
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_passes_without_hyper_version(self):
        """Host lacking a hypervisor_version capability passes despite the
        image requesting a version range."""
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_architecture=obj_fields.Architecture.X86_64,
                img_hv_type=obj_fields.HVType.KVM,
                hw_vm_mode=obj_fields.VMMode.HVM,
                img_hv_requested_version='>=6.0'))
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'enabled': True,
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.KVM,
                obj_fields.VMMode.HVM)]}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_fails_with_unsupported_hyper_ver(self):
        """A raw hypervisor_version below the requested minimum fails."""
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_architecture=obj_fields.Architecture.X86_64,
                img_hv_type=obj_fields.HVType.KVM,
                hw_vm_mode=obj_fields.VMMode.HVM,
                img_hv_requested_version='>=6.0'))
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'enabled': True,
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.KVM,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': 5000}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_pv_mode_compat(self):
        # if an old image has 'pv' for a vm_mode it should be treated as xen
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_vm_mode='pv'))
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.XEN,
                obj_fields.VMMode.XEN)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_hvm_mode_compat(self):
        # if an old image has 'hv' for a vm_mode it should be treated as hvm
        # (the assertion below matches against HVM host capabilities)
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_vm_mode='hv'))
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.X86_64,
                obj_fields.HVType.KVM,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_xen_arch_compat(self):
        # if an old image has 'x86_32' for arch it should be treated as i686
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_architecture='x86_32'))
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.I686,
                obj_fields.HVType.KVM,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_xen_hv_type_compat(self):
        # if an old image has 'xapi' for hv_type it should be treated as xen
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                img_hv_type='xapi'))
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.I686,
                obj_fields.HVType.XEN,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))

    def test_image_properties_filter_baremetal_vmmode_compat(self):
        # if an old image has 'baremetal' for vmmode it should be
        # treated as hvm
        img_props = objects.ImageMeta(
            properties=objects.ImageMetaProps(
                hw_vm_mode='baremetal'))
        hypervisor_version = versionutils.convert_version_to_int('6.0.0')
        spec_obj = objects.RequestSpec(image=img_props)
        capabilities = {
            'supported_instances': [(
                obj_fields.Architecture.I686,
                obj_fields.HVType.BAREMETAL,
                obj_fields.VMMode.HVM)],
            'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
| |
"""
Tests of the permissions on specific models in the auth app. For tests of the permissions system itself, see test_permission_classes.py
"""
from __future__ import absolute_import, print_function, unicode_literals
from django.test import TestCase
from .helpers import create_dummy_facility_data
from ..constants import role_kinds
from ..errors import InvalidHierarchyRelationsArgument
from ..filters import HierarchyRelationsFilter
from ..models import DeviceOwner, Facility, FacilityDataset, Classroom, LearnerGroup, Role, Membership, FacilityUser, KolibriAnonymousUser
class ImproperUsageIsProperlyHandledTestCase(TestCase):
    """
    Tests that error cases and misuse of the interface are properly caught.
    """

    def setUp(self):
        # Two independent facility fixtures so cross-facility checks are possible.
        self.data1 = create_dummy_facility_data()
        self.data2 = create_dummy_facility_data()
        self.device_owner = DeviceOwner.objects.create(username="boss")
        self.anon_user = KolibriAnonymousUser()

    def test_that_checking_creation_perms_on_invalid_model_returns_false(self):
        # cannot create a LearnerGroup with invalid attribute name
        self.assertFalse(self.data1["facility_admin"].can_create(LearnerGroup, {"bad_attr_name": 77, "parent": self.data1["facility"]}))
        # cannot create a LearnerGroup with missing attribute value ("name")
        self.assertFalse(self.data1["facility_admin"].can_create(LearnerGroup, {"parent": self.data1["facility"]}))

    def test_that_getting_roles_for_noncollection_fails(self):
        # role queries against a plain object (not a user/collection) raise ValueError
        with self.assertRaises(ValueError):
            self.data1["facility_admin"].get_roles_for(object())
        with self.assertRaises(ValueError):
            self.data1["facility_admin"].has_role_for([role_kinds.ADMIN], object())

    def test_that_getting_roles_for_deviceowner_returns_false(self):
        self.assertFalse(self.data1["facility_admin"].has_role_for_user([role_kinds.ADMIN], self.device_owner))

    def test_that_getting_roles_for_anonuser_returns_false(self):
        self.assertFalse(self.data1["facility_admin"].has_role_for_user([role_kinds.ADMIN], self.anon_user))

    def test_that_getting_roles_for_user_in_other_facility_returns_false(self):
        self.assertFalse(self.data1["facility_admin"].has_role_for_user([role_kinds.ADMIN], self.data2["learners_one_group"][0][0]))

    def test_that_invalid_references_to_hierarchyrelationsfilter_throw_errors(self):
        # non-model arguments to filter_by_hierarchy are rejected explicitly
        with self.assertRaises(InvalidHierarchyRelationsArgument):
            HierarchyRelationsFilter(Facility).filter_by_hierarchy(target_user=object())
        with self.assertRaises(InvalidHierarchyRelationsArgument):
            HierarchyRelationsFilter(Facility).filter_by_hierarchy(target_user=["test"])
class FacilityDatasetPermissionsTestCase(TestCase):
    """
    Tests of permissions for reading/modifying FacilityDataset instances
    """

    def setUp(self):
        self.data1 = create_dummy_facility_data()
        self.data2 = create_dummy_facility_data()
        self.device_owner = DeviceOwner.objects.create(username="boss")
        self.anon_user = KolibriAnonymousUser()

    def test_facility_users_and_anon_users_cannot_create_facility_dataset(self):
        """ FacilityUsers can't create new Facilities, regardless of their roles """
        new_facility_dataset = {}
        self.assertFalse(self.data1["facility_admin"].can_create(FacilityDataset, new_facility_dataset))
        self.assertFalse(self.data1["classroom_coaches"][0].can_create(FacilityDataset, new_facility_dataset))
        self.assertFalse(self.data1["learners_one_group"][0][0].can_create(FacilityDataset, new_facility_dataset))
        self.assertFalse(self.data1["unattached_users"][0].can_create(FacilityDataset, new_facility_dataset))
        # Fixed: this previously duplicated the unattached-user check; the test
        # name promises the anonymous user is also denied creation.
        self.assertFalse(self.anon_user.can_create(FacilityDataset, new_facility_dataset))

    def test_anon_users_cannot_read_facility_dataset(self):
        """ KolibriAnonymousUser cannot read FacilityDataset objects """
        self.assertFalse(self.anon_user.can_read(self.data1["dataset"]))
        self.assertNotIn(self.data1["dataset"], self.anon_user.filter_readable(FacilityDataset.objects.all()))

    def test_only_facility_admins_can_update_own_facility_dataset(self):
        """ The only FacilityUser who can update a FacilityDataset is a facility admin for that FacilityDataset """
        own_dataset = self.data1["dataset"]
        self.assertTrue(self.data1["facility_admin"].can_update(own_dataset))
        self.assertFalse(self.data1["classroom_coaches"][0].can_update(own_dataset))
        self.assertFalse(self.data1["learners_one_group"][0][0].can_update(own_dataset))
        self.assertFalse(self.data1["unattached_users"][0].can_update(own_dataset))
        self.assertFalse(self.anon_user.can_update(own_dataset))

    def test_facility_users_and_anon_users_cannot_delete_own_facility_dataset(self):
        """ FacilityUsers can't delete own FacilityDataset, regardless of their roles """
        own_dataset = self.data1["dataset"]
        self.assertFalse(self.data1["facility_admin"].can_delete(own_dataset))
        self.assertFalse(self.data1["classroom_coaches"][0].can_delete(own_dataset))
        self.assertFalse(self.data1["learners_one_group"][0][0].can_delete(own_dataset))
        self.assertFalse(self.data1["unattached_users"][0].can_delete(own_dataset))
        self.assertFalse(self.anon_user.can_delete(own_dataset))

    def test_facility_users_cannot_delete_other_facility_dataset(self):
        """ FacilityUsers can't delete other FacilityDataset, regardless of their roles """
        other_facility_dataset = self.data2["dataset"]
        self.assertFalse(self.data1["facility_admin"].can_delete(other_facility_dataset))
        self.assertFalse(self.data1["classroom_coaches"][0].can_delete(other_facility_dataset))
        self.assertFalse(self.data1["learners_one_group"][0][0].can_delete(other_facility_dataset))
        self.assertFalse(self.data1["unattached_users"][0].can_delete(other_facility_dataset))

    def test_device_owner_can_do_anything_to_a_facility_dataset(self):
        """ DeviceOwner can do anything to a FacilityDataset """
        new_facility_data = {}
        self.assertTrue(self.device_owner.can_create(FacilityDataset, new_facility_data))
        facility_dataset = self.data1["dataset"]
        self.assertTrue(self.device_owner.can_read(facility_dataset))
        self.assertTrue(self.device_owner.can_update(facility_dataset))
        self.assertTrue(self.device_owner.can_delete(facility_dataset))
        self.assertSetEqual(set(FacilityDataset.objects.all()), set(self.device_owner.filter_readable(FacilityDataset.objects.all())))
class FacilityPermissionsTestCase(TestCase):
    """
    Tests of permissions for reading/modifying Facility instances
    """

    def setUp(self):
        self.data1 = create_dummy_facility_data()
        # data2 permits sign-ups, so anonymous read behavior can be contrasted.
        self.data2 = create_dummy_facility_data(allow_sign_ups=True)
        self.device_owner = DeviceOwner.objects.create(username="boss")
        self.anon_user = KolibriAnonymousUser()

    def test_facility_users_and_anon_users_cannot_create_facility(self):
        """ FacilityUsers can't create new Facilities, regardless of their roles """
        new_facility_data = {"name": "Home"}
        self.assertFalse(self.data1["facility_admin"].can_create(Facility, new_facility_data))
        self.assertFalse(self.data1["classroom_coaches"][0].can_create(Facility, new_facility_data))
        self.assertFalse(self.data1["learners_one_group"][0][0].can_create(Facility, new_facility_data))
        self.assertFalse(self.data1["unattached_users"][0].can_create(Facility, new_facility_data))
        # Fixed: this previously duplicated the unattached-user check; the test
        # name promises the anonymous user is also denied creation.
        self.assertFalse(self.anon_user.can_create(Facility, new_facility_data))

    def test_facility_users_can_read_own_facility(self):
        """ FacilityUsers can read their own Facility, regardless of their roles """
        own_facility = self.data1["facility"]
        for user in [self.data1["facility_admin"], self.data1["classroom_coaches"][0],
                     self.data1["learners_one_group"][0][0], self.data1["unattached_users"][0]]:
            self.assertTrue(user.can_read(own_facility))
            self.assertIn(own_facility, user.filter_readable(Facility.objects.all()))

    def test_facility_users_cannot_read_other_facility(self):
        """ FacilityUsers cannot read other Facilities, regardless of their roles """
        other_facility = self.data2["facility"]
        for user in [self.data1["facility_admin"], self.data1["classroom_coaches"][0],
                     self.data1["learners_one_group"][0][0], self.data1["unattached_users"][0]]:
            self.assertFalse(user.can_read(other_facility))
            self.assertNotIn(other_facility, user.filter_readable(Facility.objects.all()))

    def test_anon_users_cannot_read_facility(self):
        """ KolibriAnonymousUser cannot read Facility objects """
        self.assertFalse(self.anon_user.can_read(self.data1["facility"]))
        self.assertNotIn(self.data1["facility"], self.anon_user.filter_readable(Facility.objects.all()))

    def test_only_facility_admins_can_update_own_facility(self):
        """ The only FacilityUser who can update a Facility is a facility admin for that Facility """
        own_facility = self.data1["facility"]
        self.assertTrue(self.data1["facility_admin"].can_update(own_facility))
        self.assertFalse(self.data1["classroom_coaches"][0].can_update(own_facility))
        self.assertFalse(self.data1["learners_one_group"][0][0].can_update(own_facility))
        self.assertFalse(self.data1["unattached_users"][0].can_update(own_facility))
        self.assertFalse(self.anon_user.can_update(own_facility))

    def test_facility_users_cannot_update_other_facility(self):
        """ FacilityUsers cannot update other Facilities, regardless of their roles """
        other_facility = self.data2["facility"]
        self.assertFalse(self.data1["facility_admin"].can_update(other_facility))
        self.assertFalse(self.data1["classroom_coaches"][0].can_update(other_facility))
        self.assertFalse(self.data1["learners_one_group"][0][0].can_update(other_facility))
        self.assertFalse(self.data1["unattached_users"][0].can_update(other_facility))

    def test_facility_users_and_anon_users_cannot_delete_own_facility(self):
        """ FacilityUsers can't delete own Facility, regardless of their roles """
        own_facility = self.data1["facility"]
        self.assertFalse(self.data1["facility_admin"].can_delete(own_facility))
        self.assertFalse(self.data1["classroom_coaches"][0].can_delete(own_facility))
        self.assertFalse(self.data1["learners_one_group"][0][0].can_delete(own_facility))
        self.assertFalse(self.data1["unattached_users"][0].can_delete(own_facility))
        self.assertFalse(self.anon_user.can_delete(own_facility))

    def test_facility_users_cannot_delete_other_facility(self):
        """ FacilityUsers can't delete other Facility, regardless of their roles """
        other_facility = self.data2["facility"]
        self.assertFalse(self.data1["facility_admin"].can_delete(other_facility))
        self.assertFalse(self.data1["classroom_coaches"][0].can_delete(other_facility))
        self.assertFalse(self.data1["learners_one_group"][0][0].can_delete(other_facility))
        self.assertFalse(self.data1["unattached_users"][0].can_delete(other_facility))

    def test_device_owner_can_do_anything_to_a_facility(self):
        """ DeviceOwner can do anything to a Facility """
        new_facility_data = {"name": "Home"}
        self.assertTrue(self.device_owner.can_create(Facility, new_facility_data))
        facility = self.data1["facility"]
        self.assertTrue(self.device_owner.can_read(facility))
        self.assertTrue(self.device_owner.can_update(facility))
        self.assertTrue(self.device_owner.can_delete(facility))
        self.assertSetEqual(set(Facility.objects.all()), set(self.device_owner.filter_readable(Facility.objects.all())))

    def test_anon_user_can_read_facilities_that_allow_sign_ups(self):
        """ KolibriAnonymousUser can only read Facilities that permit sign-ups """
        can_not_sign_up_facility = self.data1['facility']
        can_sign_up_facility = self.data2['facility']
        self.assertFalse(self.anon_user.can_read(can_not_sign_up_facility))
        self.assertTrue(self.anon_user.can_read(can_sign_up_facility))

    def test_anon_user_filters_facility_datasets_that_allow_sign_ups(self):
        """ filter_readable for an anonymous user matches the sign-up-enabled set """
        sign_ups = Facility.objects.filter(dataset__learner_can_sign_up=True)
        filtered = self.anon_user.filter_readable(Facility.objects.all())
        self.assertEqual(set(sign_ups), set(filtered))

    def test_anon_user_can_only_read_facilities_that_allow_sign_ups(self):
        """ Sign-up read access does not extend to classrooms or learner groups """
        self.assertFalse(self.anon_user.can_read(self.data2['classrooms'][0]))
        self.assertFalse(self.anon_user.can_read(self.data2['learnergroups'][0][0]))
class ClassroomPermissionsTestCase(TestCase):
    """
    Tests of permissions for reading/modifying Classroom instances
    """

    def setUp(self):
        self.data = create_dummy_facility_data()
        # A learner who is a member of the first classroom's first group.
        self.member = self.data["learners_one_group"][0][0]
        self.own_classroom = self.data["classrooms"][0]
        self.other_classroom = self.data["classrooms"][1]
        self.own_classroom_coach = self.data["classroom_coaches"][0]
        self.own_classroom_admin = self.data["classroom_admins"][0]
        self.device_owner = DeviceOwner.objects.create(username="boss")
        self.anon_user = KolibriAnonymousUser()

    def test_only_facility_admin_can_create_classroom(self):
        """ The only FacilityUser who can create a Classroom is a facility admin for the Facility """
        new_classroom_data = {"name": "Home", "parent": self.data["facility"]}
        self.assertTrue(self.data["facility_admin"].can_create(Classroom, new_classroom_data))
        self.assertFalse(self.own_classroom_coach.can_create(Classroom, new_classroom_data))
        self.assertFalse(self.member.can_create(Classroom, new_classroom_data))
        self.assertFalse(self.data["unattached_users"][0].can_create(Classroom, new_classroom_data))
        self.assertFalse(self.anon_user.can_create(Classroom, new_classroom_data))

    def test_members_can_read_own_classroom(self):
        """ Members of a Classroom can read that Classroom, as can coaches and admins for the Classroom """
        for user in [self.data["facility_admin"], self.own_classroom_coach,
                     self.own_classroom_admin, self.member]:
            self.assertTrue(user.can_read(self.own_classroom))
            self.assertIn(self.own_classroom, user.filter_readable(Classroom.objects.all()))

    def test_members_and_classroom_admins_and_coaches_can_read_other_classroom(self):
        """ Members and admins/coaches for a Classroom can read another Classroom """
        for user in [self.data["facility_admin"], self.own_classroom_coach,
                     self.own_classroom_admin, self.member]:
            self.assertTrue(user.can_read(self.other_classroom))
            self.assertIn(self.other_classroom, user.filter_readable(Classroom.objects.all()))

    def test_only_admins_can_update_own_classroom(self):
        """ The only FacilityUsers who can update a Classroom are admins for that Classroom (or for the Facility) """
        self.assertTrue(self.data["facility_admin"].can_update(self.own_classroom))
        self.assertTrue(self.own_classroom_admin.can_update(self.own_classroom))
        self.assertFalse(self.own_classroom_coach.can_update(self.own_classroom))
        self.assertFalse(self.member.can_update(self.own_classroom))
        self.assertFalse(self.anon_user.can_update(self.own_classroom))

    def test_facility_users_cannot_update_other_classroom(self):
        """ FacilityUsers cannot update other Classrooms, unless they are a facility admin """
        self.assertFalse(self.own_classroom_admin.can_update(self.other_classroom))
        self.assertFalse(self.own_classroom_coach.can_update(self.other_classroom))
        self.assertFalse(self.member.can_update(self.other_classroom))

    def test_only_admins_can_delete_own_classroom(self):
        """ The only FacilityUsers who can delete a Classroom are admins for the Facility """
        self.assertTrue(self.data["facility_admin"].can_delete(self.own_classroom))
        # Note: a classroom-level admin may update but NOT delete the classroom.
        self.assertFalse(self.own_classroom_admin.can_delete(self.own_classroom))
        self.assertFalse(self.own_classroom_coach.can_delete(self.own_classroom))
        self.assertFalse(self.member.can_delete(self.own_classroom))
        self.assertFalse(self.anon_user.can_delete(self.own_classroom))

    def test_facility_users_cannot_delete_other_classroom(self):
        """ FacilityUsers cannot delete other Classrooms, unless they are a facility admin """
        self.assertFalse(self.own_classroom_admin.can_delete(self.other_classroom))
        self.assertFalse(self.own_classroom_coach.can_delete(self.other_classroom))
        self.assertFalse(self.member.can_delete(self.other_classroom))

    def test_device_owner_can_do_anything_to_a_classroom(self):
        """ DeviceOwner can do anything to a Classroom """
        new_classroom_data = {"name": "Home", "parent": self.data["facility"]}
        self.assertTrue(self.device_owner.can_create(Classroom, new_classroom_data))
        self.assertTrue(self.device_owner.can_read(self.own_classroom))
        self.assertTrue(self.device_owner.can_update(self.own_classroom))
        self.assertTrue(self.device_owner.can_delete(self.own_classroom))
        self.assertSetEqual(set(Classroom.objects.all()), set(self.device_owner.filter_readable(Classroom.objects.all())))
class LearnerGroupPermissionsTestCase(TestCase):
    """
    Tests of permissions for reading/modifying LearnerGroup instances
    """

    def setUp(self):
        self.data = create_dummy_facility_data()
        # A learner who is a member of the first classroom's first group.
        self.member = self.data["learners_one_group"][0][0]
        self.own_learnergroup = self.data["learnergroups"][0][0]
        # A group in a different classroom, for cross-classroom checks.
        self.other_learnergroup = self.data["learnergroups"][1][1]
        self.own_classroom = self.data["classrooms"][0]
        self.own_classroom_coach = self.data["classroom_coaches"][0]
        self.own_classroom_admin = self.data["classroom_admins"][0]
        self.other_classroom_admin = self.data["classroom_admins"][1]
        self.device_owner = DeviceOwner.objects.create(username="boss")
        self.anon_user = KolibriAnonymousUser()

    def test_facility_or_classroom_admins_can_create_learnergroup(self):
        """ The only FacilityUser who can create a LearnerGroup is a facility admin for the Facility """
        new_learnergroup_data = {"name": "Cool Group", "parent": self.own_classroom}
        self.assertTrue(self.data["facility_admin"].can_create(LearnerGroup, new_learnergroup_data))
        self.assertTrue(self.own_classroom_admin.can_create(LearnerGroup, new_learnergroup_data))
        # Admin of a *different* classroom cannot create a group here.
        self.assertFalse(self.other_classroom_admin.can_create(LearnerGroup, new_learnergroup_data))
        self.assertFalse(self.own_classroom_coach.can_create(LearnerGroup, new_learnergroup_data))
        self.assertFalse(self.member.can_create(LearnerGroup, new_learnergroup_data))
        self.assertFalse(self.data["unattached_users"][0].can_create(LearnerGroup, new_learnergroup_data))
        self.assertFalse(self.anon_user.can_create(LearnerGroup, new_learnergroup_data))

    def test_members_can_read_own_learnergroup(self):
        """ Members of a LearnerGroup can read that LearnerGroup, as can coaches and admins for the LearnerGroup """
        for user in [self.data["facility_admin"], self.own_classroom_coach,
                     self.own_classroom_admin, self.member]:
            self.assertTrue(user.can_read(self.own_learnergroup))
            self.assertIn(self.own_learnergroup, user.filter_readable(LearnerGroup.objects.all()))

    def test_only_admins_can_update_own_learnergroup(self):
        """ The only FacilityUsers who can update a LearnerGroup are admins for that LearnerGroup """
        self.assertTrue(self.data["facility_admin"].can_update(self.own_learnergroup))
        self.assertTrue(self.own_classroom_admin.can_update(self.own_learnergroup))
        self.assertFalse(self.own_classroom_coach.can_update(self.own_learnergroup))
        self.assertFalse(self.member.can_update(self.own_learnergroup))
        self.assertFalse(self.anon_user.can_update(self.own_learnergroup))

    def test_facility_users_cannot_update_other_learnergroup(self):
        """ FacilityUsers cannot update other LearnerGroups, unless they are a facility admin """
        self.assertFalse(self.own_classroom_admin.can_update(self.other_learnergroup))
        self.assertFalse(self.own_classroom_coach.can_update(self.other_learnergroup))
        self.assertFalse(self.member.can_update(self.other_learnergroup))

    def test_only_admins_can_delete_own_learnergroup(self):
        """ The only FacilityUsers who can delete a LearnerGroup are admins for that LearnerGroup """
        self.assertTrue(self.data["facility_admin"].can_delete(self.own_learnergroup))
        self.assertTrue(self.own_classroom_admin.can_delete(self.own_learnergroup))
        self.assertFalse(self.own_classroom_coach.can_delete(self.own_learnergroup))
        self.assertFalse(self.member.can_delete(self.own_learnergroup))
        self.assertFalse(self.anon_user.can_delete(self.own_learnergroup))

    def test_facility_users_cannot_delete_other_learnergroup(self):
        """ FacilityUsers cannot delete other LearnerGroups, if they aren't admin for Facility or parent Classroom """
        self.assertFalse(self.own_classroom_admin.can_delete(self.other_learnergroup))
        self.assertFalse(self.own_classroom_coach.can_delete(self.other_learnergroup))
        self.assertFalse(self.member.can_delete(self.other_learnergroup))

    def test_device_owner_can_do_anything_to_a_learnergroup(self):
        """ DeviceOwner can do anything to a LearnerGroup """
        new_learnergroup_data = {"name": "Cool Group", "parent": self.own_classroom}
        self.assertTrue(self.device_owner.can_create(LearnerGroup, new_learnergroup_data))
        self.assertTrue(self.device_owner.can_read(self.own_learnergroup))
        self.assertTrue(self.device_owner.can_update(self.own_learnergroup))
        self.assertTrue(self.device_owner.can_delete(self.own_learnergroup))
        self.assertSetEqual(set(LearnerGroup.objects.all()), set(self.device_owner.filter_readable(LearnerGroup.objects.all())))
class FacilityUserPermissionsTestCase(TestCase):
    """
    Tests of permissions for reading/modifying FacilityUser instances
    """

    def setUp(self):
        # Two independent facility fixtures: data2 exists to verify
        # cross-facility isolation in the tests below.
        self.data = create_dummy_facility_data()
        self.data2 = create_dummy_facility_data()
        self.member = self.data["learners_one_group"][0][0]
        self.member2 = self.data2["learners_one_group"][0][0]
        # A learner in a different classroom of the same facility.
        self.other_member = self.data["learners_one_group"][1][1]
        self.own_learnergroup = self.data["learnergroups"][0][0]
        self.own_classroom = self.data["classrooms"][0]
        self.own_classroom_coach = self.data["classroom_coaches"][0]
        self.own_classroom_admin = self.data["classroom_admins"][0]
        self.other_classroom_admin = self.data["classroom_admins"][1]
        self.device_owner = DeviceOwner.objects.create(username="boss")
        self.anon_user = KolibriAnonymousUser()

    def test_only_facility_admins_can_create_facility_user(self):
        """ The only FacilityUser who can create a FacilityUser is a facility admin for the Facility """
        new_facilityuser_data = {"username": "janedoe", "password": "*", "facility": self.data["facility"]}
        self.assertTrue(self.data["facility_admin"].can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.data["facility_coach"].can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.own_classroom_admin.can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.own_classroom_coach.can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.member.can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.data["unattached_users"][0].can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.anon_user.can_create(FacilityUser, new_facilityuser_data))

    def test_no_facility_user_can_create_facility_user_for_other_facility(self):
        """ FacilityUsers cannot create a FacilityUser for a different Facility """
        # Note the target facility here comes from data2 (the other facility).
        new_facilityuser_data = {"username": "janedoe", "password": "*", "facility": self.data2["facility"]}
        self.assertFalse(self.data["facility_admin"].can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.data["facility_coach"].can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.own_classroom_admin.can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.own_classroom_coach.can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.member.can_create(FacilityUser, new_facilityuser_data))
        self.assertFalse(self.data["unattached_users"][0].can_create(FacilityUser, new_facilityuser_data))

    def test_facility_user_can_read_self(self):
        """ A FacilityUser can read its own FacilityUser model """
        for user in [self.own_classroom_admin, self.member, self.own_classroom_coach, self.data["facility_admin"]]:
            self.assertTrue(user.can_read(user))
            self.assertIn(user, user.filter_readable(FacilityUser.objects.all()))

    def test_admins_and_coaches_can_read_facility_users(self):
        """ Users with admin/coach role for a FacilityUser can read that FacilityUser """
        for user in [self.own_classroom_admin, self.own_classroom_coach, self.data["facility_admin"], self.data["facility_coach"]]:
            self.assertTrue(user.can_read(self.member))
            self.assertIn(self.member, user.filter_readable(FacilityUser.objects.all()))

    def test_members_and_admins_and_coaches_for_other_classrooms_cannot_read_facility_users(self):
        """ Users without admin/coach role for a specific FacilityUser cannot read that FacilityUser """
        for user in [self.own_classroom_coach, self.own_classroom_admin, self.member, self.anon_user]:
            self.assertFalse(user.can_read(self.other_member))
            self.assertNotIn(self.other_member, user.filter_readable(FacilityUser.objects.all()))

    def test_only_facility_admins_and_coaches_can_read_unaffiliated_facility_users(self):
        """ Only Facility admins/coaches can read FacilityUser that is not a member of a Classroom or LearnerGroup """
        orphan = self.data["unattached_users"][0]
        for user in [self.data["facility_admin"], self.data["facility_coach"]]:
            self.assertTrue(user.can_read(orphan))
            self.assertIn(orphan, user.filter_readable(FacilityUser.objects.all()))
        for user in [self.own_classroom_coach, self.own_classroom_admin, self.member, self.anon_user]:
            self.assertFalse(user.can_read(orphan))
            self.assertNotIn(orphan, user.filter_readable(FacilityUser.objects.all()))

    def test_facility_user_can_update_self(self):
        """ A FacilityUser can update its own FacilityUser model """
        self.assertTrue(self.member.can_update(self.member))
        self.assertTrue(self.own_classroom_coach.can_update(self.own_classroom_coach))
        self.assertTrue(self.own_classroom_admin.can_update(self.own_classroom_admin))
        self.assertTrue(self.data["facility_admin"].can_update(self.data["facility_admin"]))

    def test_admins_but_not_coaches_can_update_facility_users(self):
        """ Users with admin (but not coach) role for a FacilityUser can update that FacilityUser """
        self.assertTrue(self.data["facility_admin"].can_update(self.member))
        self.assertFalse(self.data["facility_coach"].can_update(self.member))
        self.assertTrue(self.own_classroom_admin.can_update(self.member))
        self.assertFalse(self.own_classroom_coach.can_update(self.member))

    def test_admins_and_coaches_for_other_classrooms_cannot_update_facility_users(self):
        """ Users without admin/coach role for a specific FacilityUser cannot update that FacilityUser """
        self.assertFalse(self.own_classroom_coach.can_update(self.other_member))
        self.assertFalse(self.own_classroom_admin.can_update(self.other_member))

    def test_only_facility_admins_can_update_unaffiliated_facility_users(self):
        """ Only Facility admins can update FacilityUser that is not a member of a Classroom or LearnerGroup """
        orphan = self.data["unattached_users"][0]
        self.assertTrue(self.data["facility_admin"].can_update(orphan))
        self.assertFalse(self.data["facility_coach"].can_update(orphan))
        self.assertFalse(self.own_classroom_admin.can_update(orphan))
        self.assertFalse(self.own_classroom_coach.can_update(orphan))
        self.assertFalse(self.member.can_update(orphan))
        self.assertFalse(self.anon_user.can_update(orphan))

    def test_facility_user_can_delete_self(self):
        """ A FacilityUser can delete its own FacilityUser model """
        self.assertTrue(self.member.can_delete(self.member))
        self.assertTrue(self.own_classroom_coach.can_delete(self.own_classroom_coach))
        self.assertTrue(self.own_classroom_admin.can_delete(self.own_classroom_admin))
        self.assertTrue(self.data["facility_admin"].can_delete(self.data["facility_admin"]))

    def test_only_facility_admins_can_delete_facility_user(self):
        """ The only FacilityUsers who can delete a FacilityUser are admins for the Facility """
        # Note: classroom-level admins cannot delete users, unlike update.
        self.assertTrue(self.data["facility_admin"].can_delete(self.member))
        self.assertFalse(self.data["facility_coach"].can_delete(self.member))
        self.assertFalse(self.own_classroom_admin.can_delete(self.member))
        self.assertFalse(self.own_classroom_coach.can_delete(self.member))
        self.assertFalse(self.anon_user.can_delete(self.member))

    def test_facility_users_cannot_delete_facility_users_from_other_facility(self):
        """ FacilityUsers cannot delete FacilityUsers from another Facility """
        self.assertFalse(self.data["facility_admin"].can_delete(self.member2))
        self.assertFalse(self.data["facility_coach"].can_delete(self.member2))
        self.assertFalse(self.own_classroom_admin.can_delete(self.member2))
        self.assertFalse(self.own_classroom_coach.can_delete(self.member2))
        self.assertFalse(self.member.can_delete(self.member2))

    def test_device_owner_can_do_anything_to_a_facility_user(self):
        """ DeviceOwner can do anything to a FacilityUser """
        new_facilityuser_data_1 = {"username": "janedoe", "password": "*", "facility": self.data["facility"]}
        self.assertTrue(self.device_owner.can_create(FacilityUser, new_facilityuser_data_1))
        new_facilityuser_data_2 = {"username": "janedoe", "password": "*", "facility": self.data2["facility"]}
        self.assertTrue(self.device_owner.can_create(FacilityUser, new_facilityuser_data_2))
        self.assertTrue(self.device_owner.can_read(self.member))
        self.assertTrue(self.device_owner.can_update(self.member))
        self.assertTrue(self.device_owner.can_delete(self.member))
        self.assertSetEqual(set(FacilityUser.objects.all()), set(self.device_owner.filter_readable(FacilityUser.objects.all())))
class DeviceOwnerPermissionsTestCase(TestCase):
    """
    Tests of permissions for reading/modifying DeviceOwner instances
    """

    def setUp(self):
        self.data = create_dummy_facility_data()
        self.member = self.data["learners_one_group"][0][0]
        self.own_classroom_coach = self.data["classroom_coaches"][0]
        self.own_classroom_admin = self.data["classroom_admins"][0]
        # Two device owners, to check that one can manage the other.
        self.device_owner = DeviceOwner.objects.create(username="boss")
        self.device_owner2 = DeviceOwner.objects.create(username="ubermensch")
        self.anon_user = KolibriAnonymousUser()

    def test_non_device_owners_cannot_create_device_owner(self):
        """ Users who are not DeviceOwners cannot create a DeviceOwner """
        new_deviceowner_data = {"username": "janedoe", "password": "*"}
        self.assertFalse(self.data["facility_admin"].can_create(DeviceOwner, new_deviceowner_data))
        self.assertFalse(self.data["facility_coach"].can_create(DeviceOwner, new_deviceowner_data))
        self.assertFalse(self.own_classroom_admin.can_create(DeviceOwner, new_deviceowner_data))
        self.assertFalse(self.own_classroom_coach.can_create(DeviceOwner, new_deviceowner_data))
        self.assertFalse(self.member.can_create(DeviceOwner, new_deviceowner_data))
        self.assertFalse(self.data["unattached_users"][0].can_create(DeviceOwner, new_deviceowner_data))
        self.assertFalse(self.anon_user.can_create(DeviceOwner, new_deviceowner_data))

    def test_non_device_owners_cannot_read_device_owner(self):
        """ Users who are not DeviceOwners cannot read a DeviceOwner """
        for user in [self.data["facility_admin"], self.data["facility_coach"], self.own_classroom_admin,
                     self.own_classroom_coach, self.member, self.data["unattached_users"][0], self.anon_user]:
            self.assertFalse(user.can_read(self.device_owner))
            # The readable queryset must be completely empty for facility users.
            self.assertEqual(len(user.filter_readable(DeviceOwner.objects.all())), 0)

    def test_non_device_owners_cannot_update_device_owner(self):
        """ Users who are not DeviceOwners cannot update a DeviceOwner """
        self.assertFalse(self.data["facility_admin"].can_update(self.device_owner))
        self.assertFalse(self.data["facility_coach"].can_update(self.device_owner))
        self.assertFalse(self.own_classroom_admin.can_update(self.device_owner))
        self.assertFalse(self.own_classroom_coach.can_update(self.device_owner))
        self.assertFalse(self.member.can_update(self.device_owner))
        self.assertFalse(self.data["unattached_users"][0].can_update(self.device_owner))
        self.assertFalse(self.anon_user.can_update(self.device_owner))

    def test_non_device_owners_cannot_delete_device_owner(self):
        """ Users who are not DeviceOwners cannot delete a DeviceOwner """
        self.assertFalse(self.data["facility_admin"].can_delete(self.device_owner))
        self.assertFalse(self.data["facility_coach"].can_delete(self.device_owner))
        self.assertFalse(self.own_classroom_admin.can_delete(self.device_owner))
        self.assertFalse(self.own_classroom_coach.can_delete(self.device_owner))
        self.assertFalse(self.member.can_delete(self.device_owner))
        self.assertFalse(self.data["unattached_users"][0].can_delete(self.device_owner))
        self.assertFalse(self.anon_user.can_delete(self.device_owner))

    def test_device_owner_can_do_anything_to_a_device_owner(self):
        """ DeviceOwner can do anything to a DeviceOwner """
        new_deviceowner_data = {"username": "janedoe", "password": "*"}
        self.assertTrue(self.device_owner.can_create(DeviceOwner, new_deviceowner_data))
        self.assertTrue(self.device_owner.can_read(self.device_owner))
        self.assertTrue(self.device_owner.can_update(self.device_owner))
        self.assertTrue(self.device_owner.can_delete(self.device_owner))
        # A device owner also has full rights over *other* device owners.
        self.assertTrue(self.device_owner.can_read(self.device_owner2))
        self.assertTrue(self.device_owner.can_update(self.device_owner2))
        self.assertTrue(self.device_owner.can_delete(self.device_owner2))
        self.assertIn(self.device_owner, self.device_owner.filter_readable(DeviceOwner.objects.all()))
        self.assertIn(self.device_owner2, self.device_owner.filter_readable(DeviceOwner.objects.all()))
class RolePermissionsTestCase(TestCase):
    """
    Tests of permissions for reading/modifying Role instances
    """

    def setUp(self):
        self.data = create_dummy_facility_data()
        self.member = self.data["learners_one_group"][0][0]
        self.own_classroom = self.data["classrooms"][0]
        self.other_classroom = self.data["classrooms"][1]
        self.own_classroom_coach = self.data["classroom_coaches"][0]
        self.own_classroom_admin = self.data["classroom_admins"][0]
        self.other_classroom_coach = self.data["classroom_coaches"][1]
        self.other_classroom_admin = self.data["classroom_admins"][1]
        self.device_owner = DeviceOwner.objects.create(username="boss")
        # The user that roles get assigned to in these tests; starts with no roles.
        self.role_user = self.data["unattached_users"][0]
        self.anon_user = KolibriAnonymousUser()

    def test_facility_admin_can_create_facility_admin_role(self):
        new_role_data = {"user": self.role_user, "collection": self.data["facility"], "kind": role_kinds.ADMIN}
        self.assertTrue(self.data["facility_admin"].can_create(Role, new_role_data))
        self.assertFalse(self.data["facility_coach"].can_create(Role, new_role_data))
        self.assertFalse(self.own_classroom_admin.can_create(Role, new_role_data))
        self.assertFalse(self.own_classroom_coach.can_create(Role, new_role_data))
        self.assertFalse(self.member.can_create(Role, new_role_data))
        self.assertFalse(self.role_user.can_create(Role, new_role_data))
        self.assertTrue(self.device_owner.can_create(Role, new_role_data))

    def test_facility_admin_can_create_facility_coach_role(self):
        new_role_data = {"user": self.role_user, "collection": self.data["facility"], "kind": role_kinds.COACH}
        self.assertTrue(self.data["facility_admin"].can_create(Role, new_role_data))
        self.assertFalse(self.data["facility_coach"].can_create(Role, new_role_data))
        self.assertFalse(self.own_classroom_admin.can_create(Role, new_role_data))
        self.assertFalse(self.own_classroom_coach.can_create(Role, new_role_data))
        self.assertFalse(self.member.can_create(Role, new_role_data))
        self.assertFalse(self.role_user.can_create(Role, new_role_data))
        self.assertTrue(self.device_owner.can_create(Role, new_role_data))
        self.assertFalse(self.anon_user.can_create(Role, new_role_data))

    def test_facility_or_classroom_admin_can_create_classroom_admin_role(self):
        # Collection here is a classroom, so its own admin gains create rights too.
        new_role_data = {"user": self.role_user, "collection": self.own_classroom, "kind": role_kinds.ADMIN}
        self.assertTrue(self.data["facility_admin"].can_create(Role, new_role_data))
        self.assertFalse(self.data["facility_coach"].can_create(Role, new_role_data))
        self.assertTrue(self.own_classroom_admin.can_create(Role, new_role_data))
        self.assertFalse(self.own_classroom_coach.can_create(Role, new_role_data))
        self.assertFalse(self.other_classroom_admin.can_create(Role, new_role_data))
        self.assertFalse(self.other_classroom_coach.can_create(Role, new_role_data))
        self.assertFalse(self.member.can_create(Role, new_role_data))
        self.assertFalse(self.role_user.can_create(Role, new_role_data))
        self.assertTrue(self.device_owner.can_create(Role, new_role_data))
        self.assertFalse(self.anon_user.can_create(Role, new_role_data))

    def test_facility_or_classroom_admin_can_create_classroom_coach_role(self):
        new_role_data = {"user": self.role_user, "collection": self.own_classroom, "kind": role_kinds.COACH}
        self.assertTrue(self.data["facility_admin"].can_create(Role, new_role_data))
        self.assertFalse(self.data["facility_coach"].can_create(Role, new_role_data))
        self.assertTrue(self.own_classroom_admin.can_create(Role, new_role_data))
        self.assertFalse(self.own_classroom_coach.can_create(Role, new_role_data))
        self.assertFalse(self.other_classroom_admin.can_create(Role, new_role_data))
        self.assertFalse(self.other_classroom_coach.can_create(Role, new_role_data))
        self.assertFalse(self.member.can_create(Role, new_role_data))
        self.assertFalse(self.role_user.can_create(Role, new_role_data))
        self.assertTrue(self.device_owner.can_create(Role, new_role_data))
        self.assertFalse(self.anon_user.can_create(Role, new_role_data))

    def test_facility_admin_or_coach_can_read_facility_admin_role(self):
        role = Role.objects.create(user=self.role_user, collection=self.data["facility"], kind=role_kinds.ADMIN)
        for user in [self.data["facility_admin"], self.data["facility_coach"], self.role_user, self.device_owner]:
            self.assertTrue(user.can_read(role))
            self.assertIn(role, user.filter_readable(Role.objects.all()))
        for user in [self.own_classroom_admin, self.own_classroom_coach, self.other_classroom_admin,
                     self.other_classroom_coach, self.member, self.anon_user]:
            self.assertFalse(user.can_read(role))
            self.assertNotIn(role, user.filter_readable(Role.objects.all()))

    def test_facility_or_classroom_admin_or_coach_can_read_classroom_admin_role(self):
        role = Role.objects.create(user=self.role_user, collection=self.own_classroom, kind=role_kinds.ADMIN)
        self.assertTrue(self.data["facility_admin"].can_read(role))
        self.assertTrue(self.data["facility_coach"].can_read(role))
        self.assertTrue(self.own_classroom_admin.can_read(role))
        self.assertTrue(self.own_classroom_coach.can_read(role))
        self.assertFalse(self.other_classroom_admin.can_read(role))
        self.assertFalse(self.other_classroom_coach.can_read(role))
        self.assertFalse(self.member.can_read(role))
        self.assertTrue(self.role_user.can_read(role))
        self.assertTrue(self.device_owner.can_read(role))
        self.assertFalse(self.anon_user.can_read(role))

    def test_facility_users_cannot_update_roles(self):
        # None of the fields in a role are "mutable", so there's no reason to allow updates
        # (changing a role from one kind to another means deleting the existing role and creating another)
        role = Role.objects.create(user=self.role_user, collection=self.own_classroom, kind=role_kinds.COACH)
        self.assertFalse(self.data["facility_admin"].can_update(role))
        self.assertFalse(self.data["facility_coach"].can_update(role))
        self.assertFalse(self.own_classroom_admin.can_update(role))
        self.assertFalse(self.own_classroom_coach.can_update(role))
        self.assertFalse(self.other_classroom_admin.can_update(role))
        self.assertFalse(self.other_classroom_coach.can_update(role))
        self.assertFalse(self.member.can_update(role))
        self.assertFalse(self.role_user.can_update(role))
        self.assertFalse(self.anon_user.can_update(role))

    def test_facility_admin_can_delete_facility_admin_role(self):
        role = Role.objects.create(user=self.role_user, collection=self.data["facility"], kind=role_kinds.ADMIN)
        self.assertTrue(self.data["facility_admin"].can_delete(role))
        self.assertFalse(self.data["facility_coach"].can_delete(role))
        self.assertFalse(self.own_classroom_admin.can_delete(role))
        self.assertFalse(self.own_classroom_coach.can_delete(role))
        self.assertFalse(self.member.can_delete(role))
        # role_user holds the facility-admin role itself here, hence may delete it.
        self.assertTrue(self.role_user.can_delete(role))
        self.assertTrue(self.device_owner.can_delete(role))
        self.assertFalse(self.anon_user.can_delete(role))

    def test_facility_admin_can_delete_facility_coach_role(self):
        role = Role.objects.create(user=self.role_user, collection=self.data["facility"], kind=role_kinds.COACH)
        self.assertTrue(self.data["facility_admin"].can_delete(role))
        self.assertFalse(self.data["facility_coach"].can_delete(role))
        self.assertFalse(self.own_classroom_admin.can_delete(role))
        self.assertFalse(self.own_classroom_coach.can_delete(role))
        self.assertFalse(self.member.can_delete(role))
        self.assertFalse(self.role_user.can_delete(role))
        self.assertTrue(self.device_owner.can_delete(role))
        self.assertFalse(self.anon_user.can_delete(role))

    def test_facility_or_classroom_admin_can_delete_classroom_admin_role(self):
        role = Role.objects.create(user=self.role_user, collection=self.own_classroom, kind=role_kinds.ADMIN)
        self.assertTrue(self.data["facility_admin"].can_delete(role))
        self.assertFalse(self.data["facility_coach"].can_delete(role))
        self.assertTrue(self.own_classroom_admin.can_delete(role))
        self.assertFalse(self.own_classroom_coach.can_delete(role))
        self.assertFalse(self.other_classroom_admin.can_delete(role))
        self.assertFalse(self.other_classroom_coach.can_delete(role))
        self.assertFalse(self.member.can_delete(role))
        self.assertTrue(self.role_user.can_delete(role))  # the role's user can delete it as she is an admin for collection
        self.assertTrue(self.device_owner.can_delete(role))
        self.assertFalse(self.anon_user.can_delete(role))

    def test_facility_or_classroom_admin_can_delete_classroom_coach_role(self):
        role = Role.objects.create(user=self.role_user, collection=self.own_classroom, kind=role_kinds.COACH)
        self.assertTrue(self.data["facility_admin"].can_delete(role))
        self.assertFalse(self.data["facility_coach"].can_delete(role))
        self.assertTrue(self.own_classroom_admin.can_delete(role))
        self.assertFalse(self.own_classroom_coach.can_delete(role))
        self.assertFalse(self.other_classroom_admin.can_delete(role))
        self.assertFalse(self.other_classroom_coach.can_delete(role))
        self.assertFalse(self.member.can_delete(role))
        self.assertFalse(self.role_user.can_delete(role))
        self.assertTrue(self.device_owner.can_delete(role))
        self.assertFalse(self.anon_user.can_delete(role))
class MembershipPermissionsTestCase(TestCase):
    """
    Tests of permissions for reading/modifying Membership instances
    """

    def setUp(self):
        self.data = create_dummy_facility_data()
        self.member = self.data["learners_one_group"][0][0]
        self.own_classroom = self.data["classrooms"][0]
        self.other_classroom = self.data["classrooms"][1]
        self.own_learnergroup = self.data["learnergroups"][0][0]
        self.other_learnergroup = self.data["learnergroups"][1][1]
        self.own_classroom_coach = self.data["classroom_coaches"][0]
        self.own_classroom_admin = self.data["classroom_admins"][0]
        self.other_classroom_coach = self.data["classroom_coaches"][1]
        self.other_classroom_admin = self.data["classroom_admins"][1]
        self.device_owner = DeviceOwner.objects.create(username="boss")
        self.anon_user = KolibriAnonymousUser()

    def test_only_admin_for_user_can_create_membership(self):
        # try adding member of own_classroom as a member of other_classroom
        new_membership_data = {"user": self.member, "collection": self.other_learnergroup}
        self.assertTrue(self.data["facility_admin"].can_create(Membership, new_membership_data))
        self.assertFalse(self.data["facility_coach"].can_create(Membership, new_membership_data))
        # Admin rights over the *user* (not the target collection) are what count here.
        self.assertTrue(self.own_classroom_admin.can_create(Membership, new_membership_data))
        self.assertFalse(self.own_classroom_coach.can_create(Membership, new_membership_data))
        self.assertFalse(self.other_classroom_admin.can_create(Membership, new_membership_data))
        self.assertFalse(self.other_classroom_coach.can_create(Membership, new_membership_data))
        self.assertFalse(self.member.can_create(Membership, new_membership_data))
        self.assertTrue(self.device_owner.can_create(Membership, new_membership_data))
        self.assertFalse(self.anon_user.can_create(Membership, new_membership_data))

    def test_facility_or_classroom_admin_or_coach_or_member_can_read_membership(self):
        membership = Membership.objects.get(user=self.member, collection=self.own_learnergroup)
        for user in [self.data["facility_admin"], self.data["facility_coach"], self.own_classroom_admin,
                     self.own_classroom_coach, self.member, self.device_owner]:
            self.assertTrue(user.can_read(membership))
            self.assertIn(membership, user.filter_readable(Membership.objects.all()))
        for user in [self.other_classroom_admin, self.other_classroom_coach, self.anon_user]:
            self.assertFalse(user.can_read(membership))
            self.assertNotIn(membership, user.filter_readable(Membership.objects.all()))

    def test_facility_users_cannot_update_memberships(self):
        # None of the fields in a Membership are "mutable", so there's no reason to allow updates
        membership = Membership.objects.get(user=self.member, collection=self.own_learnergroup)
        self.assertFalse(self.data["facility_admin"].can_update(membership))
        self.assertFalse(self.data["facility_coach"].can_update(membership))
        self.assertFalse(self.own_classroom_admin.can_update(membership))
        self.assertFalse(self.own_classroom_coach.can_update(membership))
        self.assertFalse(self.other_classroom_admin.can_update(membership))
        self.assertFalse(self.other_classroom_coach.can_update(membership))
        self.assertFalse(self.member.can_update(membership))
        self.assertFalse(self.anon_user.can_update(membership))

    def test_admin_can_delete_membership(self):
        membership = Membership.objects.get(user=self.member, collection=self.own_learnergroup)
        self.assertTrue(self.data["facility_admin"].can_delete(membership))
        self.assertFalse(self.data["facility_coach"].can_delete(membership))
        self.assertTrue(self.own_classroom_admin.can_delete(membership))
        self.assertFalse(self.own_classroom_coach.can_delete(membership))
        self.assertFalse(self.member.can_delete(membership))
        self.assertTrue(self.device_owner.can_delete(membership))
        self.assertFalse(self.anon_user.can_delete(membership))
| |
import operator
import math
__version__ = "2.1.0"
# 3x3 conversion matrices: `m` maps CIE XYZ -> linear sRGB, and `m_inv`
# maps linear sRGB -> XYZ (presumably the standard sRGB/D65 coefficients,
# truncated to 4 decimals — verify against IEC 61966-2-1 if precision matters).
m = [
    [3.2406, -1.5372, -0.4986],
    [-0.9689, 1.8758, 0.0415],
    [0.0557, -0.2040, 1.0570]
]
m_inv = [
    [0.4124, 0.3576, 0.1805],
    [0.2126, 0.7152, 0.0722],
    [0.0193, 0.1192, 0.9505]
]
# Hard-coded D65 illuminant
refX = 0.95047
refY = 1.00000
refZ = 1.08883
refU = 0.19784
refV = 0.46834
# CIE Lab threshold constants (epsilon and kappa), used by f()/f_inv().
lab_e = 0.008856
lab_k = 903.3
# Public API
def husl_to_rgb(h, s, l):
    """Convert an HUSL triple (hue, saturation, lightness) to RGB floats."""
    return lch_to_rgb(*husl_to_lch([h, s, l]))
def husl_to_hex(h, s, l):
    """Convert an HUSL triple to a '#rrggbb' hex string."""
    return rgb_to_hex(husl_to_rgb(h, s, l))
def rgb_to_husl(r, g, b):
    """Convert RGB components in [0, 1] to an HUSL triple."""
    return lch_to_husl(rgb_to_lch(r, g, b))
def hex_to_husl(hex):
    """Convert a '#rrggbb' hex string to an HUSL triple."""
    return rgb_to_husl(*hex_to_rgb(hex))
def huslp_to_rgb(h, s, l):
    """Convert a pastel HUSLp triple to RGB floats."""
    return lch_to_rgb(*huslp_to_lch([h, s, l]))
def huslp_to_hex(h, s, l):
    """Convert a pastel HUSLp triple to a '#rrggbb' hex string."""
    return rgb_to_hex(huslp_to_rgb(h, s, l))
def rgb_to_huslp(r, g, b):
    """Convert RGB components in [0, 1] to a pastel HUSLp triple."""
    return lch_to_huslp(rgb_to_lch(r, g, b))
def hex_to_huslp(hex):
    """Convert a '#rrggbb' hex string to a pastel HUSLp triple."""
    return rgb_to_huslp(*hex_to_rgb(hex))
def lch_to_rgb(l, c, h):
    """Convert CIE LCh to RGB, going through Luv and XYZ."""
    return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))
def rgb_to_lch(r, g, b):
    """Convert RGB to CIE LCh, going through XYZ and Luv."""
    return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))
def max_chroma(L, H):
    """Return the maximum in-gamut chroma for lightness L at hue H (degrees).

    Checks each sRGB channel (rows of `m`) at both gamut limits (0 and 1)
    and keeps the smallest positive chroma found.
    """
    hue_rad = math.radians(H)
    sin_h = math.sin(hue_rad)
    cos_h = math.cos(hue_rad)
    cube = math.pow(L + 16, 3.0) / 1560896.0
    # Piecewise CIE transfer: cubic form above the epsilon threshold.
    sub2 = cube if cube > 0.008856 else (L / 903.3)
    best = float("inf")
    for m1, m2, m3 in m:
        top = (0.99915 * m1 + 1.05122 * m2 + 1.14460 * m3) * sub2
        rbottom = 0.86330 * m3 - 0.17266 * m2
        lbottom = 0.12949 * m3 - 0.38848 * m1
        bottom = (rbottom * sin_h + lbottom * cos_h) * sub2
        for t in (0.0, 1.0):
            C = L * (top - 1.05122 * t) / (bottom + 0.17266 * sin_h * t)
            if 0.0 < C < best:
                best = C
    return best
def _hrad_extremum(L):
    """Return the hue angle (radians) at which max_chroma is smallest for L.

    Used by max_chroma_pastel() to find the chroma usable at every hue.
    """
    lhs = (math.pow(L, 3.0) + 48.0 * math.pow(L, 2.0) + 768.0 * L + 4096.0) / 1560896.0
    rhs = 1107.0 / 125000.0
    sub = lhs if lhs > rhs else 10.0 * L / 9033.0
    chroma = float("inf")
    result = None
    # Candidate extrema: each sRGB channel (rows of m) at both limits 0 and 1.
    for row in m:
        for limit in (0.0, 1.0):
            [m1, m2, m3] = row
            top = -3015466475.0 * m3 * sub + 603093295.0 * m2 * sub - 603093295.0 * limit
            bottom = 1356959916.0 * m1 * sub - 452319972.0 * m3 * sub
            hrad = math.atan2(top, bottom)
            # This is a math hack to deal with tan quadrants, I'm too lazy to figure
            # out how to do this properly
            if limit == 0.0:
                hrad += math.pi
            # Keep the hue whose achievable chroma is the global minimum.
            test = max_chroma(L, math.degrees(hrad))
            if test < chroma:
                chroma = test
                result = hrad
    return result
def max_chroma_pastel(L):
    """Return the chroma achievable at *every* hue for lightness L (HUSLp)."""
    H = math.degrees(_hrad_extremum(L))
    return max_chroma(L, H)
def dot_product(a, b):
    """Return the dot product of two equal-length numeric sequences."""
    return sum(x * y for x, y in zip(a, b))
def f(t):
    """CIE XYZ -> Lab transfer function: cube root above the lab_e cutoff,
    linear approximation below it."""
    return math.pow(t, 1.0 / 3.0) if t > lab_e else 7.787 * t + 16.0 / 116.0
def f_inv(t):
    """Inverse of f(): cube above the lab_e threshold, linear form below."""
    cubed = math.pow(t, 3.0)  # computed once (original evaluated it twice)
    if cubed > lab_e:
        return cubed
    return (116.0 * t - 16.0) / lab_k
def from_linear(c):
    """Apply sRGB gamma companding to a linear channel value."""
    if c > 0.0031308:
        return 1.055 * math.pow(c, 1.0 / 2.4) - 0.055
    # Small values use the linear segment of the sRGB curve.
    return 12.92 * c
def to_linear(c):
    """Invert sRGB gamma companding, yielding a linear channel value."""
    a = 0.055
    if c <= 0.04045:
        # Linear segment of the sRGB curve.
        return c / 12.92
    return math.pow((c + a) / (1.0 + a), 2.4)
def rgb_prepare(triple):
    """Convert RGB channel floats in [0, 1] to rounded ints in [0, 255].

    Each channel is rounded to 3 decimal places first; values outside the
    tolerance band [-0.0001, 1.0001] are rejected, and tiny excursions
    beyond [0, 1] are clamped.

    Raises:
        ValueError: if a channel falls outside the tolerance band.
    """
    ret = []
    for ch in triple:
        ch = round(ch, 3)
        if ch < -0.0001 or ch > 1.0001:
            # Narrowed from a bare `Exception`: a bad channel value is a
            # ValueError (still caught by callers handling Exception).
            raise ValueError("Illegal RGB value %f" % ch)
        # Clamp rounding noise into the valid range.
        ch = min(max(ch, 0), 1)
        # Fix for Python 3 which by default rounds 4.5 down to 4.0
        # instead of Python 2 which is rounded to 5.0 which caused
        # a couple off by one errors in the tests. Tests now all pass
        # in Python 2 and Python 3
        ret.append(int(round(ch * 255 + 0.001, 0)))
    return ret
def hex_to_rgb(hex):
    """Parse '#rrggbb' (leading '#' optional) into [r, g, b] floats in [0, 1]."""
    digits = hex[1:] if hex.startswith('#') else hex
    # Slice the two-character channel fields and scale each to [0, 1].
    return [int(digits[i:i + 2], 16) / 255.0 for i in (0, 2, 4)]
def rgb_to_hex(triple):
    """Format an RGB triple of floats in [0, 1] as a '#rrggbb' hex string."""
    r, g, b = triple
    return '#' + ''.join('%02x' % v for v in rgb_prepare([r, g, b]))
def xyz_to_rgb(triple):
    """Convert CIE XYZ to gamma-companded sRGB floats."""
    # Matrix-multiply by `m`, then compand each linear channel.
    return [from_linear(dot_product(row, triple)) for row in m]
def rgb_to_xyz(triple):
    """Convert gamma-companded sRGB floats to CIE XYZ."""
    # Linearize each channel, then matrix-multiply by `m_inv`.
    linear = [to_linear(ch) for ch in triple]
    return [dot_product(row, linear) for row in m_inv]
def xyz_to_luv(triple):
    """Convert CIE XYZ to CIE L*u*v* against the hard-coded D65 white point."""
    X, Y, Z = triple
    # Pure black has no defined chromaticity; short-circuit it.
    if X == Y == Z == 0.0:
        return [0.0, 0.0, 0.0]
    denom = X + (15.0 * Y) + (3.0 * Z)  # shared denominator, hoisted
    varU = (4.0 * X) / denom
    varV = (9.0 * Y) / denom
    L = 116.0 * f(Y / refY) - 16.0
    # Black will create a divide-by-zero error
    if L == 0.0:
        return [0.0, 0.0, 0.0]
    U = 13.0 * L * (varU - refU)
    V = 13.0 * L * (varV - refV)
    return [L, U, V]
def luv_to_xyz(triple):
    """Convert CIE L*u*v* back to CIE XYZ (inverse of xyz_to_luv)."""
    L, U, V = triple
    # L == 0 is black; avoid dividing by zero below.
    if L == 0:
        return [0.0, 0.0, 0.0]
    varY = f_inv((L + 16.0) / 116.0)
    varU = U / (13.0 * L) + refU
    varV = V / (13.0 * L) + refV
    Y = varY * refY
    X = 0.0 - (9.0 * Y * varU) / ((varU - 4.0) * varV - varU * varV)
    Z = (9.0 * Y - (15.0 * varV * Y) - (varV * X)) / (3.0 * varV)
    return [X, Y, Z]
def luv_to_lch(triple):
    """Convert CIE L*u*v* to cylindrical LCh; hue is in degrees, [0, 360)."""
    L, U, V = triple
    C = math.pow(math.pow(U, 2) + math.pow(V, 2), (1.0 / 2.0))
    H = math.degrees(math.atan2(V, U))
    # atan2 yields (-180, 180]; normalize negative hues into [0, 360).
    if H < 0.0:
        H += 360.0
    return [L, C, H]
def lch_to_luv(triple):
    """Convert cylindrical LCh back to CIE L*u*v*."""
    L, C, H = triple
    theta = math.radians(H)
    # Project the chroma vector back onto the u/v axes.
    return [L, C * math.cos(theta), C * math.sin(theta)]
def husl_to_lch(triple):
    """Convert HUSL (H, S, L) to LCh; S is a percentage of max chroma at (L, H)."""
    H, S, L = triple
    # Lightness extremes collapse to pure white/black regardless of saturation.
    if L > 99.9999999:
        return [100, 0.0, H]
    if L < 0.00000001:
        return [0.0, 0.0, H]
    C = max_chroma(L, H) / 100.0 * S
    return [L, C, H]
def lch_to_husl(triple):
    """Convert LCh to HUSL; chroma becomes a percentage of the max at (L, H)."""
    L, C, H = triple
    # Lightness extremes have no meaningful saturation.
    if L > 99.9999999:
        return [H, 0.0, 100.0]
    if L < 0.00000001:
        return [H, 0.0, 0.0]
    S = C / max_chroma(L, H) * 100.0
    return [H, S, L]
def huslp_to_lch(triple):
    """Convert pastel HUSLp to LCh using the hue-independent max chroma."""
    H, S, L = triple
    # Lightness extremes collapse to pure white/black.
    if L > 99.9999999:
        return [100, 0.0, H]
    if L < 0.00000001:
        return [0.0, 0.0, H]
    C = max_chroma_pastel(L) / 100.0 * S
    return [L, C, H]
def lch_to_huslp(triple):
    """Convert LCh to pastel HUSLp using the hue-independent max chroma."""
    L, C, H = triple
    # Lightness extremes have no meaningful saturation.
    if L > 99.9999999:
        return [H, 0.0, 100.0]
    if L < 0.00000001:
        return [H, 0.0, 0.0]
    S = C / max_chroma_pastel(L) * 100.0
    return [H, S, L]
| |
##
# Copyright (c) 2011-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twistedcaldav.test.util import StoreTestCase
from calendarserver.push.notifier import PushDistributor
from calendarserver.push.notifier import getPubSubAPSConfiguration
from calendarserver.push.notifier import PushNotificationWork
from twisted.internet.defer import inlineCallbacks, succeed
from twistedcaldav.config import ConfigDict
from txdav.common.datastore.test.util import populateCalendarsFrom
from txdav.common.datastore.sql_tables import _BIND_MODE_WRITE
from calendarserver.push.util import PushPriority
from txdav.idav import ChangeCategory
from twext.enterprise.jobqueue import JobItem
from twisted.internet import reactor
class StubService(object):
    """Test double for a push service: records every enqueue() call."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Forget everything recorded so far.
        self.history = []

    def enqueue(self, transaction, id, dataChangedTimestamp=None,
        priority=None):
        # Record the (id, priority) pair and hand back an already-fired
        # Deferred, mimicking the real service's asynchronous API.
        self.history.append((id, priority))
        return succeed(None)
class PushDistributorTests(StoreTestCase):
    """Tests for PushDistributor fan-out and APS configuration lookup."""

    @inlineCallbacks
    def test_enqueue(self):
        # The distributor must forward each push to every registered service.
        service = StubService()
        distributor = PushDistributor([service])
        yield distributor.enqueue(None, "testing", PushPriority.high)
        self.assertEquals(service.history, [("testing", PushPriority.high)])

    def test_getPubSubAPSConfiguration(self):
        serverConfig = ConfigDict({
            "EnableSSL": True,
            "ServerHostName": "calendars.example.com",
            "SSLPort": 8443,
            "HTTPPort": 8008,
            "Notifications": {
                "Services": {
                    "APNS": {
                        "CalDAV": {
                            "Topic": "test topic",
                        },
                        "SubscriptionRefreshIntervalSeconds": 42,
                        "SubscriptionURL": "apns",
                        "Environment": "prod",
                        "Enabled": True,
                    },
                },
            },
        })
        actual = getPubSubAPSConfiguration(("CalDAV", "foo",), serverConfig)
        expected = {
            "SubscriptionRefreshIntervalSeconds": 42,
            "SubscriptionURL": "https://calendars.example.com:8443/apns",
            "APSBundleID": "test topic",
            "APSEnvironment": "prod",
        }
        self.assertEquals(actual, expected)
class StubDistributor(object):
    """Test double for PushDistributor: records (pushID, priority) pairs."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Discard all recorded pushes.
        self.history = []

    def enqueue(self, transaction, pushID, dataChangedTimestamp=None,
        priority=None):
        # Log the push and return a pre-fired Deferred like the real
        # distributor would.
        self.history.append((pushID, priority))
        return succeed(None)
class PushNotificationWorkTests(StoreTestCase):
    """
    Verify that PushNotificationWork job items are delivered to the
    transaction's push distributor, coalesced by pushID, and reduced to a
    single push at the highest enqueued priority.
    """
    @inlineCallbacks
    def test_work(self):
        # Shorten the retry interval so a failing job does not stall the run.
        self.patch(JobItem, "failureRescheduleInterval", 2)
        pushDistributor = StubDistributor()
        def decorateTransaction(txn):
            # Attach the stub so work items push into it rather than a real
            # distributor.
            txn._pushDistributor = pushDistributor
        self._sqlCalendarStore.callWithNewTransactions(decorateTransaction)
        # A single enqueued work item results in exactly one push.
        txn = self._sqlCalendarStore.newTransaction()
        yield txn.enqueue(PushNotificationWork,
            pushID="/CalDAV/localhost/foo/",
            pushPriority=PushPriority.high.value
        )
        yield txn.commit()
        yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)
        self.assertEquals(pushDistributor.history,
            [("/CalDAV/localhost/foo/", PushPriority.high)])
        pushDistributor.reset()
        # Multiple work items with the same pushID must be coalesced into
        # one push per distinct pushID.
        txn = self._sqlCalendarStore.newTransaction()
        yield txn.enqueue(PushNotificationWork,
            pushID="/CalDAV/localhost/bar/",
            pushPriority=PushPriority.high.value
        )
        yield txn.enqueue(PushNotificationWork,
            pushID="/CalDAV/localhost/bar/",
            pushPriority=PushPriority.high.value
        )
        yield txn.enqueue(PushNotificationWork,
            pushID="/CalDAV/localhost/bar/",
            pushPriority=PushPriority.high.value
        )
        # Enqueue a different pushID to ensure those are not grouped with
        # the others:
        yield txn.enqueue(PushNotificationWork,
            pushID="/CalDAV/localhost/baz/",
            pushPriority=PushPriority.high.value
        )
        yield txn.commit()
        yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)
        self.assertEquals(set(pushDistributor.history),
            set([("/CalDAV/localhost/bar/", PushPriority.high),
                ("/CalDAV/localhost/baz/", PushPriority.high)]))
        # Ensure only the high-water-mark priority push goes out, by
        # enqueuing low, medium, and high notifications
        pushDistributor.reset()
        txn = self._sqlCalendarStore.newTransaction()
        yield txn.enqueue(PushNotificationWork,
            pushID="/CalDAV/localhost/bar/",
            pushPriority=PushPriority.low.value
        )
        yield txn.enqueue(PushNotificationWork,
            pushID="/CalDAV/localhost/bar/",
            pushPriority=PushPriority.high.value
        )
        yield txn.enqueue(PushNotificationWork,
            pushID="/CalDAV/localhost/bar/",
            pushPriority=PushPriority.medium.value
        )
        yield txn.commit()
        yield JobItem.waitEmpty(self.storeUnderTest().newTransaction, reactor, 60)
        self.assertEquals(pushDistributor.history,
            [("/CalDAV/localhost/bar/", PushPriority.high)])
class NotifierFactory(StoreTestCase):
    """
    Verify which push notifications fire for home, calendar, sharing and
    notification-collection changes.  The store fixture records pushes in
    self.notifierFactory.history as (pushID, priority) tuples.
    """
    # Two users, each with one calendar, pre-populated into the test store.
    requirements = {
        "user01" : {
            "calendar_1" : {}
        },
        "user02" : {
            "calendar_1" : {}
        },
    }
    @inlineCallbacks
    def populate(self):
        # Need to bypass normal validation inside the store
        yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
        self.notifierFactory.reset()
    def test_storeInit(self):
        # The store must have been wired up with the "push" notifier factory.
        self.assertTrue("push" in self._sqlCalendarStore._notifierFactories)
    @inlineCallbacks
    def test_homeNotifier(self):
        # Changing a home pushes only that home's ID.
        home = yield self.homeUnderTest(name="user01")
        yield home.notifyChanged(category=ChangeCategory.default)
        self.assertEquals(self.notifierFactory.history,
            [("/CalDAV/example.com/user01/", PushPriority.high)])
        yield self.commit()
    @inlineCallbacks
    def test_calendarNotifier(self):
        # Changing a calendar pushes both the calendar and its parent home.
        calendar = yield self.calendarUnderTest(home="user01")
        yield calendar.notifyChanged(category=ChangeCategory.default)
        self.assertEquals(
            set(self.notifierFactory.history),
            set([
                ("/CalDAV/example.com/user01/", PushPriority.high),
                ("/CalDAV/example.com/user01/calendar_1/", PushPriority.high)])
        )
        yield self.commit()
    @inlineCallbacks
    def test_shareWithNotifier(self):
        # Inviting a sharee notifies both the owner's hierarchy and the
        # sharee's home and notification collection.
        calendar = yield self.calendarUnderTest(home="user01")
        yield calendar.inviteUserToShare("user02", _BIND_MODE_WRITE, "")
        self.assertEquals(
            set(self.notifierFactory.history),
            set([
                ("/CalDAV/example.com/user01/", PushPriority.high),
                ("/CalDAV/example.com/user01/calendar_1/", PushPriority.high),
                ("/CalDAV/example.com/user02/", PushPriority.high),
                ("/CalDAV/example.com/user02/notification/", PushPriority.high),
            ])
        )
        yield self.commit()
        # Uninviting produces the same set of notifications.
        calendar = yield self.calendarUnderTest(home="user01")
        yield calendar.uninviteUserFromShare("user02")
        self.assertEquals(
            set(self.notifierFactory.history),
            set([
                ("/CalDAV/example.com/user01/", PushPriority.high),
                ("/CalDAV/example.com/user01/calendar_1/", PushPriority.high),
                ("/CalDAV/example.com/user02/", PushPriority.high),
                ("/CalDAV/example.com/user02/notification/", PushPriority.high),
            ])
        )
        yield self.commit()
    @inlineCallbacks
    def test_sharedCalendarNotifier(self):
        # A change made through the sharee's view notifies the owner's
        # hierarchy (not the sharee's).
        calendar = yield self.calendarUnderTest(home="user01")
        shareeView = yield calendar.inviteUserToShare("user02", _BIND_MODE_WRITE, "")
        yield shareeView.acceptShare("")
        shareName = shareeView.name()
        yield self.commit()
        self.notifierFactory.reset()
        shared = yield self.calendarUnderTest(home="user02", name=shareName)
        yield shared.notifyChanged(category=ChangeCategory.default)
        self.assertEquals(
            set(self.notifierFactory.history),
            set([
                ("/CalDAV/example.com/user01/", PushPriority.high),
                ("/CalDAV/example.com/user01/calendar_1/", PushPriority.high)])
        )
        yield self.commit()
    @inlineCallbacks
    def test_notificationNotifier(self):
        # Changing the notification collection pushes it plus the home.
        notifications = yield self.transactionUnderTest().notificationsWithUID("user01")
        yield notifications.notifyChanged(category=ChangeCategory.default)
        self.assertEquals(
            set(self.notifierFactory.history),
            set([
                ("/CalDAV/example.com/user01/", PushPriority.high),
                ("/CalDAV/example.com/user01/notification/", PushPriority.high)])
        )
        yield self.commit()
| |
import sys
import json
import asyncio
from typing import Optional, NoReturn
import aioxmpp
import aioxmpp.connector
import aioxmpp.xso
import OpenSSL
from aiofcm.logging import logger
from aiofcm.common import Message, MessageResponse, STATUS_SUCCESS
from aiofcm.exceptions import ConnectionClosed
class FCMMessage(aioxmpp.xso.XSO):
    """XSO mapping for the FCM ('gcm') payload element in XMPP stanzas."""
    # Qualified tag of the payload element: namespace + local name.
    TAG = ('google:mobile:data', 'gcm')
    # JSON payload carried as the element's text content.
    text = aioxmpp.xso.Text(default=None)
# Register the FCM payload as a child element of aioxmpp's Message stanza so
# incoming/outgoing messages expose it as `message.fcm_payload`.
aioxmpp.stanza.Message.fcm_payload = aioxmpp.xso.Child([FCMMessage])
class FCMMessageType:
    """Message-type values FCM sends in XMPP delivery receipts."""
    ACK = 'ack'
    NACK = 'nack'
class FCMXMPPConnection:
    """One XMPP connection to the FCM endpoint.

    In-flight messages are tracked in ``self.requests`` (message_id ->
    Future); the future is resolved when FCM's ACK/NACK arrives.  The
    connection closes itself after INACTIVITY_TIME seconds of silence.
    """
    FCM_HOST = 'fcm-xmpp.googleapis.com'
    FCM_PORT = 5235
    # Seconds without traffic before the connection is closed.
    INACTIVITY_TIME = 10
    def __init__(self, sender_id, api_key, loop=None, max_requests=1000):
        # Number of concurrent in-flight requests after which the pool
        # treats this connection as busy (see is_busy).
        self.max_requests = max_requests
        self.xmpp_client = self._create_client(sender_id, api_key, loop)
        self.loop = loop
        # Resolved with True once the stream is established, or failed via
        # the client's on_failure signal.
        self._wait_connection = asyncio.Future()
        self.inactivity_timer = None
        # message_id -> Future awaiting FCM's ACK/NACK for that message.
        self.requests = {}
    def _create_client(self, sender_id, api_key, loop=None) -> aioxmpp.Client:
        """Build an aioxmpp client wired to FCM's XMPP endpoint with the
        lifecycle and response callbacks hooked up."""
        xmpp_client = aioxmpp.Client(
            local_jid=aioxmpp.JID.fromstr('%s@gcm.googleapis.com' % sender_id),
            security_layer=aioxmpp.make_security_layer(api_key),
            override_peer=[
                (self.FCM_HOST, self.FCM_PORT,
                 aioxmpp.connector.XMPPOverTLSConnector())
            ],
            loop=loop
        )
        xmpp_client.on_stream_established.connect(
            lambda: self._wait_connection.set_result(True)
        )
        xmpp_client.on_stream_destroyed.connect(
            self._on_stream_destroyed
        )
        xmpp_client.on_failure.connect(
            lambda exc: self._wait_connection.set_exception(exc)
        )
        # All FCM responses arrive as NORMAL-type messages.
        xmpp_client.stream.register_message_callback(
            type_=aioxmpp.MessageType.NORMAL,
            from_=None,
            cb=self.on_response
        )
        return xmpp_client
    @property
    def connected(self):
        # True while the underlying aioxmpp client is running.
        return self.xmpp_client.running
    async def connect(self):
        """Start the client and wait until the XMPP stream is established."""
        self.xmpp_client.start()
        await self._wait_connection
        self.refresh_inactivity_timer()
    def close(self):
        # Cancel the idle timer and stop the XMPP client.
        if self.inactivity_timer:
            self.inactivity_timer.cancel()
        logger.debug('Closing connection %s', self)
        self.xmpp_client.stop()
    def _on_stream_destroyed(self, reason=None):
        """Fail every in-flight request when the stream goes away."""
        reason = reason or ConnectionClosed()
        logger.debug('Stream of %s was destroyed: %s', self, reason)
        self.xmpp_client.stop()
        if self.inactivity_timer:
            self.inactivity_timer.cancel()
        for request in self.requests.values():
            if not request.done():
                request.set_exception(reason)
    def on_response(self, message):
        """Handle an ACK/NACK stanza from FCM and resolve its Future."""
        self.refresh_inactivity_timer()
        body = json.loads(message.fcm_payload.text)
        try:
            message_id = body['message_id']
            message_type = body['message_type']
        except KeyError:
            logger.warning('Got strange response: %s', body)
            return
        if message_type not in (FCMMessageType.ACK, FCMMessageType.NACK):
            # Other control messages are ignored.
            return
        request = self.requests.pop(message_id, None)
        if not request:
            # NOTE(review): may happen if the request already failed and was
            # cleaned up before the response arrived.
            logger.warning('Got response for unknown message %s', message_id)
            return
        if message_type == FCMMessageType.ACK:
            result = MessageResponse(message_id, STATUS_SUCCESS)
            request.set_result(result)
        elif message_type == FCMMessageType.NACK:
            status = body['error']
            description = body['error_description']
            result = MessageResponse(message_id, status, description)
            request.set_result(result)
    async def send_message(self, message):
        """Send one message and wait for FCM's ACK/NACK response.

        :raises: ConnectionClosed (via the response future) if the stream
            is destroyed before a response arrives.
        """
        if not self.connected:
            await self.connect()
        msg = aioxmpp.Message(
            type_=aioxmpp.MessageType.NORMAL
        )
        payload = FCMMessage()
        payload_body = message.as_dict()
        payload.text = json.dumps(payload_body)
        msg.fcm_payload = payload
        future_response = asyncio.Future()
        self.requests[message.message_id] = future_response
        self.refresh_inactivity_timer()
        try:
            await self.xmpp_client.stream.send(msg)
        except Exception:
            # Undo the bookkeeping if the send itself failed.
            self.requests.pop(message.message_id)
            raise
        response = await future_response
        return response
    def refresh_inactivity_timer(self):
        # Restart the idle-close countdown from now.
        if self.inactivity_timer:
            self.inactivity_timer.cancel()
        self.inactivity_timer = self.loop.call_later(
            self.INACTIVITY_TIME, self.close)
    @property
    def is_busy(self):
        # Busy once the number of in-flight requests reaches max_requests.
        return len(self.requests) >= self.max_requests
class FCMConnectionPool:
    """Pool of FCM XMPP connections with acquire/send-with-retry semantics."""
    # Number of send attempts after which warnings start being logged.
    MAX_ATTEMPTS = 10
    def __init__(self, sender_id, api_key, max_connections=10, loop=None):
        # type: (int, str, int, Optional[asyncio.AbstractEventLoop]) -> NoReturn
        self.sender_id = sender_id
        self.api_key = api_key
        self.max_connections = max_connections
        self.loop = loop or asyncio.get_event_loop()
        self.connections = []
        # Python 3.10+ does not use the "loop" parameter
        if sys.hexversion >= 0x030A00F0:
            self._lock = asyncio.Lock()
        else:
            self._lock = asyncio.Lock(loop=self.loop)
        # Route unhandled loop exceptions through our filter (see below).
        self.loop.set_exception_handler(self.__exception_handler)
    async def connect(self) -> FCMXMPPConnection:
        """Open one new connection; the caller registers it in the pool."""
        connection = FCMXMPPConnection(
            sender_id=self.sender_id,
            api_key=self.api_key,
            loop=self.loop,
        )
        await connection.connect()
        logger.info('Connection established (total: %d)',
                    len(self.connections) + 1)
        return connection
    def close(self):
        # Close every pooled connection.
        for connection in self.connections:
            connection.close()
    async def create_connection(self):
        # Open a connection and add it to the pool.
        connection = await self.connect()
        self.connections.append(connection)
    async def acquire(self) -> FCMXMPPConnection:
        """Return a non-busy connection, creating one if the pool allows.

        When the pool is saturated, polls every 10 ms until a connection
        frees up.

        :raises ConnectionError: if opening a new connection fails.
        """
        # Fast path: reuse any free connection without taking the lock.
        for connection in self.connections:
            if not connection.is_busy:
                return connection
        else:
            # for/else: no free connection was found; serialize pool growth.
            await self._lock.acquire()
            # Re-check under the lock: another waiter may have freed/created
            # a connection meanwhile.
            for connection in self.connections:
                if not connection.is_busy:
                    self._lock.release()
                    return connection
            if len(self.connections) < self.max_connections:
                try:
                    connection = await self.connect()
                except Exception as e:
                    logger.error('Could not connect to server: %s', e)
                    self._lock.release()
                    raise ConnectionError
                self.connections.append(connection)
                self._lock.release()
                return connection
            else:
                # Pool is full: release the lock and poll for a free slot.
                self._lock.release()
                while True:
                    await asyncio.sleep(0.01)
                    for connection in self.connections:
                        if not connection.is_busy:
                            return connection
    async def send_message(self, message: Message) -> MessageResponse:
        """Send a message, retrying until FCM acknowledges it.

        NOTE(review): once `attempt` exceeds MAX_ATTEMPTS this only logs a
        warning per attempt; the loop never gives up -- confirm intended.
        """
        attempt = 0
        while True:
            attempt += 1
            if attempt > self.MAX_ATTEMPTS:
                logger.warning('Trying to send message %s: attempt #%s',
                               message.message_id, attempt)
            logger.debug('Message %s: waiting for connection',
                         message.message_id)
            try:
                connection = await self.acquire()
            except ConnectionError:
                logger.warning('Could not send notification %s due to '
                               'connection problem', message.message_id)
                await asyncio.sleep(1)
                continue
            logger.debug('Message %s: connection %s acquired',
                         message.message_id, connection)
            try:
                response = await connection.send_message(message)
                return response
            except ConnectionClosed:
                logger.warning('Could not send message %s: '
                               'ConnectionClosed', message.message_id)
            except Exception as e:
                logger.error('Could not send message %s: %s',
                             message.message_id, e)
    @staticmethod
    def __exception_handler(_, context):
        # Suppress noisy OpenSSL syscall errors; log everything else.
        exc = context.get('exception')
        if not isinstance(exc, OpenSSL.SSL.SysCallError):
            logger.exception(exc)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class FailoverGroupsOperations(object):
"""FailoverGroupsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the request. Constant value: "2015-05-01-preview".
"""
    def __init__(self, client, config, serializer, deserializer):
        # Service-client plumbing shared by every operation below.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Fixed API version for this generated operations group.
        self.api_version = "2015-05-01-preview"
        self.config = config
    def get(
            self, resource_group_name, server_name, failover_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets a failover group.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server containing the failover
         group.
        :type server_name: str
        :param failover_group_name: The name of the failover group.
        :type failover_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`FailoverGroup <azure.mgmt.sql.models.FailoverGroup>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/failoverGroups/{failoverGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'failoverGroupName': self._serialize.url("failover_group_name", failover_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # uuid1 provides a unique per-request correlation id for tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        # Only 200 is a success for a point GET; anything else is a CloudError.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('FailoverGroup', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, server_name, failover_group_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a failover group.

        This is a long-running operation: unless ``raw`` is set, it returns
        an AzureOperationPoller that polls until the PUT completes.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server containing the failover
         group.
        :type server_name: str
        :param failover_group_name: The name of the failover group.
        :type failover_group_name: str
        :param parameters: The failover group parameters.
        :type parameters: :class:`FailoverGroup
         <azure.mgmt.sql.models.FailoverGroup>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`FailoverGroup
         <azure.mgmt.sql.models.FailoverGroup>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/failoverGroups/{failoverGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'failoverGroupName': self._serialize.url("failover_group_name", failover_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'FailoverGroup')
        # Construct and send request
        def long_running_send():
            # Initial PUT that kicks off the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Poll the operation-status endpoint Azure returns.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # 200 = updated, 201 = created, 202 = accepted (still running).
            if response.status_code not in [200, 202, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('FailoverGroup', response)
            if response.status_code == 201:
                deserialized = self._deserialize('FailoverGroup', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        if raw:
            # raw mode skips polling and returns the first response as-is.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def delete(
            self, resource_group_name, server_name, failover_group_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a failover group.

        This is a long-running operation: unless ``raw`` is set, it returns
        an AzureOperationPoller that polls until the DELETE completes.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server containing the failover
         group.
        :type server_name: str
        :param failover_group_name: The name of the failover group.
        :type failover_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/failoverGroups/{failoverGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'failoverGroupName': self._serialize.url("failover_group_name", failover_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        def long_running_send():
            # Initial DELETE that kicks off the long-running operation.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Poll the operation-status endpoint Azure returns.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # 200/202/204 are acceptable outcomes for a delete; the
            # operation has no body, so the non-raw result is None.
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        if raw:
            # raw mode skips polling and returns the first response as-is.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def update(
            self, resource_group_name, server_name, failover_group_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Updates a failover group.

        This is a long-running operation issued as an HTTP PATCH: unless
        ``raw`` is set, it returns an AzureOperationPoller.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server containing the failover
         group.
        :type server_name: str
        :param failover_group_name: The name of the failover group.
        :type failover_group_name: str
        :param parameters: The failover group parameters.
        :type parameters: :class:`FailoverGroup
         <azure.mgmt.sql.models.FailoverGroup>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`FailoverGroup
         <azure.mgmt.sql.models.FailoverGroup>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/failoverGroups/{failoverGroupName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'failoverGroupName': self._serialize.url("failover_group_name", failover_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'FailoverGroup')
        # Construct and send request
        def long_running_send():
            # Initial PATCH that kicks off the long-running operation.
            request = self._client.patch(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Poll the operation-status endpoint Azure returns.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # 200 carries the updated resource; 202 means still running.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('FailoverGroup', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        if raw:
            # raw mode skips polling and returns the first response as-is.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def list_by_server(
            self, resource_group_name, server_name, custom_headers=None, raw=False, **operation_config):
        """Lists the failover groups in a server.

        Returns a paged collection; pages are fetched lazily via the
        ``internal_paging`` callback as the result is iterated.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server containing the failover
         group.
        :type server_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`FailoverGroupPaged
         <azure.mgmt.sql.models.FailoverGroupPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL; subsequent pages follow
            # the server-supplied nextLink verbatim.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/failoverGroups'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serverName': self._serialize.url("server_name", server_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.FailoverGroupPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.FailoverGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def failover(
            self, resource_group_name, server_name, failover_group_name, custom_headers=None, raw=False, **operation_config):
        """Fails over from the current primary server to this server.

        This is a long-running POST to the ``/failover`` action: unless
        ``raw`` is set, it returns an AzureOperationPoller.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server containing the failover
         group.
        :type server_name: str
        :param failover_group_name: The name of the failover group.
        :type failover_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`FailoverGroup
         <azure.mgmt.sql.models.FailoverGroup>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/failoverGroups/{failoverGroupName}/failover'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'failoverGroupName': self._serialize.url("failover_group_name", failover_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        def long_running_send():
            # Initial POST that kicks off the failover action.
            request = self._client.post(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)
        def get_long_running_status(status_link, headers=None):
            # Poll the operation-status endpoint Azure returns.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            # 200 carries the resulting resource; 202 means still running.
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('FailoverGroup', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        if raw:
            # raw mode skips polling and returns the first response as-is.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def force_failover_allow_data_loss(
            self, resource_group_name, server_name, failover_group_name, custom_headers=None, raw=False, **operation_config):
        """Fails over from the current primary server to this server. This
        operation might result in data loss.

        :param resource_group_name: The name of the resource group that
         contains the resource. You can obtain this value from the Azure
         Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server containing the failover
         group.
        :type server_name: str
        :param failover_group_name: The name of the failover group.
        :type failover_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`FailoverGroup
         <azure.mgmt.sql.models.FailoverGroup>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/failoverGroups/{failoverGroupName}/forceFailoverAllowDataLoss'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serverName': self._serialize.url("server_name", server_name, 'str'),
            'failoverGroupName': self._serialize.url("failover_group_name", failover_group_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Tag the request with a client-side correlation id.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        # Initial POST that starts the long-running failover operation.
        def long_running_send():
            request = self._client.post(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)
        # Poll the status link returned by the service until completion.
        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        # Translate the final response: 200 carries the FailoverGroup body,
        # 202 is accepted/no body; anything else is surfaced as CloudError.
        def get_long_running_output(response):
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('FailoverGroup', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        # raw=True bypasses the poller: send once and return immediately.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
| |
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Controller coordinates sampling and training model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
import numpy as np
import pickle
import random
# Short aliases for the TensorFlow flag and file-IO modules used below.
flags = tf.flags
gfile = tf.gfile
FLAGS = flags.FLAGS
def find_best_eps_lambda(rewards, lengths):
  """Binary-search for the lambda whose divergence matches the desired
  epsilon = FLAGS.max_divergence (scaled by the mean episode length)."""
  # perhaps not the best way to do this
  target_div = FLAGS.max_divergence * np.mean(lengths)

  def divergence_at(lam):
    # Log-partition computed with the usual max-shift for stability.
    peak = np.max(rewards)
    log_z = (peak / lam +
             np.log(np.mean(np.exp((rewards - peak) / lam))))
    weighted = np.mean(np.exp(rewards / lam - log_z) *
                       rewards / lam)
    return weighted - log_z

  lo, hi = 0.0, 1000.0
  if len(rewards) <= 8:
    # Too few samples for a meaningful estimate; fall back to the midpoint.
    return (lo + hi) / 2

  steps = max(4, 1 + int(np.log((hi - lo) / 0.1) / np.log(2.0)))
  for _ in range(steps):
    mid = (lo + hi) / 2
    if divergence_at(mid) > target_div:
      lo = mid
    else:
      hi = mid
  return (lo + hi) / 2
class Controller(object):
  """Coordinates sampling episodes from the environment and training the model.

  Maintains per-environment rolling state (observations, actions, RNN state),
  batches sampled episodes for training, and optionally mirrors them into a
  replay buffer.
  """

  def __init__(self, env, env_spec, internal_dim,
               use_online_batch=True,
               batch_by_steps=False,
               unify_episodes=False,
               replay_batch_size=None,
               max_step=None,
               cutoff_agent=1,
               save_trajectories_file=None,
               use_trust_region=False,
               use_value_opt=False,
               update_eps_lambda=False,
               prioritize_by='rewards',
               get_model=None,
               get_replay_buffer=None,
               get_buffer_seeds=None):
    self.env = env
    self.env_spec = env_spec
    self.internal_dim = internal_dim
    self.use_online_batch = use_online_batch
    self.batch_by_steps = batch_by_steps
    self.unify_episodes = unify_episodes
    self.replay_batch_size = replay_batch_size
    self.max_step = max_step
    self.cutoff_agent = cutoff_agent
    self.save_trajectories_file = save_trajectories_file
    self.use_trust_region = use_trust_region
    self.use_value_opt = use_value_opt
    self.update_eps_lambda = update_eps_lambda
    self.prioritize_by = prioritize_by

    self.model = get_model()
    self.replay_buffer = get_replay_buffer()
    self.seed_replay_buffer(get_buffer_seeds())

    # Fix: previously only assigned inside train(), so calling
    # add_to_replay_buffer() before the first train() raised AttributeError
    # when prioritizing by step.
    self.cur_step = 0

    # Rolling per-environment sampling state.
    self.internal_state = np.array([self.initial_internal_state()] *
                                   len(self.env))
    self.last_obs = self.env_spec.initial_obs(len(self.env))
    self.last_act = self.env_spec.initial_act(len(self.env))
    self.last_pad = np.zeros(len(self.env))

    self.start_episode = np.array([True] * len(self.env))
    self.step_count = np.array([0] * len(self.env))
    self.episode_running_rewards = np.zeros(len(self.env))
    self.episode_running_lengths = np.zeros(len(self.env))

    # Bounded histories (last 100) of completed-episode statistics.
    self.episode_rewards = []
    self.greedy_episode_rewards = []
    self.episode_lengths = []
    self.total_rewards = []
    self.best_batch_rewards = None

  def setup(self, train=True):
    """Build the model graph (including training ops when train=True)."""
    self.model.setup(train=train)

  def initial_internal_state(self):
    """Zero RNN state used at the start of each episode."""
    return np.zeros(self.model.policy.rnn_state_dim)

  def _sample_episodes(self, sess, greedy=False):
    """Sample episodes from environment using model."""
    # reset environments as necessary
    obs_after_reset = self.env.reset_if(self.start_episode)

    # Re-initialize rolling state for every environment that was reset.
    for i, obs in enumerate(obs_after_reset):
      if obs is not None:
        self.step_count[i] = 0
        self.internal_state[i] = self.initial_internal_state()
        for j in range(len(self.env_spec.obs_dims)):
          self.last_obs[j][i] = obs[j]
        for j in range(len(self.env_spec.act_dims)):
          self.last_act[j][i] = -1
        self.last_pad[i] = 0

    # maintain episode as a single unit if the last sampling
    # batch ended before the episode was terminated
    if self.unify_episodes:
      assert len(obs_after_reset) == 1
      new_ep = obs_after_reset[0] is not None
    else:
      new_ep = True

    # (len() of the list directly; the previous [:] copy was redundant)
    self.start_id = 0 if new_ep else len(self.all_obs)

    initial_state = self.internal_state
    all_obs = [] if new_ep else self.all_obs[:]
    all_act = ([self.last_act] if new_ep else self.all_act[:])
    all_pad = [] if new_ep else self.all_pad[:]
    rewards = [] if new_ep else self.rewards[:]

    # start stepping in the environments
    step = 0
    while not self.env.all_done():
      self.step_count += 1 - np.array(self.env.dones)

      next_internal_state, sampled_actions = self.model.sample_step(
          sess, self.last_obs, self.internal_state, self.last_act,
          greedy=greedy)

      env_actions = self.env_spec.convert_actions_to_env(sampled_actions)
      next_obs, reward, next_dones, _ = self.env.step(env_actions)

      all_obs.append(self.last_obs)
      all_act.append(sampled_actions)
      all_pad.append(self.last_pad)
      rewards.append(reward)

      self.internal_state = next_internal_state
      self.last_obs = next_obs
      self.last_act = sampled_actions
      self.last_pad = np.array(next_dones).astype('float32')

      step += 1
      if self.max_step and step >= self.max_step:
        break

    # Keep copies so a continuation batch can resume mid-episode.
    self.all_obs = all_obs[:]
    self.all_act = all_act[:]
    self.all_pad = all_pad[:]
    self.rewards = rewards[:]

    # append final observation
    all_obs.append(self.last_obs)

    return initial_state, all_obs, all_act, rewards, all_pad

  def sample_episodes(self, sess, greedy=False):
    """Sample steps from the environment until we have enough for a batch."""
    # check if last batch ended with episode that was not terminated
    if self.unify_episodes:
      self.all_new_ep = self.start_episode[0]

    # sample episodes until we either have enough episodes or enough steps
    # NOTE(review): assumes self.max_step is set (not None) here — confirm
    # all callers configure it.
    episodes = []
    total_steps = 0
    while total_steps < self.max_step * len(self.env):
      (initial_state,
       observations, actions, rewards,
       pads) = self._sample_episodes(sess, greedy=greedy)

      # Fix: materialize the transposed sequences.  Under Python 3 zip()
      # returns a one-shot iterator, but these values are consumed more
      # than once below (episode conversion, trajectory saving, return).
      observations = list(zip(*observations))
      actions = list(zip(*actions))

      terminated = np.array(self.env.dones)

      self.total_rewards = np.sum(np.array(rewards[self.start_id:]) *
                                  (1 - np.array(pads[self.start_id:])), axis=0)
      # Zero the running totals for environments that started a new episode.
      self.episode_running_rewards *= 1 - self.start_episode
      self.episode_running_lengths *= 1 - self.start_episode
      self.episode_running_rewards += self.total_rewards
      self.episode_running_lengths += np.sum(1 - np.array(pads[self.start_id:]), axis=0)

      episodes.extend(self.convert_from_batched_episodes(
          initial_state, observations, actions, rewards,
          terminated, pads))
      total_steps += np.sum(1 - np.array(pads))

      # set next starting episodes
      self.start_episode = np.logical_or(terminated,
                                         self.step_count >= self.cutoff_agent)
      episode_rewards = self.episode_running_rewards[self.start_episode].tolist()
      self.episode_rewards.extend(episode_rewards)
      self.episode_lengths.extend(self.episode_running_lengths[self.start_episode].tolist())
      self.episode_rewards = self.episode_rewards[-100:]
      self.episode_lengths = self.episode_lengths[-100:]

      # Persist the best-scoring batch of trajectories, if requested.
      if (self.save_trajectories_file is not None and
          (self.best_batch_rewards is None or
           np.mean(self.total_rewards) > self.best_batch_rewards)):
        self.best_batch_rewards = np.mean(self.total_rewards)
        my_episodes = self.convert_from_batched_episodes(
            initial_state, observations, actions, rewards,
            terminated, pads)
        with gfile.GFile(self.save_trajectories_file, 'w') as f:
          pickle.dump(my_episodes, f)

      if not self.batch_by_steps:
        return (initial_state,
                observations, actions, rewards,
                terminated, pads)

    return self.convert_to_batched_episodes(episodes)

  def _train(self, sess,
             observations, initial_state, actions,
             rewards, terminated, pads):
    """Train model using batch."""
    avg_episode_reward = np.mean(self.episode_rewards)
    greedy_episode_reward = (np.mean(self.greedy_episode_rewards)
                             if self.greedy_episode_rewards else
                             avg_episode_reward)
    loss, summary = None, None
    if self.use_trust_region:
      # use trust region to optimize policy
      loss, _, summary = self.model.trust_region_step(
          sess,
          observations, initial_state, actions,
          rewards, terminated, pads,
          avg_episode_reward=avg_episode_reward,
          greedy_episode_reward=greedy_episode_reward)
    else:  # otherwise use simple gradient descent on policy
      loss, _, summary = self.model.train_step(
          sess,
          observations, initial_state, actions,
          rewards, terminated, pads,
          avg_episode_reward=avg_episode_reward,
          greedy_episode_reward=greedy_episode_reward)

    if self.use_value_opt:  # optionally perform specific value optimization
      self.model.fit_values(
          sess,
          observations, initial_state, actions,
          rewards, terminated, pads)

    return loss, summary

  def train(self, sess):
    """Sample some episodes and train on some episodes."""
    cur_step = sess.run(self.model.inc_global_step)
    self.cur_step = cur_step

    # on the first iteration, set target network close to online network
    if self.cur_step == 0:
      for _ in range(100):
        sess.run(self.model.copy_op)
    # on other iterations, just perform single target <-- online operation
    sess.run(self.model.copy_op)

    # sample from env
    (initial_state,
     observations, actions, rewards,
     terminated, pads) = self.sample_episodes(sess)

    # add to replay buffer
    self.add_to_replay_buffer(
        initial_state, observations, actions,
        rewards, terminated, pads)

    loss, summary = 0, None

    # train on online batch
    if self.use_online_batch:
      loss, summary = self._train(
          sess,
          observations, initial_state, actions,
          rewards, terminated, pads)

    # update relative entropy coefficient
    if self.update_eps_lambda:
      episode_rewards = np.array(self.episode_rewards)
      episode_lengths = np.array(self.episode_lengths)
      eps_lambda = find_best_eps_lambda(
          episode_rewards[-20:], episode_lengths[-20:])
      sess.run(self.model.objective.assign_eps_lambda,
               feed_dict={self.model.objective.new_eps_lambda: eps_lambda})

    # train on replay batch
    replay_batch, replay_probs = self.get_from_replay_buffer(
        self.replay_batch_size)

    if replay_batch:
      (initial_state,
       observations, actions, rewards,
       terminated, pads) = replay_batch
      loss, summary = self._train(
          sess,
          observations, initial_state, actions,
          rewards, terminated, pads)

    return loss, summary, self.total_rewards, self.episode_rewards

  def eval(self, sess):
    """Evaluate the policy with greedy sampling."""
    # Fix: sample_episodes returns (..., terminated, pads); the previous
    # unpacking swapped pads and terminated, so the reward totals were
    # masked with the termination flags instead of the padding mask.
    (initial_state,
     observations, actions, rewards,
     terminated, pads) = self.sample_episodes(sess, greedy=True)
    total_rewards = np.sum(np.array(rewards) * (1 - np.array(pads)), axis=0)
    return total_rewards, self.episode_rewards

  def convert_from_batched_episodes(
      self, initial_state, observations, actions, rewards,
      terminated, pads):
    """Convert time-major batch of episodes to batch-major list of episodes."""
    rewards = np.array(rewards)
    pads = np.array(pads)
    observations = [np.array(obs) for obs in observations]
    actions = [np.array(act) for act in actions]

    total_rewards = np.sum(rewards * (1 - pads), axis=0)
    total_length = np.sum(1 - pads, axis=0).astype('int32')

    episodes = []
    num_episodes = rewards.shape[1]
    for i in range(num_episodes):
      length = total_length[i]
      ep_initial = initial_state[i]
      # Observations/actions keep one extra step (the final observation).
      ep_obs = [obs[:length + 1, i, ...] for obs in observations]
      ep_act = [act[:length + 1, i, ...] for act in actions]
      ep_rewards = rewards[:length, i]

      episodes.append(
          [ep_initial, ep_obs, ep_act, ep_rewards, terminated[i]])

    return episodes

  def convert_to_batched_episodes(self, episodes, max_length=None):
    """Convert batch-major list of episodes to time-major batch of episodes."""
    lengths = [len(ep[-2]) for ep in episodes]
    max_length = max_length or max(lengths)

    new_episodes = []
    for ep, length in zip(episodes, lengths):
      initial, observations, actions, rewards, terminated = ep
      # Pad every sequence out to max_length (+1 for the final observation).
      observations = [np.resize(obs, [max_length + 1] + list(obs.shape)[1:])
                      for obs in observations]
      actions = [np.resize(act, [max_length + 1] + list(act.shape)[1:])
                 for act in actions]
      pads = np.array([0] * length + [1] * (max_length - length))
      rewards = np.resize(rewards, [max_length]) * (1 - pads)
      new_episodes.append([initial, observations, actions, rewards,
                           terminated, pads])

    (initial, observations, actions, rewards,
     terminated, pads) = zip(*new_episodes)

    # Swap batch-major arrays back to time-major.
    observations = [np.swapaxes(obs, 0, 1)
                    for obs in zip(*observations)]
    actions = [np.swapaxes(act, 0, 1)
               for act in zip(*actions)]
    rewards = np.transpose(rewards)
    pads = np.transpose(pads)

    return (initial, observations, actions, rewards, terminated, pads)

  def add_to_replay_buffer(self, initial_state,
                           observations, actions, rewards,
                           terminated, pads):
    """Add batch of episodes to replay buffer."""
    if self.replay_buffer is None:
      return

    rewards = np.array(rewards)
    pads = np.array(pads)
    total_rewards = np.sum(rewards * (1 - pads), axis=0)

    episodes = self.convert_from_batched_episodes(
        initial_state, observations, actions, rewards,
        terminated, pads)

    # Fix: the constructor default is 'rewards' but only 'reward' was
    # recognized here, so reward-based prioritization silently never
    # engaged; accept both spellings.
    priorities = (total_rewards
                  if self.prioritize_by in ('reward', 'rewards')
                  else self.cur_step)

    if not self.unify_episodes or self.all_new_ep:
      self.last_idxs = self.replay_buffer.add(
          episodes, priorities)
    else:
      # If we are unifying episodes, we attempt to
      # keep them unified in the replay buffer.
      # The first episode sampled in the current batch is a
      # continuation of the last episode from the previous batch
      self.replay_buffer.add(episodes[:1], priorities, self.last_idxs[-1:])
      if len(episodes) > 1:
        self.replay_buffer.add(episodes[1:], priorities)

  def get_from_replay_buffer(self, batch_size):
    """Sample a batch of episodes from the replay buffer."""
    if self.replay_buffer is None or len(self.replay_buffer) < 1 * batch_size:
      return None, None

    desired_count = batch_size * self.max_step
    # in the case of batch_by_steps, we sample larger and larger
    # amounts from the replay buffer until we have enough steps.
    while True:
      if batch_size > len(self.replay_buffer):
        batch_size = len(self.replay_buffer)
      episodes, probs = self.replay_buffer.get_batch(batch_size)
      count = sum(len(ep[-2]) for ep in episodes)
      if count >= desired_count or not self.batch_by_steps:
        break
      if batch_size == len(self.replay_buffer):
        return None, None
      # NOTE(review): batch_size becomes a float here; presumably
      # get_batch tolerates non-integer sizes — confirm.
      batch_size *= 1.2

    return (self.convert_to_batched_episodes(episodes), probs)

  def seed_replay_buffer(self, episodes):
    """Seed the replay buffer with some episodes."""
    if self.replay_buffer is None:
      return

    # just need to add initial state
    for i in range(len(episodes)):
      episodes[i] = [self.initial_internal_state()] + episodes[i]

    self.replay_buffer.seed_buffer(episodes)
| |
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from c7n_azure import constants
from c7n_azure.actions.logic_app import LogicAppAction
from azure.mgmt.resourcegraph.models import QueryRequest
from c7n_azure.actions.notify import Notify
from c7n_azure.filters import ParentFilter
from c7n_azure.provider import resources
from c7n.actions import ActionRegistry
from c7n.exceptions import PolicyValidationError
from c7n.filters import FilterRegistry
from c7n.manager import ResourceManager
from c7n.query import sources, MaxResourceLimit
from c7n.utils import local_session
# Module-level logger shared by all query sources and managers below.
log = logging.getLogger('custodian.azure.query')
class ResourceQuery(object):
    """Executes an Azure SDK enumeration call for a resource manager and
    normalizes the results to serialized dicts."""

    def __init__(self, session_factory):
        self.session_factory = session_factory

    def filter(self, resource_manager, **params):
        """Enumerate all resources of the manager's declared type.

        Merges static and manager-derived extra args into the SDK call,
        then serializes either the iterable result or its ``.value`` page.
        Raises TypeError when the SDK returns something non-iterable.
        """
        m = resource_manager.resource_type
        enum_op, list_op, extra_args = m.enum_spec
        if extra_args:
            params.update(extra_args)
        params.update(m.extra_args(resource_manager))
        try:
            op = getattr(getattr(resource_manager.get_client(), enum_op), list_op)
            result = op(**params)
            if isinstance(result, Iterable):
                return [r.serialize(True) for r in result]
            elif hasattr(result, 'value'):
                return [r.serialize(True) for r in result.value]
        except Exception as e:
            log.error("Failed to query resource.\n"
                      "Type: azure.{0}.\n"
                      "Error: {1}".format(resource_manager.type, e))
            raise
        # Fix: the two fragments previously concatenated without a space,
        # producing "...a returnvalue which...".
        raise TypeError("Enumerating resources resulted in a return "
                        "value which could not be iterated.")

    @staticmethod
    def resolve(resource_type):
        """Validate that *resource_type* is a class and return it."""
        if not isinstance(resource_type, type):
            raise ValueError(resource_type)
        else:
            m = resource_type
        return m
@sources.register('describe-azure')
class DescribeSource(object):
    """Default source: enumerates resources via the Azure SDK client."""

    resource_query_factory = ResourceQuery

    def __init__(self, manager):
        self.manager = manager
        self.query = self.resource_query_factory(self.manager.session_factory)

    def validate(self):
        """No source-specific validation is required."""
        pass

    def get_permissions(self):
        """Permissions are not modelled; report none."""
        return ()

    def get_resources(self, query):
        """Fetch all resources of the manager's type (query arg unused)."""
        return self.query.filter(self.manager)

    def augment(self, resources):
        """Post-processing hook; identity by default."""
        return resources
@sources.register('resource-graph')
class ResourceGraphSource(object):
    """Source backed by the Azure Resource Graph query service."""

    def __init__(self, manager):
        self.manager = manager

    def validate(self):
        """Resource Graph requires the type to declare a 'resource_type'."""
        if not hasattr(self.manager.resource_type, 'resource_type'):
            raise PolicyValidationError(
                "%s is not supported with the Azure Resource Graph source."
                % self.manager.data['resource'])

    def get_resources(self, _):
        """Run a Resource Graph query and return its rows as dicts."""
        log.warning('The Azure Resource Graph source '
                    'should not be used in production scenarios at this time.')
        session = self.manager.get_session()
        graph_client = session.client('azure.mgmt.resourcegraph.ResourceGraphClient')

        # empty scope will return all resource
        scope = ""
        if self.manager.resource_type.resource_type != 'armresource':
            scope = "where type =~ '%s'" % self.manager.resource_type.resource_type

        request = QueryRequest(
            query=scope,
            subscriptions=[session.get_subscription_id()]
        )
        response = graph_client.resources(request)
        column_names = [c['name'] for c in response.data['columns']]
        return [dict(zip(column_names, row)) for row in response.data['rows']]

    def get_permissions(self):
        """Permissions are not modelled; report none."""
        return ()

    def augment(self, resources):
        """Post-processing hook; identity by default."""
        return resources
class ChildResourceQuery(ResourceQuery):
    """A resource query for resources that must be queried with parent information.

    Several resource types can only be queried in the context of their
    parents identifiers. ie. SQL and Cosmos databases
    """

    def filter(self, resource_manager, **params):
        """Query a set of resources, one parent at a time.

        Enumeration errors for a single parent are logged; they abort the
        whole query only when the type sets ``raise_on_exception``.
        """
        m = self.resolve(resource_manager.resource_type)  # type: ChildTypeInfo
        parents = resource_manager.get_parent_manager()

        # Have to query separately for each parent's children.
        results = []
        for parent in parents.resources():
            try:
                subset = resource_manager.enumerate_resources(parent, m, **params)

                if subset:
                    # If required, append parent resource ID to all child resources
                    if m.annotate_parent:
                        for r in subset:
                            r[m.parent_key] = parent[parents.resource_type.id]

                    results.extend(subset)
            except Exception as e:
                log.warning('Child enumeration failed for {0}. {1}'
                            .format(parent[parents.resource_type.id], e))
                if m.raise_on_exception:
                    # Fix: bare raise preserves the original traceback;
                    # 'raise e' restarted it from this frame.
                    raise

        return results
@sources.register('describe-child-azure')
class ChildDescribeSource(DescribeSource):
    # Same as DescribeSource but enumerates each child type under its parents.
    resource_query_factory = ChildResourceQuery
class TypeMeta(type):
    """Metaclass giving TypeInfo classes a readable repr."""

    def __repr__(cls):
        description = "<Type info service:%s client: %s>" % (
            cls.service, cls.client)
        return description
@six.add_metaclass(TypeMeta)
class TypeInfo(object):
    # Fix: this string previously appeared after 'doc_groups', where it was
    # a no-op expression statement rather than the class docstring.
    """api client construction information"""

    doc_groups = None
    service = ''
    client = ''
    # Default id field, resources should override if different (used for meta filters, report etc)
    id = 'id'
    resource = constants.RESOURCE_ACTIVE_DIRECTORY

    @classmethod
    def extra_args(cls, resource_manager):
        # Hook: extra enumeration kwargs derived from the manager; none by default.
        return {}
@six.add_metaclass(TypeMeta)
class ChildTypeInfo(TypeInfo):
    """api client construction information for child resources"""
    # Registry name of the parent resource manager (e.g. the server type).
    parent_manager_name = ''
    # When True, each child gets its parent's id stored under parent_key.
    annotate_parent = True
    # Propagate per-parent enumeration failures instead of skipping them.
    raise_on_exception = True
    parent_key = 'c7n:parent-id'
    @classmethod
    def extra_args(cls, parent_resource):
        # Hook: extra enumeration kwargs derived from the parent; none by default.
        return {}
class QueryMeta(type):
    """metaclass to have consistent action/filter registry for new resources."""

    def __new__(cls, name, parents, attrs):
        lowered = name.lower()
        # Only build registries that the class body did not supply itself.
        if 'filter_registry' not in attrs:
            attrs['filter_registry'] = FilterRegistry('%s.filters' % lowered)
        if 'action_registry' not in attrs:
            attrs['action_registry'] = ActionRegistry('%s.actions' % lowered)
        return super(QueryMeta, cls).__new__(cls, name, parents, attrs)
@six.add_metaclass(QueryMeta)
class QueryResourceManager(ResourceManager):
    """Base resource manager for Azure resources enumerated via an SDK client.

    Resolves a pluggable source (describe / resource-graph), caches query
    results, applies filters, and enforces policy resource limits.
    """
    class resource_type(TypeInfo):
        # Subclasses override with service/client/enum_spec details.
        pass
    def __init__(self, data, options):
        super(QueryResourceManager, self).__init__(data, options)
        self.source = self.get_source(self.source_type)
        self._session = None
    def augment(self, resources):
        # Hook for subclasses to enrich raw resource dicts; identity here.
        return resources
    def get_permissions(self):
        # Permissions are not modelled for Azure; report none.
        return ()
    def get_source(self, source_type):
        # Instantiate the registered source implementation for this manager.
        return sources.get(source_type)(self)
    def get_session(self):
        # Lazily create and memoize the Azure session.
        if self._session is None:
            self._session = local_session(self.session_factory)
        return self._session
    def get_client(self, service=None):
        """Return the SDK client, defaulting to the one declared on resource_type."""
        if not service:
            return self.get_session().client(
                "%s.%s" % (self.resource_type.service, self.resource_type.client))
        return self.get_session().client(service)
    def get_cache_key(self, query):
        return {'source_type': self.source_type, 'query': query}
    @classmethod
    def get_model(cls):
        # Validate and return the resource_type class describing this resource.
        return ResourceQuery.resolve(cls.resource_type)
    @property
    def source_type(self):
        return self.data.get('source', 'describe-azure')
    def resources(self, query=None):
        """Return filtered resources, serving from the policy cache when possible."""
        cache_key = self.get_cache_key(query)
        resources = None
        if self._cache.load():
            resources = self._cache.get(cache_key)
            if resources is not None:
                self.log.debug("Using cached %s: %d" % (
                    "%s.%s" % (self.__class__.__module__,
                               self.__class__.__name__),
                    len(resources)))
        if resources is None:
            # Cache miss: fetch from the source and persist for reuse.
            resources = self.augment(self.source.get_resources(query))
            self._cache.save(cache_key, resources)
        resource_count = len(resources)
        resources = self.filter_resources(resources)
        # Check if we're out of a policies execution limits.
        if self.data == self.ctx.policy.data:
            self.check_resource_limit(len(resources), resource_count)
        return resources
    def check_resource_limit(self, selection_count, population_count):
        """Check if policy's execution affects more resources then its limit.
        """
        p = self.ctx.policy
        max_resource_limits = MaxResourceLimit(p, selection_count, population_count)
        return max_resource_limits.check_resource_limits()
    def get_resources(self, resource_ids, **params):
        """Fetch specific resources by id via the type's get_spec operation."""
        resource_client = self.get_client()
        m = self.resource_type
        get_client, get_op, extra_args = m.get_spec
        if extra_args:
            params.update(extra_args)
        op = getattr(getattr(resource_client, get_client), get_op)
        data = [
            op(rid, **params)
            for rid in resource_ids
        ]
        return [r.serialize(True) for r in data]
    @staticmethod
    def register_actions_and_filters(registry, resource_class):
        # Registry hook: attach the shared notify / logic-app actions to
        # every registered resource class.
        resource_class.action_registry.register('notify', Notify)
        if 'logic-app' not in resource_class.action_registry:
            resource_class.action_registry.register('logic-app', LogicAppAction)
    def validate(self):
        self.source.validate()
@six.add_metaclass(QueryMeta)
class ChildResourceManager(QueryResourceManager):
    """Resource manager for child resources enumerated under a parent resource."""

    child_source = 'describe-child-azure'
    parent_manager = None

    @property
    def source_type(self):
        # Map the generic 'describe' source onto the child-aware source.
        source = self.data.get('source', self.child_source)
        if source == 'describe':
            source = self.child_source
        return source

    def get_parent_manager(self):
        # Lazily resolve and cache the parent resource manager.
        if not self.parent_manager:
            self.parent_manager = self.get_resource_manager(self.resource_type.parent_manager_name)
        return self.parent_manager

    def get_session(self):
        # Scope the session to the child type's auth resource when it is
        # not the default Active Directory resource.
        if self._session is None:
            session = super(ChildResourceManager, self).get_session()
            if self.resource_type.resource != constants.RESOURCE_ACTIVE_DIRECTORY:
                session = session.get_session_for_resource(self.resource_type.resource)
            self._session = session
        return self._session

    def enumerate_resources(self, parent_resource, type_info, **params):
        """Enumerate this type's resources under a single parent resource."""
        client = self.get_client()
        enum_op, list_op, extra_args = self.resource_type.enum_spec

        # There are 2 types of extra_args:
        # - static values stored in 'extra_args' dict (e.g. some type)
        # - dynamic values are retrieved via 'extra_args' method (e.g. parent name)
        if extra_args:
            params.update({key: extra_args[key](parent_resource) for key in extra_args.keys()})
        params.update(type_info.extra_args(parent_resource))

        # Some resources might not have enum_op piece (non-arm resources)
        if enum_op:
            op = getattr(getattr(client, enum_op), list_op)
        else:
            op = getattr(client, list_op)

        result = op(**params)

        if isinstance(result, Iterable):
            return [r.serialize(True) for r in result]
        elif hasattr(result, 'value'):
            return [r.serialize(True) for r in result.value]

        # Fix: the two fragments previously concatenated without a space,
        # producing "...a returnvalue which...".
        raise TypeError("Enumerating resources resulted in a return "
                        "value which could not be iterated.")

    @staticmethod
    def register_child_specific(registry, resource_class):
        # Registry hook: add child-only filters to ChildResourceManager types.
        if not issubclass(resource_class, ChildResourceManager):
            return

        # If Child Resource doesn't annotate parent, there is no way to filter based on
        # parent properties.
        if resource_class.resource_type.annotate_parent:
            resource_class.filter_registry.register('parent', ParentFilter)
# Wire the shared actions/filters into every resource class as it registers.
resources.subscribe(QueryResourceManager.register_actions_and_filters)
resources.subscribe(ChildResourceManager.register_child_specific)
| |
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import thread_cert
from pktverify.consts import MLE_PARENT_REQUEST, MLE_DATA_RESPONSE, MLE_DATA_REQUEST, MGMT_PENDING_SET_URI, SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ACTIVE_OPERATION_DATASET_TLV, ACTIVE_TIMESTAMP_TLV, PENDING_TIMESTAMP_TLV, TLV_REQUEST_TLV, NETWORK_DATA_TLV, NM_BORDER_AGENT_LOCATOR_TLV, NM_COMMISSIONER_SESSION_ID_TLV, NM_DELAY_TIMER_TLV, PENDING_OPERATION_DATASET_TLV, NWD_COMMISSIONING_DATA_TLV
from pktverify.packet_verifier import PacketVerifier
from pktverify.null_field import nullField
KEY1 = '00112233445566778899aabbccddeeff'  # initial network key
KEY2 = 'ffeeddccbbaa99887766554433221100'  # key rotated in via MGMT_PENDING_SET
CHANNEL_INIT = 19
PANID_INIT = 0xface
# Node ids used as keys into TOPOLOGY and self.nodes.
COMMISSIONER = 1
LEADER = 2
ROUTER1 = 3
ED1 = 4
SED1 = 5
# Test Purpose and Description:
# -----------------------------
# The purpose of this test case is to confirm the DUT correctly applies
# DELAY_TIMER_DEFAULT when the network key is changed.
# The Commissioner first tries to set a network key update to happen too
# soon (delay of 60s vs DELAY_TIMER_DEFAULT of 300s); the DUT is expected
# to override the short value and communicate an appropriately longer delay
# to the Router.
# The Commissioner then sets a delay time longer than default; the DUT is
# validated to not artificially clamp the longer time back to the
# DELAY_TIMER_DEFAULT value.
#
# Test Topology:
# -------------
# Commissioner
# |
# Leader
# |
# Router
# / \
# ED SED
#
# DUT Types:
# ----------
# Leader
class Cert_9_2_11_NetworkKey(thread_cert.TestCase):
    """Thread certification test 9.2.11: network key rotation (DUT = Leader).

    Drives two MGMT_PENDING_SET key rotations through the Commissioner and
    verifies via pktverify that the Leader enforces DELAY_TIMER_DEFAULT when
    the requested delay is too short, and does not clamp a delay that is
    longer than the default (see the test description comment above).
    """
    USE_MESSAGE_FACTORY = False
    SUPPORT_NCP = False

    # All nodes start on the same channel/PAN with KEY1 as the network key;
    # allowlists enforce the Commissioner-Leader-Router-(ED,SED) topology.
    TOPOLOGY = {
        COMMISSIONER: {
            'name': 'COMMISSIONER',
            'active_dataset': {
                'timestamp': 10,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT,
                'network_key': KEY1
            },
            'mode': 'rdn',
            'allowlist': [LEADER]
        },
        LEADER: {
            'name': 'LEADER',
            'active_dataset': {
                'timestamp': 10,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT,
                'network_key': KEY1
            },
            'mode': 'rdn',
            'allowlist': [COMMISSIONER, ROUTER1]
        },
        ROUTER1: {
            'name': 'ROUTER',
            'active_dataset': {
                'timestamp': 10,
                'panid': PANID_INIT,
                'channel': CHANNEL_INIT,
                'network_key': KEY1
            },
            'mode': 'rdn',
            'allowlist': [LEADER, ED1, SED1]
        },
        ED1: {
            'name': 'ED',
            'channel': CHANNEL_INIT,
            'is_mtd': True,
            'networkkey': KEY1,
            'mode': 'rn',
            'panid': PANID_INIT,
            'allowlist': [ROUTER1]
        },
        SED1: {
            'name': 'SED',
            'channel': CHANNEL_INIT,
            'is_mtd': True,
            'networkkey': KEY1,
            'mode': '-',
            'panid': PANID_INIT,
            'timeout': config.DEFAULT_CHILD_TIMEOUT,
            'allowlist': [ROUTER1]
        },
    }

    def test(self):
        """Bring up the topology, then rotate the network key twice."""
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[COMMISSIONER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
        self.nodes[COMMISSIONER].commissioner_start()
        self.simulator.go(3)
        self.nodes[ROUTER1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
        self.nodes[ED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ED1].get_state(), 'child')
        self.nodes[SED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[SED1].get_state(), 'child')
        self.collect_rlocs()
        self.collect_ipaddrs()
        # First rotation: request a 60s delay (shorter than the default); the
        # DUT is expected to stretch it, so wait past 300s before checking.
        self.nodes[COMMISSIONER].send_mgmt_pending_set(
            pending_timestamp=10,
            active_timestamp=70,
            delay_timer=60000,
            network_key=KEY2,
        )
        self.simulator.go(310)
        self.assertEqual(self.nodes[COMMISSIONER].get_networkkey(), KEY2)
        self.assertEqual(self.nodes[LEADER].get_networkkey(), KEY2)
        self.assertEqual(self.nodes[ROUTER1].get_networkkey(), KEY2)
        self.assertEqual(self.nodes[ED1].get_networkkey(), KEY2)
        self.assertEqual(self.nodes[SED1].get_networkkey(), KEY2)
        ipaddr = self.nodes[LEADER].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
        self.assertTrue(self.nodes[ROUTER1].ping(ipaddr))
        # Second rotation: 500s delay (longer than the default) back to KEY1;
        # the DUT must honour the longer delay, so wait past 500s.
        self.nodes[COMMISSIONER].send_mgmt_pending_set(
            pending_timestamp=20,
            active_timestamp=30,
            delay_timer=500000,
            network_key=KEY1,
        )
        self.simulator.go(510)
        self.assertEqual(self.nodes[COMMISSIONER].get_networkkey(), KEY1)
        self.assertEqual(self.nodes[LEADER].get_networkkey(), KEY1)
        self.assertEqual(self.nodes[ROUTER1].get_networkkey(), KEY1)
        self.assertEqual(self.nodes[ED1].get_networkkey(), KEY1)
        self.assertEqual(self.nodes[SED1].get_networkkey(), KEY1)
        ipaddr = self.nodes[LEADER].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
        self.assertTrue(self.nodes[ROUTER1].ping(ipaddr))

    def verify(self, pv):
        """Verify the captured packet log against the test plan steps."""
        pkts = pv.pkts
        pv.summary.show()
        # NOTE: these locals deliberately shadow the module-level node ids;
        # here they hold pktverify addresses/extended addresses.
        LEADER = pv.vars['LEADER']
        LEADER_MLEID = pv.vars['LEADER_MLEID']
        COMMISSIONER = pv.vars['COMMISSIONER']
        COMMISSIONER_RLOC = pv.vars['COMMISSIONER_RLOC']
        ROUTER = pv.vars['ROUTER']
        ROUTER_MLEID = pv.vars['ROUTER_MLEID']
        ED = pv.vars['ED']
        SED = pv.vars['SED']
        # Step 1: Ensure the topology is formed correctly
        for node in ('COMMISSIONER', 'ROUTER'):
            pv.verify_attached(node, 'LEADER')
        for node in ('ED', 'SED'):
            pv.verify_attached(node, 'ROUTER', 'MTD')
        _pkt = pkts.last()
        # Step 3: Leader sends MGMT_PENDING_SET.rsq to the Commissioner:
        #         CoAP Response Code
        #             2.04 Changed
        #         CoAP Payload
        #             - State TLV (value = Accept)
        #
        #         Leader MUST multicast MLE Data Response with the new network data,
        #         including the following TLVs:
        #             - Leader Data TLV:
        #                 Data Version field incremented
        #                 Stable Version field incremented
        #             - Network Data TLV:
        #                 - Commissioner Data TLV:
        #                     Stable flag set to 0
        #                     Border Agent Locator TLV
        #                     Commissioner Session ID TLV
        #             - Active Timestamp TLV: 10s
        #             - Pending Timestamp TLV: 10s
        pkts.filter_coap_ack(MGMT_PENDING_SET_URI).\
            filter_wpan_src64(LEADER).\
            filter_ipv6_dst(COMMISSIONER_RLOC).\
            must_next().\
            must_verify(lambda p: p.thread_meshcop.tlv.state == 1)
        pkts.filter_mle_cmd(MLE_DATA_RESPONSE).\
            filter_wpan_src64(LEADER).\
            filter_LLANMA().\
            filter(lambda p: p.mle.tlv.active_tstamp == 10 and\
                   p.mle.tlv.pending_tstamp == 10 and\
                   (p.mle.tlv.leader_data.data_version -
                    _pkt.mle.tlv.leader_data.data_version) % 256 <= 127 and\
                   (p.mle.tlv.leader_data.stable_data_version -
                    _pkt.mle.tlv.leader_data.stable_data_version) % 256 <= 127 and\
                   p.thread_nwd.tlv.stable == [0] and\
                   NWD_COMMISSIONING_DATA_TLV in p.thread_nwd.tlv.type and\
                   NM_COMMISSIONER_SESSION_ID_TLV in p.thread_meshcop.tlv.type and\
                   NM_BORDER_AGENT_LOCATOR_TLV in p.thread_meshcop.tlv.type
                   ).\
            must_next()
        # Step 5: Leader sends a MLE Data Response to Router including the following TLVs:
        #             - Source Address TLV
        #             - Leader Data TLV
        #             - Network Data TLV
        #                 - Commissioner Data TLV:
        #                     Stable flag set to 0
        #                     Border Agent Locator TLV
        #                     Commissioner Session ID TLV
        #             - Active Timestamp TLV
        #             - Pending Timestamp TLV
        #             - Pending Operational Dataset TLV
        #                 - Delay Timer TLV <greater than 200s>
        #                 - Network Key TLV: New Network Key
        #                 - Active Timestamp TLV <70s>
        _dr_pkt = pkts.filter_mle_cmd(MLE_DATA_RESPONSE).\
            filter_wpan_src64(LEADER).\
            filter_wpan_dst64(ROUTER).\
            filter(lambda p: {
                       SOURCE_ADDRESS_TLV,
                       LEADER_DATA_TLV,
                       ACTIVE_TIMESTAMP_TLV,
                       PENDING_TIMESTAMP_TLV,
                       PENDING_OPERATION_DATASET_TLV
                   } <= set(p.mle.tlv.type) and\
                   p.thread_nwd.tlv.stable == [0] and\
                   NWD_COMMISSIONING_DATA_TLV in p.thread_nwd.tlv.type and\
                   NM_COMMISSIONER_SESSION_ID_TLV in p.thread_meshcop.tlv.type and\
                   NM_BORDER_AGENT_LOCATOR_TLV in p.thread_meshcop.tlv.type and\
                   p.thread_meshcop.tlv.delay_timer > 200000 and\
                   p.thread_meshcop.tlv.master_key == KEY2 and\
                   p.thread_meshcop.tlv.active_tstamp == 70
                   ).\
            must_next()
        # Step 8: Verify all devices now use New Network key.
        #         checked in test()
        # Step 9: Verify new MAC key is generated and used when sending ICMPv6 Echo Reply
        #         is received.
        _pkt = pkts.filter_ping_request().\
            filter_wpan_src64(ROUTER).\
            filter_ipv6_dst(LEADER_MLEID).\
            must_next()
        pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
            filter_wpan_src64(LEADER).\
            filter_ipv6_dst(ROUTER_MLEID).\
            must_next()
        # Step 11: Leader sends MGMT_PENDING_SET.rsq to the Commissioner:
        #          CoAP Response Code
        #              2.04 Changed
        #          CoAP Payload
        #              - State TLV (value = Accept)
        #
        #          Leader MUST multicast MLE Data Response with the new network data,
        #          including the following TLVs:
        #              - Leader Data TLV:
        #                  Data Version field incremented
        #                  Stable Version field incremented
        #              - Network Data TLV:
        #                  - Commissioner Data TLV:
        #                      Stable flag set to 0
        #                      Border Agent Locator TLV
        #                      Commissioner Session ID TLV
        #              - Active Timestamp TLV: 70s
        #              - Pending Timestamp TLV: 20s
        pkts.filter_coap_ack(MGMT_PENDING_SET_URI).\
            filter_wpan_src64(LEADER).\
            filter_ipv6_dst(COMMISSIONER_RLOC).\
            must_next().\
            must_verify(lambda p: p.thread_meshcop.tlv.state == 1)
        pkts.filter_mle_cmd(MLE_DATA_RESPONSE).\
            filter_wpan_src64(LEADER).\
            filter_LLANMA().\
            filter(lambda p: p.mle.tlv.active_tstamp == 70 and\
                   p.mle.tlv.pending_tstamp == 20 and\
                   (p.mle.tlv.leader_data.data_version -
                    _dr_pkt.mle.tlv.leader_data.data_version) % 256 <= 127 and\
                   (p.mle.tlv.leader_data.stable_data_version -
                    _dr_pkt.mle.tlv.leader_data.stable_data_version) % 256 <= 127 and\
                   p.thread_nwd.tlv.stable == [0] and\
                   NWD_COMMISSIONING_DATA_TLV in p.thread_nwd.tlv.type and\
                   NM_COMMISSIONER_SESSION_ID_TLV in p.thread_meshcop.tlv.type and\
                   NM_BORDER_AGENT_LOCATOR_TLV in p.thread_meshcop.tlv.type
                   ).\
            must_next()
        # Step 13: Leader sends a MLE Data Response to Router including the following TLVs:
        #              - Source Address TLV
        #              - Leader Data TLV
        #              - Network Data TLV
        #                  - Commissioner Data TLV:
        #                      Stable flag set to 0
        #                      Border Agent Locator TLV
        #                      Commissioner Session ID TLV
        #              - Active Timestamp TLV <70s>
        #              - Pending Timestamp TLV <20s>
        #              - Pending Operational Dataset TLV
        #                  - Active Timestamp TLV <30s>
        #                  - Delay Timer TLV <greater than 300s>
        #                  - Network Key TLV: New Network Key
        pkts.filter_mle_cmd(MLE_DATA_RESPONSE).\
            filter_wpan_src64(LEADER).\
            filter_wpan_dst64(ROUTER).\
            filter(lambda p: {
                       SOURCE_ADDRESS_TLV,
                       LEADER_DATA_TLV,
                       ACTIVE_TIMESTAMP_TLV,
                       PENDING_TIMESTAMP_TLV,
                       PENDING_OPERATION_DATASET_TLV
                   } <= set(p.mle.tlv.type) and\
                   p.thread_nwd.tlv.stable == [0] and\
                   NWD_COMMISSIONING_DATA_TLV in p.thread_nwd.tlv.type and\
                   NM_COMMISSIONER_SESSION_ID_TLV in p.thread_meshcop.tlv.type and\
                   NM_BORDER_AGENT_LOCATOR_TLV in p.thread_meshcop.tlv.type and\
                   p.mle.tlv.active_tstamp == 70 and\
                   p.mle.tlv.pending_tstamp == 20 and\
                   p.thread_meshcop.tlv.delay_timer > 300000 and\
                   p.thread_meshcop.tlv.master_key == KEY1 and\
                   p.thread_meshcop.tlv.active_tstamp == 30
                   ).\
            must_next()
        # Step 17: The DUT MUST send an ICMPv6 Echo Reply using the new Network key
        _pkt = pkts.filter_ping_request().\
            filter_wpan_src64(ROUTER).\
            filter_ipv6_dst(LEADER_MLEID).\
            must_next()
        pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
            filter_wpan_src64(LEADER).\
            filter_ipv6_dst(ROUTER_MLEID).\
            must_next()
if __name__ == '__main__':
    # Run this certification test standalone via unittest.
    unittest.main()
| |
"""Simple XML-RPC Server.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCRequestHandler
class.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the string functions available through
# string.func_name
import string
self.string = string
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _dispatch(self, method, params):
if method == 'pow':
return apply(pow, params)
elif method == 'add':
return params[0] + params[1]
else:
raise 'bad method'
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCRequestHandler:
class MathHandler(SimpleXMLRPCRequestHandler):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return apply(func, params)
def log_message(self, format, *args):
pass # maybe do something fancy like write the messages to a file
def export_add(self, x, y):
return x + y
server = SimpleXMLRPCServer(("localhost", 8000), MathHandler)
server.serve_forever()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
import xmlrpclib
import SocketServer
import BaseHTTPServer
import sys
class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.

    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.

    XML-RPC requests are dispatched to the _dispatch method, which
    may be overridden by subclasses. The default implementation attempts
    to dispatch XML-RPC calls to the functions or instance installed
    in the server.
    """

    def do_POST(self):
        """Handle the HTTP POST request.

        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the _dispatch method for handling.
        """
        try:
            # get arguments
            data = self.rfile.read(int(self.headers["content-length"]))
            params, method = xmlrpclib.loads(data)

            # generate response
            try:
                response = self._dispatch(method, params)
                # wrap response in a singleton tuple
                response = (response,)
            except Exception:
                # Report the exception back to the client as an XML-RPC
                # Fault.  `except Exception` (not a bare except) lets
                # SystemExit/KeyboardInterrupt propagate instead of being
                # swallowed; sys.exc_info() replaces the deprecated,
                # non-thread-safe sys.exc_type/sys.exc_value globals.
                exc_type, exc_value = sys.exc_info()[:2]
                response = xmlrpclib.dumps(
                    xmlrpclib.Fault(1, "%s:%s" % (exc_type, exc_value))
                )
            else:
                response = xmlrpclib.dumps(response, methodresponse=1)
        except Exception:
            # internal error (unparsable request, bad headers, marshalling
            # failure) -- report as HTTP server error
            self.send_response(500)
            self.end_headers()
        else:
            # got a valid XML RPC response
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)

            # shut down the connection
            self.wfile.flush()
            self.connection.shutdown(1)

    def _dispatch(self, method, params):
        """Dispatch the XML-RPC method.

        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.

        Methods beginning with an '_' are considered private and will
        not be called by SimpleXMLRPCServer.
        """

        def resolve_dotted_attribute(obj, attr):
            """resolve_dotted_attribute(math, 'cos.__doc__') => math.cos.__doc__

            Resolves a dotted attribute name to an object. Raises
            an AttributeError if any attribute in the chain starts
            with a '_'.
            """
            for i in attr.split('.'):
                if i.startswith('_'):
                    # refuse to expose private/dunder attributes over RPC
                    raise AttributeError(
                        'attempt to access private attribute "%s"' % i
                    )
                else:
                    obj = getattr(obj, i)
            return obj

        func = None
        try:
            # check to see if a matching function has been registered
            func = self.server.funcs[method]
        except KeyError:
            if self.server.instance is not None:
                # check for a _dispatch method
                if hasattr(self.server.instance, '_dispatch'):
                    return self.server.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.server.instance,
                            method
                        )
                    except AttributeError:
                        pass

        if func is not None:
            # func(*params) replaces the long-deprecated apply() builtin
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)

    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request.

        Suppressed entirely when the server was created with
        logRequests false.
        """
        if self.server.logRequests:
            BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(SocketServer.TCPServer):
    """Simple XML-RPC server.

    A TCP server that dispatches incoming XML-RPC calls to registered
    functions and, optionally, to a single registered instance.
    """

    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=1):
        # Start with no registered functions and no fallback instance.
        self.funcs = {}
        self.instance = None
        self.logRequests = logRequests
        SocketServer.TCPServer.__init__(self, addr, requestHandler)

    def register_instance(self, instance):
        """Register an instance to respond to XML-RPC requests.

        Only one instance can be installed at a time.

        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))

        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.

        Methods beginning with an '_' are considered private and will
        not be called by SimpleXMLRPCServer.

        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.
        """
        self.instance = instance

    def register_function(self, function, name = None):
        """Register a function to respond to XML-RPC requests.

        The optional name argument can be used to set a Unicode name
        for the function.  When omitted, the function's own __name__
        is used.

        If an instance is also registered then it will only be called
        if a matching function is not found.
        """
        key = name
        if key is None:
            key = function.__name__
        self.funcs[key] = function
if __name__ == '__main__':
    # Demo usage: expose pow() and an anonymous add function on port 8000.
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x,y: x+y, 'add')
    server.serve_forever()
| |
# Copyright (c) 2011 Nokia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""GLES performance report generator."""
import Report
import ReportGenerator
from common import Task
import Trace
import GlesTraceOperations
import GlesChecklist
import Common
from common import Log
import os
def addRenderTargetSection(g):
    """Append a "Render targets" section to the report.

    Scans every trace event for referenced EGLConfig objects (each
    distinct config_id is described once) and for window / pixmap /
    pbuffer surface creation calls, emitting a property table for each.

    g -- the ReportGenerator holding the report, trace and constants.
    """
    section = g.report.create(Report.Section, "Render targets")
    seenConfigIds = set()

    def addPropertyTable(title, rows):
        # One sub-section containing a two-column property table.
        sub = section.create(Report.Section, title)
        table = sub.create(Report.Table, ["Property", "Value"])
        for prop, propValue in rows:
            table.addRow(prop, propValue)

    for event in g.trace.events:
        # Describe every EGL configuration object this event references.
        for value in event.values.values():
            if not isinstance(value, Trace.Object) or value.cls.name != "EGLConfig":
                continue
            configId = value.attrs.get("config_id")
            if not configId or configId in seenConfigIds:
                continue
            seenConfigIds.add(configId)
            addPropertyTable("EGL configuration #%d" % configId,
                             [(cfgKey.replace("_", " ").capitalize(), cfgValue)
                              for cfgKey, cfgValue in value.attrs.items()])
        if event.name == "eglCreateWindowSurface":
            window = event.values.get("window")
            if not window:
                continue
            addPropertyTable("Window surface 0x%x" % event.values[None].id,
                             [("Width", window.attrs["width"]),
                              ("Height", window.attrs["height"]),
                              ("Config ID", event.values["config"].id)])
        elif event.name == "eglCreatePixmapSurface":
            pixmap = event.values.get("pixmap")
            if not pixmap:
                continue
            addPropertyTable("Pixmap surface 0x%x" % event.values[None].id,
                             [("Width", pixmap.attrs["width"]),
                              ("Height", pixmap.attrs["height"]),
                              ("Config ID", event.values["config"].id)])
        elif event.name == "eglCreatePbufferSurface":
            # Pbuffer dimensions live in the attribute list, if present.
            attrs = event.values.get("attrib_list")
            if not attrs:
                continue
            try:
                width = attrs[attrs.index(g.constants.EGL_WIDTH) + 1]
                height = attrs[attrs.index(g.constants.EGL_HEIGHT) + 1]
            except ValueError:
                continue
            addPropertyTable("Pbuffer surface 0x%x" % event.values[None].id,
                             [("Width", width),
                              ("Height", height),
                              ("Config ID", event.values["config"].id)])
def generateReport(project, trace, traceFileName, path, format):
    """Generate a full OpenGL ES performance report for a trace.

    project       -- the tracer project the trace belongs to
    traceFileName -- original trace file name, used in the title (may be falsy)
    path          -- output directory for the report and its images
    format        -- report output format passed to ReportGenerator

    Sections are appended in a fixed order (general stats, render targets,
    timelines, overview plots, per-frame details, checklist), so the calls
    below must not be reordered.
    """
    if traceFileName:
        title = "OpenGL ES performance report for %s" % os.path.basename(traceFileName)
    else:
        title = "OpenGL ES performance report"

    g = ReportGenerator.ReportGenerator(project, trace, title, path, format)

    # Calculate GLES specific stats
    GlesTraceOperations.calculateStatistics(project, trace)

    # Calculate general stats
    g.calculateStatistics()

    # Add some general information first
    section = g.report.create(Report.Section, "General statistics")
    table = g.createGeneralStatisticsTable()
    if traceFileName:
        table.addRow("File name", traceFileName)
    section.add(table)

    # Add a section about the used render targets
    addRenderTargetSection(g)

    # Add an overall timeline of all events
    g.report.add(g.createEventPlot("Event distribution", trace.events))

    # Add a graph about the event type distribution
    g.report.add(g.createEventFrequencyPlot("Operation distribution", trace.events))

    # Add overview section
    overviewSection = g.report.create(Report.Section, "Overview")

    # Frame thumbnails, linked to the matching per-frame detail sections
    thumbnailSection = overviewSection.create(Report.Section, "Selected frames")
    thumbnails = g.createEventThumbnails([f.swapEvent for f in g.interestingFrames])
    for frame, thumbnail in zip(g.interestingFrames, thumbnails):
        thumbnailSection.create(Report.Link, "#frame%d" % (frame.number + 1), thumbnail)

    # Textures: render every loaded texture to a PNG and embed it
    textureLoaders = GlesTraceOperations.getTextureLoaders(project, trace)
    if textureLoaders:
        textureSection = overviewSection.create(Report.Section, "Loaded textures")
        task = Task.startTask("load-textures", "Loading textures", len(textureLoaders))
        for event, func in textureLoaders:
            task.step()
            image = func().convert("RGBA")
            fn = os.path.join(path, "texture%03d.png" % event.seq)
            image.save(fn)
            textureSection.create(Report.Image, fn)

    # FPS (frame duration is assumed nonzero here)
    data = [1.0 / f.duration for f in g.frames]
    plot = g.createPlot("Frames per second", range(len(g.frames)), data)
    overviewSection.add(plot)

    # Render calls
    data = [len(f.events) for f in g.frames]
    plot = g.createPlot("Number of API calls per frame", range(len(g.frames)), data)
    overviewSection.add(plot)

    # Overdraw (sensor data may be absent -- defaults to 0)
    data = [f.swapEvent.sensorData.get("draw_ratio", 0) for f in g.frames]
    plot = g.createPlot("Draw ratio", range(len(g.frames)), data)
    overviewSection.add(plot)

    # Fragment count
    data = [f.swapEvent.sensorData.get("rasterizer_pixels", 0) for f in g.frames]
    plot = g.createPlot("Rasterized fragments per frame", range(len(g.frames)), data)
    overviewSection.add(plot)

    # Texture reads
    data = [f.swapEvent.sensorData.get("rasterizer_texel_fetches", 0) for f in g.frames]
    plot = g.createPlot("Texel fetches per frame", range(len(g.frames)), data)
    overviewSection.add(plot)

    # Texture uploads
    data = [f.swapEvent.sensorData.get("texel_uploads", 0) for f in g.frames]
    plot = g.createPlot("Texel uploads per frame", range(len(g.frames)), data)
    overviewSection.add(plot)

    # Now go over each interesting frame
    task = Task.startTask("frame-report", "Generating report", len(g.interestingFrames))
    frameDetailSection = g.report.create(Report.Section, "Detailed frame statistics")

    for frame in g.interestingFrames:
        task.step()
        frameSection = g.createFrameSection(frame)

        # Add some custom plots
        plot = g.createSensorPlot(frame, "rasterizer_pixels")
        frameSection.add(plot)
        plot = g.createSensorPlot(frame, "rasterizer_texel_fetches")
        frameSection.add(plot)
        plot = g.createSensorPlot(frame, "texel_uploads")
        frameSection.add(plot)

        # Now go over the individual render calls + the swap event
        for event in frame.renderEvents + [frame.swapEvent]:
            eventSection = g.createEventSection(event)
            frameSection.add(eventSection)

        frameDetailSection.add(frameSection)

    # Add the checklist result
    g.report.add(g.createChecklistSection("Performance Checklist", GlesChecklist.checklistItems))

    # Finalize the report
    task.finish()
    g.generate()
| |
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_wfp_eng__stc_imodem
@file marine-integrations/mi/dataset/parser/test/test_wfp_eng__stc_imodem.py
@author Emily Hahn
@brief Test code for a Wfp_eng__stc_imodem data parser
"""
import ntplib
import struct
from StringIO import StringIO
from nose.plugins.attrib import attr
from mi.core.log import get_logger
log = get_logger()
from mi.core.exceptions import SampleException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_driver import DataSetDriverConfigKeys
from mi.dataset.driver.WFP_ENG.STC_IMODEM.driver import DataTypeKey
from mi.dataset.parser.WFP_E_file_common import StateKey
from mi.dataset.parser.wfp_eng__stc_imodem import WfpEngStcImodemParser
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStartRecoveredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStatusRecoveredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemEngineeringRecoveredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStartTelemeteredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStatusTelemeteredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemEngineeringTelemeteredDataParticle
import os
from mi.idk.config import Config
RESOURCE_PATH = os.path.join(Config().base_dir(), 'mi', 'dataset', 'driver', 'WFP_ENG', 'wfp', 'resource')
@attr('UNIT', group='mi')
class WfpEngStcImodemParserUnitTestCase(ParserUnitTestCase):
"""
Wfp_eng__stc_imodem Parser unit test suite
"""
TEST_DATA_SHORT = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac" \
"\x1d\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00" \
"\x00\x00\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^" \
"\x00OR\x9d\xac*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O"
TEST_DATA = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac\x1d\x00" \
"\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00\x00" \
"\x00\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^" \
"\x00OR\x9d\xac*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00OR\x9d\xac/C\xb8COA6\xde" \
"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9d\x00p\x00QR\x9d\xac3C\x98\xe5TA733\x00\x00\x00\x00\x00\x00\x00\x00" \
"\x00\xa4\x00u\x00OR\x9d\xac8C\x9566A7!-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9a\x00o\x00OR\x9d\xac?C\xa1\xd7\xc3" \
"A6\xa6LB\x8bG\xae\x00\x00\x00\x00\x00\xb6\x00v\x00PR\x9d\xacECsS\xfeA7e\xfeB\x88\x00\x00\x00\x00\x00\x00\x00" \
"\x98\x00s\x00QR\x9d\xacKC\x89\x17\x8cA6\xe2\xecB\x84\x99\x9a\x00\x00\x00\x00\x00\xa4\x00\x81\x00PR\x9d\xacQC}\n" \
"\xbfA7\x00hB\x81G\xae\x00\x00\x00\x00\x00\xa2\x00|\x00NR\x9d\xacWCyW\xc7A6\x97\x8dB{\xe1H\x00\x00\x00\x00\x00\x9a" \
"\x00m\x00NR\x9d\xac]C\x8c!#A6\x9f\xbeBuQ\xec\x00\x00\x00\x00\x00\x97\x00s\x00QR\x9d\xaccC\x84!9A6h\nBn\x8f\\\x00" \
"\x00\x00\x00\x00\x9f\x00v\x00NR\x9d\xaciCE\xa5UA6a|Bh=q\x00\x00\x00\x00\x00\x97\x00l\x00PR\x9d\xacoC\xa5\xa5\xad" \
"A5\x94\xafBa\\)\x00\x00\x00\x00\x00\x9b\x00n\x00RR\x9d\xacuC\\\r\x08A6\x14{B[\n=\x00\x00\x00\x00\x00\x9a\x00s\x00" \
"OR\x9d\xac{C\xa3\x0b\xb8A5F\nBT33\x00\x00\x00\x00\x00\x98\x00q\x00NR\x9d\xac\x81CO\xc0+A5\xd7\xdcBM\xd7\n\x00\x00" \
"\x00\x00\x00\x97\x00n\x00PR\x9d\xac\x87Cxp\xd0A5#\xa3BGG\xae\x00\x00\x00\x00\x00\x9b\x00n\x00PR\x9d\xac\x8dC\x84" \
"\xdd\xd9A5X\x10B@\xae\x14\x00\x00\x00\x00\x00\xa5\x00v\x00OR\x9d\xac\x93C\xa0\x85\x01A4j\x7fB:\x14{\x00\x00\x00\x00" \
"\x00\x9c\x00t\x00QR\x9d\xac\x99Cq\xa4\xdbA5:\x92B3\xc2\x8f\x00\x00\x00\x00\x00\x9c\x00x\x00PR\x9d\xac\x9fCg\x07#A5" \
"\x18+B-\x00\x00\x00\x00\x00\x00\x00\x9e\x00m\x00QR\x9d\xac\xa5C\x9bw\x96A4FtB&z\xe1\x00\x00\x00\x00\x00\xd7\x00s" \
"\x00OR\x9d\xac\xabCmP5A4\x9dJB\x1f\xd7\n\x00\x00\x00\x00\x00\x99\x00s\x00PR\x9d\xac\xb1C\xad\x960A3\x8a\tB\x19" \
"(\xf6\x00\x00\x00\x00\x00\x95\x00n\x00OR\x9d\xac\xb7C\x0c\xce]A5\x0f\xfaB\x12\xe1H\x00\x00\x00\x00\x00\x9c\x00u" \
"\x00PR\x9d\xac\xbdC\xa1\xeb\x02A3Z\x85B\x0c=q\x00\x00\x00\x00\x00\x95\x00u\x00OR\x9d\xac\xc3C$\xafOA4\xa23B\x05" \
"\xe1H\x00\x00\x00\x00\x00\x99\x00r\x00PR\x9d\xac\xc9C\xae\xddeA3\x0f(A\xfe(\xf6\x00\x00\x00\x00\x00\x9a\x00o\x00O" \
"R\x9d\xac\xcfA\xfa\xb2:A5\x0b\x0fA\xf2\x8f\\\x00\x00\x00\x00\x00\xaf\x00m\x00P\xff\xff\xff\xff\x00\x00\x00\rR\x9d" \
"\xac\xd4R\x9d\xadQ"
# all flags set to zero
TEST_DATA_BAD_FLAGS = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac\x1d" \
"\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00\x00\x00" \
"\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00OR\x9d\xac" \
"*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O"
# took 5 bytes out of second engineering sample
TEST_DATA_BAD_ENG = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac\x1d" \
"\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t!\x00\x00\x00\x00\x00" \
"\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00OR\x9d\xac" \
"*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O"
def state_callback(self, state, file_ingested):
""" Call back method to watch what comes in via the position callback """
self.file_ingested = file_ingested
self.state_callback_value = state
    def pub_callback(self, pub):
        """ Call back method to watch what comes in via the publish callback """
        # Stored so tests can assert on the most recently published particles.
        self.publish_callback_value = pub
    def setUp(self):
        """Build parser configs and the expected particles for both streams."""
        ParserUnitTestCase.setUp(self)
        # One particle-class mapping per stream (recovered vs telemetered);
        # the parser picks start/status/engineering classes from this dict.
        self.config = {
            DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED: {
                DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.wfp_eng__stc_imodem_particles',
                DataSetDriverConfigKeys.PARTICLE_CLASS: None,
                DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
                    'status_data_particle_class': WfpEngStcImodemStatusRecoveredDataParticle,
                    'start_data_particle_class': WfpEngStcImodemStartRecoveredDataParticle,
                    'engineering_data_particle_class': WfpEngStcImodemEngineeringRecoveredDataParticle
                }
            },
            DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED: {
                DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.wfp_eng__stc_imodem_particles',
                DataSetDriverConfigKeys.PARTICLE_CLASS: None,
                DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
                    'status_data_particle_class': WfpEngStcImodemStatusTelemeteredDataParticle,
                    'start_data_particle_class': WfpEngStcImodemStartTelemeteredDataParticle,
                    'engineering_data_particle_class': WfpEngStcImodemEngineeringTelemeteredDataParticle
                }
            },
        }
        # Parsing always starts at byte offset 0.
        self.start_state = {StateKey.POSITION: 0}
        # Define test data particles and their associated timestamps which will be
        # compared with returned results
        timestamp1_time = self.timestamp_to_ntp('R\x9d\xac\x19')
        self.particle_a_start_time_recov = WfpEngStcImodemStartRecoveredDataParticle(
            b'\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19',
            internal_timestamp=timestamp1_time)
        self.particle_a_start_time_telem = WfpEngStcImodemStartTelemeteredDataParticle(
            b'\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19',
            internal_timestamp=timestamp1_time)
        # Engineering records a-d: the leading 4 bytes of each raw record are
        # the record's own timestamp, used as the internal timestamp.
        timestamp1_eng = self.timestamp_to_ntp('R\x9d\xac\x1d')
        self.particle_a_eng_recov = WfpEngStcImodemEngineeringRecoveredDataParticle(
            b'R\x9d\xac\x1d\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00N',
            internal_timestamp=timestamp1_eng)
        self.particle_a_eng_telem = WfpEngStcImodemEngineeringTelemeteredDataParticle(
            b'R\x9d\xac\x1d\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00N',
            internal_timestamp=timestamp1_eng)
        timestamp2_eng = self.timestamp_to_ntp('R\x9d\xac!')
        self.particle_b_eng_recov = WfpEngStcImodemEngineeringRecoveredDataParticle(
            b'R\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2\x00c\x00O',
            internal_timestamp=timestamp2_eng)
        self.particle_b_eng_telem = WfpEngStcImodemEngineeringTelemeteredDataParticle(
            b'R\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2\x00c\x00O',
            internal_timestamp=timestamp2_eng)
        timestamp3_eng = self.timestamp_to_ntp('R\x9d\xac&')
        self.particle_c_eng_recov = WfpEngStcImodemEngineeringRecoveredDataParticle(
            b"R\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00O",
            internal_timestamp=timestamp3_eng)
        self.particle_c_eng_telem = WfpEngStcImodemEngineeringTelemeteredDataParticle(
            b"R\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00O",
            internal_timestamp=timestamp3_eng)
        timestamp4_eng = self.timestamp_to_ntp('R\x9d\xac*')
        self.particle_d_eng_recov = WfpEngStcImodemEngineeringRecoveredDataParticle(
            b'R\x9d\xac*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O',
            internal_timestamp=timestamp4_eng)
        self.particle_d_eng_telem = WfpEngStcImodemEngineeringTelemeteredDataParticle(
            b'R\x9d\xac*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O',
            internal_timestamp=timestamp4_eng)
        # Final engineering record and trailing status record of TEST_DATA.
        timestamp_last_eng = self.timestamp_to_ntp('R\x9d\xac\xcf')
        self.particle_last_eng_recov = WfpEngStcImodemEngineeringRecoveredDataParticle(
            b'R\x9d\xac\xcfA\xfa\xb2:A5\x0b\x0fA\xf2\x8f\\\x00\x00\x00\x00\x00\xaf\x00m\x00P',
            internal_timestamp=timestamp_last_eng)
        self.particle_last_eng_telem = WfpEngStcImodemEngineeringTelemeteredDataParticle(
            b'R\x9d\xac\xcfA\xfa\xb2:A5\x0b\x0fA\xf2\x8f\\\x00\x00\x00\x00\x00\xaf\x00m\x00P',
            internal_timestamp=timestamp_last_eng)
        timestamp1_status = self.timestamp_to_ntp('R\x9d\xac\xd4')
        self.particle_a_status_recov = WfpEngStcImodemStatusRecoveredDataParticle(
            b'\xff\xff\xff\xff\x00\x00\x00\rR\x9d\xac\xd4R\x9d\xadQ',
            internal_timestamp=timestamp1_status)
        self.particle_a_status_telem = WfpEngStcImodemStatusTelemeteredDataParticle(
            b'\xff\xff\xff\xff\x00\x00\x00\rR\x9d\xac\xd4R\x9d\xadQ',
            internal_timestamp=timestamp1_status)
        # uncomment the following to generate particles in yml format for driver testing results files
        #self.particle_to_yml(self.particle_a_start_time_recov)
        #self.particle_to_yml(self.particle_a_eng)
        #self.particle_to_yml(self.particle_b_eng)
        #self.particle_to_yml(self.particle_c_eng)
        #self.particle_to_yml(self.particle_d_eng)
        #self.particle_to_yml(self.particle_a_stat)
        # Reset the callback capture slots for each test.
        self.file_ingested = False
        self.state_callback_value = None
        self.publish_callback_value = None
def timestamp_to_ntp(self, hex_timestamp):
fields = struct.unpack('>I', hex_timestamp)
timestamp = int(fields[0])
return ntplib.system_to_ntp_time(timestamp)
def assert_result(self, result, position, particle, ingested):
self.assertEqual(result, [particle])
self.assertEqual(self.file_ingested, ingested)
self.assertEqual(self.parser._state[StateKey.POSITION], position)
self.assertEqual(self.state_callback_value[StateKey.POSITION], position)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], particle)
def test_simple_recovered(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(1)
self.assert_result(result, 24, self.particle_a_start_time_recov, False)
# next get engineering records
result = self.parser.get_records(1)
self.assert_result(result, 50, self.particle_a_eng_recov, False)
result = self.parser.get_records(1)
self.assert_result(result, 76, self.particle_b_eng_recov, False)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_recov, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_recov, True)
# no data left, dont move the position
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(self.parser._state[StateKey.POSITION], 128)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], self.particle_d_eng_recov)
def test_simple_telemetered(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(1)
self.assert_result(result, 24, self.particle_a_start_time_telem, False)
# next get engineering records
result = self.parser.get_records(1)
self.assert_result(result, 50, self.particle_a_eng_telem, False)
result = self.parser.get_records(1)
self.assert_result(result, 76, self.particle_b_eng_telem, False)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_telem, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_telem, True)
# no data left, dont move the position
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(self.parser._state[StateKey.POSITION], 128)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], self.particle_d_eng_telem)
def test_get_many_recovered(self):
"""
Read test data and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(1)
self.assert_result(result, 24, self.particle_a_start_time_recov, False)
result = self.parser.get_records(4)
self.assertEqual(result, [self.particle_a_eng_recov,
self.particle_b_eng_recov,
self.particle_c_eng_recov,
self.particle_d_eng_recov])
self.assertEqual(self.parser._state[StateKey.POSITION], 128)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
self.assertEqual(self.publish_callback_value[0], self.particle_a_eng_recov)
self.assertEqual(self.publish_callback_value[1], self.particle_b_eng_recov)
self.assertEqual(self.publish_callback_value[2], self.particle_c_eng_recov)
self.assertEqual(self.publish_callback_value[3], self.particle_d_eng_recov)
self.assertEqual(self.file_ingested, True)
def test_get_many_telemetered(self):
"""
Read test data and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(1)
self.assert_result(result, 24, self.particle_a_start_time_telem, False)
result = self.parser.get_records(4)
self.assertEqual(result, [self.particle_a_eng_telem,
self.particle_b_eng_telem,
self.particle_c_eng_telem,
self.particle_d_eng_telem])
self.assertEqual(self.parser._state[StateKey.POSITION], 128)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
self.assertEqual(self.publish_callback_value[0], self.particle_a_eng_telem)
self.assertEqual(self.publish_callback_value[1], self.particle_b_eng_telem)
self.assertEqual(self.publish_callback_value[2], self.particle_c_eng_telem)
self.assertEqual(self.publish_callback_value[3], self.particle_d_eng_telem)
self.assertEqual(self.file_ingested, True)
def test_long_stream_recovered(self):
"""
Test a long stream of data
"""
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(1)
self.assert_result(result, 24, self.particle_a_start_time_recov, False)
result = self.parser.get_records(32)
self.assertEqual(result[0], self.particle_a_eng_recov)
self.assertEqual(result[-1], self.particle_last_eng_recov)
self.assertEqual(self.parser._state[StateKey.POSITION], 856)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 856)
self.assertEqual(self.publish_callback_value[-1], self.particle_last_eng_recov)
result = self.parser.get_records(1)
self.assert_result(result, 872, self.particle_a_status_recov, True)
def test_long_stream_telemetered(self):
"""
Test a long stream of data
"""
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(1)
self.assert_result(result, 24, self.particle_a_start_time_telem, False)
result = self.parser.get_records(32)
self.assertEqual(result[0], self.particle_a_eng_telem)
self.assertEqual(result[-1], self.particle_last_eng_telem)
self.assertEqual(self.parser._state[StateKey.POSITION], 856)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 856)
self.assertEqual(self.publish_callback_value[-1], self.particle_last_eng_telem)
result = self.parser.get_records(1)
self.assert_result(result, 872, self.particle_a_status_telem, True)
def test_after_header_recovered(self):
"""
Test starting the parser in a state in the middle of processing
"""
new_state = {StateKey.POSITION: 24}
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED), new_state, self.stream_handle,
self.state_callback, self.pub_callback)
# get engineering records
result = self.parser.get_records(1)
self.assert_result(result, 50, self.particle_a_eng_recov, False)
result = self.parser.get_records(1)
self.assert_result(result, 76, self.particle_b_eng_recov, False)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_recov, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_recov, True)
def test_after_header_telemetered(self):
"""
Test starting the parser in a state in the middle of processing
"""
new_state = {StateKey.POSITION: 24}
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED), new_state, self.stream_handle,
self.state_callback, self.pub_callback)
# get engineering records
result = self.parser.get_records(1)
self.assert_result(result, 50, self.particle_a_eng_telem, False)
result = self.parser.get_records(1)
self.assert_result(result, 76, self.particle_b_eng_telem, False)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_telem, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_telem, True)
def test_mid_state_start_recovered(self):
"""
Test starting the parser in a state in the middle of processing
"""
new_state = {StateKey.POSITION:76}
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED), new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_recov, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_recov, True)
def test_mid_state_start_telemetered(self):
"""
Test starting the parser in a state in the middle of processing
"""
new_state = {StateKey.POSITION:76}
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED), new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_telem, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_telem, True)
def test_set_state_recovered(self):
"""
Test changing to a new state after initializing the parser and
reading data, as if new data has been found and the state has
changed
"""
new_state = {StateKey.POSITION: 76}
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(1)
self.assert_result(result, 24, self.particle_a_start_time_recov, False)
# set the new state, the essentially skips engineering a and b
self.parser.set_state(new_state)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_recov, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_recov, True)
def test_set_state_telemetered(self):
"""
Test changing to a new state after initializing the parser and
reading data, as if new data has been found and the state has
changed
"""
new_state = {StateKey.POSITION: 76}
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(1)
self.assert_result(result, 24, self.particle_a_start_time_telem, False)
# set the new state, the essentially skips engineering a and b
self.parser.set_state(new_state)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_telem, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_telem, True)
def test_bad_flags_recovered(self):
"""
test that we don't parse any records when the flags are not what we expect
"""
with self.assertRaises(SampleException):
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_BAD_FLAGS)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
def test_bad_flags_telemetered(self):
"""
test that we don't parse any records when the flags are not what we expect
"""
with self.assertRaises(SampleException):
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_BAD_FLAGS)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
def test_bad_data_recovered(self):
"""
Ensure that missing data causes us to miss records
TODO: This test should be improved if we come up with a more accurate regex for the data sample
"""
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_BAD_ENG)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(1)
self.assert_result(result, 24, self.particle_a_start_time_recov, False)
# next get engineering records
result = self.parser.get_records(4)
if len(result) == 4:
self.fail("We got 4 records, the bad data should only make 3")
def test_bad_data_telemetered(self):
"""
Ensure that missing data causes us to miss records
TODO: This test should be improved if we come up with a more accurate regex for the data sample
"""
self.stream_handle = StringIO(WfpEngStcImodemParserUnitTestCase.TEST_DATA_BAD_ENG)
self.parser = WfpEngStcImodemParser(
self.config.get(DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED), self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(1)
self.assert_result(result, 24, self.particle_a_start_time_telem, False)
# next get engineering records
result = self.parser.get_records(4)
if len(result) == 4:
self.fail("We got 4 records, the bad data should only make 3")
def particle_to_yml(self, particles, filename, mode='w'):
"""
This is added as a testing helper, not actually as part of the parser tests. Since the same particles
will be used for the driver test it is helpful to write them to .yml in the same form they need in the
results.yml fids here.
"""
# open write append, if you want to start from scratch manually delete this fid
fid = open(os.path.join(RESOURCE_PATH, filename), mode)
fid.write('header:\n')
fid.write(" particle_object: 'MULTIPLE'\n")
fid.write(" particle_type: 'MULTIPLE'\n")
fid.write('data:\n')
for i in range(0, len(particles)):
particle_dict = particles[i].generate_dict()
fid.write(' - _index: %d\n' %(i+1))
fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %16.16f\n' % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
| |
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.text
#import matplotlib.artist as artist
#import matplotlib.colors as colors
#import matplotlib.patches as patches
#import matplotlib.mathtext as mathtext
#import matplotlib.image as image
from matplotlib.lines import Line2D
#from matplotlib.widgets import Button
class ParamSpec(object):
    """A specification for a parameter curve.

    Holds an allowed grid of x and y values, a sparse sorted list of
    (x, y) target points, and produces an interpolated curve over the
    full x grid.  A 2-element grid axis means "continuous range" and is
    not snapped to.
    """

    @property
    def x(self):
        """The x coordinates of the current target points."""
        return [x for (x, y) in self.targets]

    @property
    def xfmt(self):
        """Format string used to display x values."""
        return self._xfmt

    @xfmt.setter
    def xfmt(self, fmt):
        self._xfmt = "{:" + fmt + "}"

    @property
    def y(self):
        """The y coordinates of the current target points."""
        return [y for (x, y) in self.targets]

    @property
    def yfmt(self):
        """Format string used to display y values."""
        return self._yfmt

    @yfmt.setter
    def yfmt(self, fmt):
        self._yfmt = "{:" + fmt + "}"

    @property
    def interp_y(self):
        """Return an interpolated y value for every x in grid_x.

        Grid points outside the span of the targets are left as NaN.
        """
        interp_y = np.empty(self.grid_x.shape)
        interp_y[:] = np.nan
        for tgt1, tgt2 in zip(self.targets[:], self.targets[1:]):
            idx1 = np.nonzero(self.grid_x == tgt1[0])[0][0]
            idx2 = np.nonzero(self.grid_x == tgt2[0])[0][0]
            # snap_to_grid() doesn't snap if len(grid_y) == 2
            interp_y[idx1:idx2 + 1] = self.snap_to_grid(
                y=np.linspace(tgt1[1], tgt2[1], idx2 - idx1 + 1))
        return interp_y

    @property
    def norm_interp_y(self):
        """interp_y scaled into [0, 1] by the grid_y range (if non-degenerate)."""
        y = self.interp_y
        scale = np.max(self.grid_y) - np.min(self.grid_y)
        if scale != 0:
            y = (y - np.min(y)) / scale
        return y

    def __init__(self, name, grid_x, grid_y, default_y=None, xfmt="0.3f",
                 yfmt="0.3f", targets=None, manager=None, lineprops=None):
        """Constructor.

        targets and lineprops previously used mutable defaults ([] and {})
        which were shared across all instances; they now default to a fresh
        object per instance.
        """
        self.name = name
        self.grid_x = np.array(grid_x)
        self.grid_y = np.array(grid_y)
        # 'is None' replaces '== None'; default_y=0 is also handled correctly
        if default_y is None:
            self.default_y = self.snap_to_grid(y=np.mean(grid_y))
        else:
            self.default_y = self.snap_to_grid(y=default_y)
        self.targets = [] if targets is None else targets
        self.manager = manager
        self.xfmt = xfmt
        self.yfmt = yfmt
        self.lineprops = {} if lineprops is None else lineprops

    def add_target(self, add_x, add_y):
        """Insert or overwrite a target point (snapped), keeping x sorted."""
        add_x = self.snap_to_grid(x=add_x)
        add_y = self.snap_to_grid(y=add_y)
        try:
            # Find the first existing x >= the x to be added and insert before it if >, or
            # overwrite it if they are ==.
            idx = [x >= add_x for x in self.x].index(True)
            if self.targets[idx][0] == add_x:
                self.targets[idx] = (add_x, add_y)
            else:
                self.targets.insert(idx, (add_x, add_y))
        except ValueError:  # Couldn't find one.
            if self.targets == []:  # There are no existing x.
                self.targets = [(add_x, add_y)]
            else:  # x to be added > all existing x.
                self.targets.append((add_x, add_y))
        # NOTE: the old bare 'except: raise("Unexpected error:", ...)' was
        # removed -- raising a tuple is itself a TypeError and masked the
        # original exception; unexpected errors now simply propagate.
        self.manager({'event': 'data_changed', 'paramspec': self})

    def del_target(self, del_x):
        """Delete the target at del_x; the grid endpoints cannot be deleted."""
        del_x = self.snap_to_grid(x=del_x)
        if del_x != self.grid_x[0] and del_x != self.grid_x[-1]:
            idx = [x == del_x for x in self.x].index(True)
            del self.targets[idx]
        self.manager({'event': 'data_changed', 'paramspec': self})

    def del_target_by_idx(self, idx):
        """Delete a target by its index in self.targets."""
        if idx != 0 and idx != (len(self.targets) - 1):
            del self.targets[idx]
        self.manager({'event': 'data_changed', 'paramspec': self})

    # Parameters are 'x' and 'y'. We use **kwargs to force you to specify them
    # as a keyword argument, e.g.
    #     snap_to_grid(y=2.9)
    #     snap_to_grid(x=1.5)
    # If we didn't do this, you might accidentally try to snap a y value to grid_x.
    def snap_to_grid(self, **kwargs):
        """Snap coordinates to allowed values.

        Raises ValueError unless exactly one of x= or y= is given.
        """
        if len(kwargs.keys()) != 1:
            # a real exception instance; 'raise "..."' was a TypeError itself
            raise ValueError("Wrong number of arguments for snap_to_grid(). "
                             "Must be one of 'x=' or 'y=' only.")
        retval = None
        if 'x' in kwargs.keys():
            if len(self.grid_x) == 2:
                retval = kwargs['x']
            else:
                grid_diff = abs(self.grid_x - kwargs['x'])
                snapidx = grid_diff.tolist().index(min(grid_diff))
                retval = self.grid_x[snapidx]
        elif 'y' in kwargs.keys():
            if len(self.grid_y) == 2:
                retval = kwargs['y']  # don't snap to grid
            else:
                # TODO: this could probably be made more elegant/vectorized
                # TODO: handle an array of x as well
                try:  # array of y values
                    grid_diff = [np.abs(self.grid_y - y) for y in kwargs['y']]
                    snapidx = [arr.tolist().index(min(arr)) for arr in grid_diff]
                    retval = self.grid_y[snapidx]
                except TypeError:  # whoops, must have been a single y value
                    grid_diff = abs(self.grid_y - kwargs['y'])
                    snapidx = grid_diff.tolist().index(min(grid_diff))
                    retval = self.grid_y[snapidx]
        return retval
class ParamDrawAxes(object):
    """Axes for interactively drawing a parameter curve.

    Mouse clicks add target points to the active ParamSpec; clicking an
    existing marker deletes it.  The dashed line shows the targets and the
    solid line shows the interpolated curve.
    """

    @property
    def paramspec(self):
        """The ParamSpec currently being edited (None until one is selected)."""
        return self._paramspec

    @paramspec.setter
    def paramspec(self, value):
        self._paramspec = value
        self.style_axes()
        self.line.set_data(self.paramspec.x, self.paramspec.y)
        self.interp_line.set_data(self.paramspec.grid_x, self.paramspec.interp_y)
        self._update_param()

    def __init__(self, ax, motion_callback=None):
        """Constructor.
        Add the parameter line to a figure.
        """
        self._paramspec = None
        self.ax = ax
        self.ax.set_xticklabels([])
        # Empty line for the user-placed target markers.
        line = Line2D([], [], ls='--', c='#666666',
                      marker='x', mew=2, mec='#204a87', picker=5)
        ax.add_line(line)
        interp_line = Line2D([], [], c='#333333', alpha=0.5)
        ax.add_line(interp_line)
        self.style_axes()
        self.line = line
        self.position_text = matplotlib.text.Text()
        self.ax.add_artist(self.position_text)
        self.position_marker = matplotlib.text.Text(text='+', color='red',
                                                    horizontalalignment='center',
                                                    verticalalignment='center')
        self.ax.add_artist(self.position_marker)
        self.interp_line = interp_line
        self.canvas = line.figure.canvas
        # Set while a marker pick is being handled so the button-release
        # event for the same click does not also add a new target.
        self._deleting_marker = False
        # Event handler for mouse clicking in axes.
        self.cid = self.canvas.mpl_connect('button_release_event', self)
        # Callback for mouse clicking on markers.
        self.canvas.callbacks.connect('pick_event', self.on_marker_pick)
        # Callback to keep statusbar up to date with position
        # ('is not None' replaces the '!= None' comparison)
        if motion_callback is not None:
            self.canvas.callbacks.connect('motion_notify_event', motion_callback)

    def style_axes(self):
        """Apply grid/tick/title styling derived from the current ParamSpec."""
        self.ax.xaxis.grid(color='gray')
        try:
            assert(len(self.paramspec.grid_y) > 2)
            self.ax.yaxis.grid(color='gray')
        except (AttributeError, AssertionError):  # self.paramspec is None or grid_y !> 2
            self.ax.yaxis.grid(False)
        try:
            self.ax.set_ylim(self.paramspec.grid_y[0], self.paramspec.grid_y[-1])
            self.ax.set_xticks(self.paramspec.grid_x)
            self.ax.set_yticks(self.paramspec.grid_y)
            self.ax.set_title("Drawing parameter: {:s}".format(self.paramspec.name))
        except AttributeError:  # self.paramspec is None
            pass

    def __call__(self, event):
        """Handle a button-release event: add a target at the click position."""
        if event.inaxes != self.line.axes:
            return
        if self._deleting_marker:
            # this release belongs to a marker pick; consume the flag instead
            self._deleting_marker = False
        else:
            self.paramspec.add_target(event.xdata, event.ydata)
            self.line.set_data(self.paramspec.x, self.paramspec.y)
            self.interp_line.set_data(self.paramspec.grid_x, self.paramspec.interp_y)
            self._update_param()

    def _update_param(self):
        """Redraw the canvas after any data or annotation change."""
        self.canvas.draw()

    def on_marker_pick(self, event):
        """Handle a pick on a target marker: delete that target."""
        self._deleting_marker = True
        self.paramspec.del_target_by_idx(event.ind)
        self.line.set_data(self.paramspec.x, self.paramspec.y)
        self.interp_line.set_data(self.paramspec.grid_x, self.paramspec.interp_y)
        self._update_param()

    def on_mouse_motion(self, event):
        """Move the position readout/marker, keeping the text inside the axes."""
        self.position_text.set_text(event['msg'])
        self.position_text.set_position((event['x'], event['y']))
        if event['x'] >= np.mean(self.paramspec.grid_x):
            self.position_text.set_horizontalalignment('right')
        else:
            self.position_text.set_horizontalalignment('left')
        if event['y'] >= np.mean(self.paramspec.grid_y):
            self.position_text.set_verticalalignment('top')
        else:
            self.position_text.set_verticalalignment('bottom')
        self.position_marker.set_position((event['x'], event['y']))
        self._update_param()
class ParamShowAxes(object):
    """Read-only axes that display the normalized curve of every registered
    ParamSpec.
    """

    def __init__(self, ax):
        """Attach to an existing axes and hide its y tick labels."""
        self.ax = ax
        self.ax.set_yticklabels([])
        self._paramspecs = {}
        self.canvas = ax.figure.canvas

    def add_paramspec(self, paramspec):
        """Add a ParamSpec to the axes and draw its normalized curve."""
        curve = Line2D(paramspec.grid_x, paramspec.norm_interp_y, **paramspec.lineprops)
        self.ax.add_line(curve)
        self.canvas.draw()
        self._paramspecs[paramspec.name] = {'paramspec': paramspec, 'line': curve}

    def redraw(self):
        """Refresh every registered curve from its ParamSpec and redraw."""
        for entry in self._paramspecs.values():
            spec = entry['paramspec']
            entry['line'].set_data(spec.grid_x, spec.norm_interp_y)
        self.canvas.draw()
class ParamSpecManager(object):
    """Wires a set of ParamSpecs to one drawing axes and one display axes."""

    def __init__(self, paramspecs, draw_axes, show_axes, motion_callback=None):
        """Set up both axes, register as manager of every ParamSpec, and
        anchor each curve at its default y on both grid endpoints."""
        self.figure = draw_axes.figure
        self.draw_axes = draw_axes
        self.show_axes = show_axes
        # both axes share the overall x extent of all parameter grids
        min_x = np.min([spec.grid_x[0] for spec in paramspecs.values()])
        max_x = np.max([spec.grid_x[-1] for spec in paramspecs.values()])
        self.draw_axes.set_xlim([min_x, max_x])
        self.show_axes.set_xlim([min_x, max_x])
        self.paramspecs = paramspecs
        self.pdx = ParamDrawAxes(self.draw_axes, motion_callback=motion_callback)
        self.psx = ParamShowAxes(self.show_axes)
        for spec in paramspecs.values():
            spec.manager = self
            self.add_target_to(spec.name, spec.grid_x[0], spec.default_y)
            self.add_target_to(spec.name, spec.grid_x[-1], spec.default_y)
            self.psx.add_paramspec(spec)

    def __call__(self, event):
        """ParamSpec change notifications land here; refresh the overview."""
        if event['event'] == 'data_changed':
            self.psx.redraw()

    def add_target_to(self, name, add_x, add_y):
        """Add a target to ParamSpec identified by name."""
        self.paramspecs[name].add_target(add_x, add_y)

    def show(self):
        """Enter the matplotlib main loop."""
        plt.show()

    def select_paramspec(self, name):
        """Make the named ParamSpec the one edited in the drawing axes."""
        if name in self.paramspecs.keys():
            self.pdx.paramspec = self.paramspecs[name]
| |
# =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from flask import Blueprint
from flask import abort
from flask import render_template
from flask import request
from flask import jsonify
from flask import redirect
from flask import url_for
from flask import send_file
from flask_login import login_required
from flask_login import current_user
from wtforms import Form
from wtforms import StringField
from wtforms import PasswordField
from wtforms import SelectField
from database import DBSession
from models import SystemOption
from models import SMUMeta
from models import logger
from models import SMUInfo
from models import Preferences
from common import get_host
from common import get_user_by_id
from common import get_server_list
from common import fill_servers
from common import get_host_active_packages
from common import create_download_jobs
from common import can_check_reachability
from forms import ServerDialogForm
from forms import SoftwareProfileForm
from forms import ExportInformationForm
from forms import BrowseServerDialogForm
from constants import UNKNOWN
from constants import PlatformFamily
from constants import BUG_SEARCH_URL
from constants import get_repository_directory
from constants import ExportInformationFormat
from constants import ExportSoftwareInformationLayout
from filters import beautify_platform
from filters import time_difference_UTC
from filters import get_datetime_string
from smu_info_loader import SMUInfoLoader
from utils import is_empty
from utils import get_file_list
from utils import get_json_value
from utils import get_software_platform
from utils import comma_delimited_str_to_list
from report_writer import ExportSoftwareInfoHTMLConciseWriter
from report_writer import ExportSoftwareInfoHTMLDefaultWriter
from report_writer import ExportSoftwareInfoExcelConciseWriter
from report_writer import ExportSoftwareInfoExcelDefaultWriter
from smu_utils import SMU_INDICATOR
from smu_utils import get_optimized_list
from cisco_service.bug_service import BugServiceHandler
from cisco_service.bsd_service import BSDServiceHandler
# Blueprint grouping all CCO (cisco.com software catalog) views under /cco.
cco = Blueprint('cco', __name__, url_prefix='/cco')
@cco.route('/platform/<platform>/release/<release>')
@login_required
def home(platform, release):
    """Render the CCO software browse page for the given platform/release."""
    system_option = SystemOption.get(DBSession())
    form = BrowseServerDialogForm(request.form)
    fill_servers(form.dialog_server.choices, get_server_list(DBSession()), False)
    # NOTE(review): ExportSoftwareInformationForm is not among this module's
    # imports (only ExportInformationForm is imported from forms) -- confirm
    # where this name is defined, or whether the import list is stale.
    export_software_information_form = ExportSoftwareInformationForm(request.form)
    return render_template('cco/home.html', form=form, platform=platform,
                           release=release, system_option=system_option,
                           export_software_information_form=export_software_information_form)
@cco.route('/api/get_cco_retrieval_elapsed_time/platform/<platform>/release/<release>')
@login_required
def api_get_cco_retrieval_elapsed_time(platform, release):
    """Return how long ago the SMU metadata for platform/release was retrieved."""
    platform_release = platform + '_' + release
    smu_meta = DBSession().query(SMUMeta).filter(
        SMUMeta.platform_release == platform_release).first()
    if smu_meta is None:
        retrieval_elapsed_time = UNKNOWN
    else:
        retrieval_elapsed_time = time_difference_UTC(smu_meta.retrieval_time)
    return jsonify(**{'data': [{'retrieval_elapsed_time': retrieval_elapsed_time}]})
@cco.route('/api/create_download_jobs', methods=['POST'])
@login_required
def api_create_download_jobs():
    """Create download jobs for the requested SMUs.

    Expects form fields: server_id, server_directory, smu_list and
    pending_downloads (both space delimited).  Returns a JSON status.
    """
    try:
        server_id = request.form.get("server_id")
        server_directory = request.form.get("server_directory")
        smu_list = request.form.get("smu_list").split()
        pending_downloads = request.form.get("pending_downloads").split()
        # Derives the platform and release using the first SMU name.
        if len(smu_list) > 0 and len(pending_downloads) > 0:
            platform, release = SMUInfoLoader.get_platform_and_release(smu_list)
            create_download_jobs(DBSession(), platform, release, pending_downloads,
                                 server_id, server_directory, current_user.username)
        return jsonify({'status': 'OK'})
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
        # not swallowed; the traceback is still recorded via logger.exception.
        logger.exception('api_create_download_jobs() hit exception')
        return jsonify({'status': 'Failed'})
@cco.route('/api/get_smu_details/smu_id/<smu_id>')
@login_required
def api_get_smu_details(smu_id):
    """Return the details of a single SMU as a one-row JSON payload."""
    rows = []
    db_session = DBSession()
    smu_info = db_session.query(SMUInfo).filter(SMUInfo.id == smu_id).first()
    if smu_info is not None:
        # one dict literal instead of twenty individual key assignments
        rows.append({
            'id': smu_info.id,
            'name': smu_info.name,
            'status': smu_info.status,
            'type': smu_info.type,
            'posted_date': smu_info.posted_date,
            'ddts': smu_info.ddts,
            'description': smu_info.description,
            'functional_areas': smu_info.functional_areas,
            'impact': smu_info.impact,
            'package_names': smu_info.package_names,
            'package_md5': smu_info.package_md5,
            'package_bundles': smu_info.package_bundles,
            'compressed_image_size': str(smu_info.compressed_image_size),
            'uncompressed_image_size': str(smu_info.uncompressed_image_size),
            'prerequisites': smu_info.prerequisites,
            'supersedes': smu_info.supersedes,
            'superseded_by': smu_info.superseded_by,
            'composite_DDTS': smu_info.composite_DDTS,
            'prerequisites_smu_ids': get_smu_ids(db_session, smu_info.prerequisites),
            'supersedes_smu_ids': get_smu_ids(db_session, smu_info.supersedes),
            'superseded_by_smu_ids': get_smu_ids(db_session, smu_info.superseded_by),
        })
    return jsonify(**{'data': rows})
def get_smu_ids(db_session, smu_name_list):
    """Map a comma-delimited string of SMU names to their database ids.

    Names with no matching SMUInfo row map to UNKNOWN.  Returns a
    comma-delimited string of ids in the same order as the input names.
    """
    smu_ids = []
    smu_names = comma_delimited_str_to_list(smu_name_list)
    for smu_name in smu_names:
        smu_info = db_session.query(SMUInfo).filter(SMUInfo.name == smu_name).first()
        smu_ids.append(UNKNOWN if smu_info is None else smu_info.id)
    # str() guards against integer ids: ','.join() raises TypeError on
    # non-string items.  (Also avoids shadowing the builtin id().)
    return ','.join(str(smu_id) for smu_id in smu_ids)
@cco.route('/api/get_ddts_details/ddts_id/<ddts_id>')
@login_required
def api_get_ddts_details(ddts_id):
    """Return bug (DDTS) details fetched from the Cisco bug service.

    Uses the current user's stored CCO credentials.  On failure, returns
    {'data': {'ErrorMsg': ...}} instead of the bug fields.
    """
    # Fetch the preferences row once instead of issuing two identical queries.
    preferences = Preferences.get(DBSession(), current_user.id)
    bsh = BugServiceHandler(preferences.cco_username, preferences.cco_password, ddts_id)
    try:
        bug_info = bsh.get_bug_info()
    except Exception as e:
        logger.exception('api_get_ddts_details() hit exception ' + e.message)
        # 'access_token' signals an authentication failure against the service.
        if e.message == 'access_token':
            error_msg = 'Could not retrieve bug information. The username and password defined may not be correct ' \
                        '(Check Tools - User Preferences)'
        else:
            error_msg = 'Could not retrieve bug information.'
        return jsonify(**{'data': {'ErrorMsg': error_msg}})
    statuses = {'O': 'Open',
                'F': 'Fixed',
                'T': 'Terminated'}
    severities = {'1': "1 Catastrophic",
                  '2': "2 Severe",
                  '3': "3 Moderate",
                  '4': "4 Minor",
                  '5': "5 Cosmetic",
                  '6': "6 Enhancement"}

    def translate(key, table):
        # Map coded values to display strings; unknown codes pass through.
        raw = get_json_value(bug_info, key)
        return table[raw] if raw in table else raw

    def multiline(key, separator):
        # Replace separators with <br> for HTML display; None stays None.
        raw = get_json_value(bug_info, key)
        return raw.replace(separator, '<br>') if raw else None

    info = {}
    info['status'] = translate('status', statuses)
    info['product'] = get_json_value(bug_info, 'product')
    info['severity'] = translate('severity', severities)
    info['headline'] = get_json_value(bug_info, 'headline')
    info['support_case_count'] = get_json_value(bug_info, 'support_case_count')
    info['last_modified_date'] = get_json_value(bug_info, 'last_modified_date')
    info['bug_id'] = get_json_value(bug_info, 'bug_id')
    info['created_date'] = get_json_value(bug_info, 'created_date')
    info['duplicate_of'] = get_json_value(bug_info, 'duplicate_of')
    info['description'] = multiline('description', '\n')
    info['known_affected_releases'] = multiline('known_affected_releases', ' ')
    info['known_fixed_releases'] = multiline('known_fixed_releases', ' ')
    info['error_description'] = get_json_value(bug_info, 'ErrorDescription')
    info['suggested_action'] = get_json_value(bug_info, 'SuggestedAction')
    return jsonify(**{'data': info})
@cco.route('/user_preferences', methods=['GET','POST'])
@login_required
def user_preferences():
    """Display and save the current user's preferences.

    POST: saves the CCO credentials (the password only when a new one was
    typed) and the checked platform/release exclusions, then redirects home.
    GET (or invalid POST): renders the preferences page.
    """
    db_session = DBSession()
    form = PreferencesForm(request.form)
    user = get_user_by_id(db_session, current_user.id)
    if request.method == 'POST' and form.validate():
        user.preferences[0].cco_username = form.cco_username.data
        # An empty password field means "keep the password already on file".
        if len(form.cco_password.data) > 0:
            user.preferences[0].cco_password = form.cco_password.data
        # All the checked checkboxes (i.e. platforms and releases to exclude).
        values = request.form.getlist('check')
        excluded_platform_list = ','.join(values)
        preferences = Preferences.get(db_session, current_user.id)
        preferences.excluded_platforms_and_releases = excluded_platform_list
        db_session.commit()
        return redirect(url_for('home'))
    else:
        preferences = user.preferences[0]
        form.cco_username.data = preferences.cco_username
        # Never echo the stored password back; only hint whether one exists.
        if not is_empty(user.preferences[0].cco_password):
            form.password_placeholder = 'Use Password on File'
        else:
            form.password_placeholder = 'No Password Specified'
        return render_template('cco/preferences.html', form=form,
                               platforms_and_releases=get_platforms_and_releases_dict(db_session))
def get_platforms_and_releases_dict(db_session):
    """Build rows of {platform, release, excluded} for the preferences page."""
    preferences = Preferences.get(db_session, current_user.id)
    # It is possible that the preferences have not been created yet.
    excluded_platform_list = []
    if preferences is not None and preferences.excluded_platforms_and_releases is not None:
        excluded_platform_list = preferences.excluded_platforms_and_releases.split(',')
    rows = []
    catalog = SMUInfoLoader.get_catalog()
    if len(catalog) > 0:
        for platform in catalog:
            for release in catalog[platform]:
                rows.append({'platform': platform,
                             'release': release,
                             'excluded': (platform + '_' + release) in excluded_platform_list})
    else:
        # get_catalog() failed; fall back to the excluded entries themselves.
        for platform_and_release in excluded_platform_list:
            pos = platform_and_release.rfind('_')
            if pos > 0:
                rows.append({'platform': platform_and_release[:pos],
                             'release': platform_and_release[pos + 1:],
                             'excluded': True})
    return rows
@cco.route('/software/export/platform/<platform>/release/<release>', methods=['POST'])
@login_required
def export_software_information(platform, release):
    """Export SMU/SP information for a platform/release as HTML or Excel."""
    smu_loader = SMUInfoLoader(platform, release)
    if not smu_loader.is_valid:
        return jsonify({'status': 'Failed'})

    export_format = request.args.get('export_format')
    export_layout = request.args.get('export_layout')
    export_filter = request.args.get('filter')

    # 'Optimal' narrows both lists to the optimized subsets.
    if export_filter == 'Optimal':
        smu_list = smu_loader.get_optimal_smu_list()
        sp_list = smu_loader.get_optimal_sp_list()
    else:
        smu_list = smu_loader.get_smu_list()
        sp_list = smu_loader.get_sp_list()

    # Pick the writer class from (format, layout); all writers share a ctor.
    concise = export_layout == ExportSoftwareInformationLayout.CONCISE
    if export_format == ExportInformationFormat.HTML:
        writer_class = ExportSoftwareInfoHTMLConciseWriter if concise \
            else ExportSoftwareInfoHTMLDefaultWriter
    else:
        writer_class = ExportSoftwareInfoExcelConciseWriter if concise \
            else ExportSoftwareInfoExcelDefaultWriter

    writer = writer_class(user=current_user, smu_loader=smu_loader,
                          smu_list=smu_list, sp_list=sp_list)
    return send_file(writer.write_report(), as_attachment=True)
@cco.route('/api/check_cisco_authentication/', methods=['POST'])
@login_required
def check_cisco_authentication():
    """Report whether CCO credentials are on file for the current user."""
    preferences = Preferences.get(DBSession(), current_user.id)
    have_credentials = (preferences is not None and
                        not is_empty(preferences.cco_username) and
                        not is_empty(preferences.cco_password))
    return jsonify({'status': 'OK' if have_credentials else 'Failed'})
@cco.route('/optimize_software')
@login_required
def optimize_software():
    """Render the software-optimization page with its dialog forms."""
    form_data = request.form
    return render_template('cco/optimize_software.html',
                           server_dialog_form=ServerDialogForm(form_data),
                           software_profile_form=SoftwareProfileForm(form_data),
                           system_option=SystemOption.get(DBSession()))
def get_filtered_platform_list(platform, releases, excluded_platform_list):
    """Return the releases whose '<platform>_<release>' key is not excluded."""
    return [release for release in releases
            if platform + '_' + release not in excluded_platform_list]
@cco.route('/api/get_catalog')
@login_required
def api_get_catalog():
    """Return the CCO catalog (platform/releases), honoring user exclusions."""
    db_session = DBSession()
    excluded_platform_list = []
    preferences = Preferences.get(db_session, current_user.id)
    # Guard against a missing preferences row, consistent with
    # get_platforms_and_releases_dict(); previously this raised
    # AttributeError for a user whose preferences were not yet created.
    if preferences is not None and preferences.excluded_platforms_and_releases is not None:
        excluded_platform_list = preferences.excluded_platforms_and_releases.split(',')
    rows = []
    catalog = SMUInfoLoader.get_catalog()
    for platform in catalog:
        releases = get_filtered_platform_list(platform, catalog[platform], excluded_platform_list)
        # Skip platforms whose releases were all excluded.
        if len(releases) > 0:
            row = dict()
            row['platform'] = platform
            row['beautified_platform'] = beautify_platform(platform)
            row['releases'] = releases
            rows.append(row)
    return jsonify(**{'data': rows})
@cco.route('/api_fetch_cco_software/platform/<platform>/release/<release>')
@login_required
def api_fetch_cco_software(platform, release):
    """Load SMU info for the platform/release and report whether it is valid."""
    loader = SMUInfoLoader(platform, release)
    status = 'OK' if loader.is_valid else 'Failed'
    return jsonify({'status': status})
@cco.route('/api/get_smu_list/platform/<platform>/release/<release>')
@login_required
def api_get_smu_list(platform, release):
    """Return the SMU list (or the optimal subset) for a platform/release."""
    smu_loader = SMUInfoLoader(platform, release, from_cco=False)
    if not smu_loader.is_valid:
        return jsonify(**{'data': []})
    # 'Optimal' narrows the list to the optimized subset.
    optimal = request.args.get('filter') == 'Optimal'
    smu_list = smu_loader.get_optimal_smu_list() if optimal else smu_loader.get_smu_list()
    return get_smu_or_sp_list(request.args.get('hostname'),
                              request.args.get('hide_installed_packages'),
                              smu_list, smu_loader.file_suffix)
@cco.route('/api/get_sp_list/platform/<platform>/release/<release>')
@login_required
def api_get_sp_list(platform, release):
    """Return the SP list (or the optimal subset) for a platform/release."""
    smu_loader = SMUInfoLoader(platform, release, from_cco=False)
    if not smu_loader.is_valid:
        return jsonify(**{'data': []})
    # 'Optimal' narrows the list to the optimized subset.
    optimal = request.args.get('filter') == 'Optimal'
    sp_list = smu_loader.get_optimal_sp_list() if optimal else smu_loader.get_sp_list()
    return get_smu_or_sp_list(request.args.get('hostname'),
                              request.args.get('hide_installed_packages'),
                              sp_list, smu_loader.file_suffix)
@cco.route('/api/get_tar_list/platform/<platform>/release/<release>')
@login_required
def api_get_tar_list(platform, release):
    """Return the tar-file list for a platform/release, flagging local copies."""
    smu_loader = SMUInfoLoader(platform, release, from_cco=False)
    if not smu_loader.is_valid:
        return jsonify(**{'data': []})
    # 'ST' marks whether the tar already exists in the local repository.
    file_list = get_file_list(get_repository_directory(), '.tar')
    rows = [{'ST': 'True' if tar_info.name in file_list else 'False',
             'name': tar_info.name,
             'compressed_size': tar_info.compressed_image_size,
             'description': ""}
            for tar_info in smu_loader.get_tar_list()]
    return jsonify(**{'data': rows})
def get_smu_or_sp_list(hostname, hide_installed_packages, smu_info_list, file_suffix):
    """
    Return the SMU/SP list as a JSON payload. If hostname is given, compare
    its active packages so each row can be flagged installed/applicable.

    hostname -- optional host whose active packages are compared.
    hide_installed_packages -- the string 'true' drops already-installed rows.
    smu_info_list -- SMUInfo-like records to render.
    file_suffix -- image file extension (without the leading dot).
    """
    file_list = get_file_list(get_repository_directory(), '.' + file_suffix)
    host_packages = [] if is_empty(hostname) else get_host_active_packages(hostname)
    check_package_bundles = False
    if not is_empty(hostname):
        db_session = DBSession()
        host = get_host(db_session, hostname)
        if host is not None:
            software_platform = get_software_platform(host.family, host.os_type)
            # Only for ASR9K, other platforms do not follow the definition of package_bundles
            # (i.e., the values in the package_bundles cannot be used to compare with
            # the package list on the device).
            if software_platform == PlatformFamily.ASR9K:
                check_package_bundles = True
    rows = []
    for smu_info in smu_info_list:
        # Verify if the package has already been installed (substring match
        # against the host's active package names).
        installed = False
        for host_package in host_packages:
            if smu_info.name in host_package:
                installed = True
                break
        include = False if (hide_installed_packages == 'true' and installed) else True
        if include:
            row = dict()
            image_name = smu_info.name + '.' + file_suffix
            # 'ST' marks whether the image already exists in the local repository.
            row['ST'] = 'True' if image_name in file_list else 'False'
            row['package_name'] = image_name if is_empty(smu_info.package_names) else smu_info.package_names
            row['posted_date'] = smu_info.posted_date.split()[0]
            row['ddts'] = smu_info.ddts
            row['ddts_url'] = BUG_SEARCH_URL + smu_info.ddts
            row['type'] = smu_info.type
            row['description'] = smu_info.description
            row['impact'] = smu_info.impact
            row['functional_areas'] = smu_info.functional_areas
            row['id'] = smu_info.id
            row['name'] = smu_info.name
            row['status'] = smu_info.status
            row['package_bundles'] = smu_info.package_bundles
            row['compressed_image_size'] = smu_info.compressed_image_size
            row['uncompressed_image_size'] = smu_info.uncompressed_image_size
            row['is_installed'] = installed
            row['is_applicable'] = True
            # Applicability via package_bundles is checked for SMUs only,
            # and only on ASR9K (see check_package_bundles above).
            if check_package_bundles and SMU_INDICATOR in smu_info.name:
                row['is_applicable'] = is_smu_applicable(host_packages, smu_info.package_bundles)
            rows.append(row)
    return jsonify(**{'data': rows})
def is_smu_applicable(host_packages, required_package_bundles):
    """
    Only SMU should go through this logic.
    Return True when every required package bundle (except asr9k-fpd-px,
    which is exempt) appears as a substring of some host package.
    """
    if is_empty(required_package_bundles):
        return True
    for bundle in required_package_bundles.split(','):
        if bundle == 'asr9k-fpd-px':
            # asr9k-fpd-px is explicitly excluded from the requirement.
            continue
        if not any(bundle in host_package for host_package in host_packages):
            return False
    return True
@cco.route('/api/optimize_software', methods=['POST'])
@login_required
def api_optimize_software():
    """Return the optimized package list for the POSTed package names."""
    requested_packages = request.form.getlist('package_list[]')
    optimized = get_optimized_list(requested_packages)
    return jsonify(**{'data': optimized})
@cco.route('/api/validate_cisco_user', methods=['POST'])
@login_required
def validate_cisco_user():
    """Validate CCO credentials by requesting an access token.

    An empty POSTed password falls back to the password on file.
    Returns {'status': 'OK'} or {'status': 'Failed'}; aborts with 401 when
    the user may not check reachability.
    """
    if not can_check_reachability(current_user):
        abort(401)
    try:
        username = request.form['username']
        password = request.form['password']
        if len(password) == 0:
            password = Preferences.get(DBSession(), current_user.id).cco_password
        BSDServiceHandler.get_access_token(username, password)
        return jsonify({'status': 'OK'})
    except KeyError:
        # A missing form field is a client error; nothing worth logging.
        return jsonify({'status': 'Failed'})
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # propagate; real token failures are still logged and reported.
        logger.exception('validate_cisco_user() hit exception')
        return jsonify({'status': 'Failed'})
@cco.route('/api/refresh_all_smu_info')
@login_required
def api_refresh_all_smu_info():
    """Force a refresh of all cached SMU information."""
    succeeded = SMUInfoLoader.refresh_all()
    return jsonify({'status': 'OK' if succeeded else 'Failed'})
@cco.route('/api/get_cco_lookup_time')
@login_required
def api_get_cco_lookup_time():
    """Return the last CCO lookup time, or Failed when none is recorded."""
    lookup_time = SystemOption.get(DBSession()).cco_lookup_time
    if lookup_time is None:
        return jsonify({'status': 'Failed'})
    return jsonify(**{'data': [{'cco_lookup_time': get_datetime_string(lookup_time)}]})
class PreferencesForm(Form):
    """Form for the user-preferences page (CCO credentials)."""
    # CCO (cisco.com) credentials used for software downloads and bug lookups.
    cco_username = StringField('Username')
    cco_password = PasswordField('Password')
class ExportSoftwareInformationForm(ExportInformationForm):
    """Export dialog form: adds the report layout (concise vs. default)."""
    export_layout = SelectField('Layout', coerce=str,
                                choices=[(ExportSoftwareInformationLayout.CONCISE,
                                          ExportSoftwareInformationLayout.CONCISE),
                                         (ExportSoftwareInformationLayout.DEFAULT,
                                          ExportSoftwareInformationLayout.DEFAULT)])
| |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for encoding protocol message primitives.
Contains the logic for encoding every logical protocol field type
into one of the 5 physical wire types.
This code is designed to push the Python interpreter's performance to the
limits.
The basic idea is that at startup time, for every field (i.e. every
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
sizer takes a value of this field's type and computes its byte size. The
encoder takes a writer function and a value. It encodes the value into byte
strings and invokes the writer function to write those strings. Typically the
writer function is the write() method of a BytesIO.
We try to do as much work as possible when constructing the writer and the
sizer rather than when calling them. In particular:
* We copy any needed global functions to local variables, so that we do not need
to do costly global table lookups at runtime.
* Similarly, we try to do any attribute lookups at startup time if possible.
* Every field's tag is encoded to bytes at startup, since it can't change at
runtime.
* Whatever component of the field size we can compute at startup, we do.
* We *avoid* sharing code if doing so would make the code slower and not sharing
does not burden us too much. For example, encoders for repeated fields do
not just call the encoders for singular fields in a loop because this would
add an extra function call overhead for every loop iteration; instead, we
manually inline the single-value encoder into the loop.
* If a Python function lacks a return statement, Python actually generates
instructions to pop the result of the last statement off the stack, push
None onto the stack, and then return that. If we really don't care what
value is returned, then we can save two instructions by returning the
result of the last statement. It looks funny but it helps.
* We assume that type and bounds checking has happened at a higher level.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
from google.protobuf.internal import wire_format
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
# These sentinels are compared against in _FloatingPointEncoder below.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _TagSize(field_number):
  """Returns the number of bytes required to serialize a tag with this field
  number."""
  # Wire type 0 is fine here: the wire type never changes the tag's size.
  packed_tag = wire_format.PackTag(field_number, 0)
  return _VarintSize(packed_tag)
# --------------------------------------------------------------------
# In this section we define some generic sizers. Each of these functions
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
# It returns another function which in turn takes parameters specific to a
# particular field, e.g. the field number and whether it is repeated or packed.
# Look at the next section to see how these are used.
def _SimpleSizer(compute_value_size):
  """A sizer which uses the function compute_value_size to compute the size of
  each value. Typically compute_value_size is _VarintSize."""
  def SpecificSizer(field_number, is_repeated, is_packed):
    # The tag size is a per-field constant, computed once at startup.
    tag_size = _TagSize(field_number)
    if is_packed:
      # Bound to a local for faster lookup inside the hot loop below.
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        # Packed: one tag + a varint length prefix + the payload bytes.
        result = 0
        for element in value:
          result += compute_value_size(element)
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      def RepeatedFieldSize(value):
        # Non-packed repeated: each element carries its own tag.
        result = tag_size * len(value)
        for element in value:
          result += compute_value_size(element)
        return result
      return RepeatedFieldSize
    else:
      def FieldSize(value):
        # Singular: one tag plus the encoded value.
        return tag_size + compute_value_size(value)
      return FieldSize
  return SpecificSizer
def _ModifiedSizer(compute_value_size, modify_value):
  """Like SimpleSizer, but modify_value is invoked on each value before it is
  passed to compute_value_size. modify_value is typically ZigZagEncode."""
  def SpecificSizer(field_number, is_repeated, is_packed):
    # The tag size is a per-field constant, computed once at startup.
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        # Packed: one tag + a varint length prefix + modified-value payload.
        result = 0
        for element in value:
          result += compute_value_size(modify_value(element))
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      def RepeatedFieldSize(value):
        # Non-packed repeated: each element carries its own tag.
        result = tag_size * len(value)
        for element in value:
          result += compute_value_size(modify_value(element))
        return result
      return RepeatedFieldSize
    else:
      def FieldSize(value):
        return tag_size + compute_value_size(modify_value(value))
      return FieldSize
  return SpecificSizer
def _FixedSizer(value_size):
  """Like _SimpleSizer except for a fixed-size field. The input is the size
  of one value."""
  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        # Fixed-width elements: the payload size is a simple multiplication.
        result = len(value) * value_size
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      # The per-element cost (tag + value) is constant, so precompute it.
      element_size = value_size + tag_size
      def RepeatedFieldSize(value):
        return len(value) * element_size
      return RepeatedFieldSize
    else:
      # Singular fixed-width fields have a fully constant size.
      field_size = value_size + tag_size
      def FieldSize(value):
        return field_size
      return FieldSize
  return SpecificSizer
# ====================================================================
# Here we declare a sizer constructor for each field type. Each "sizer
# constructor" is a function that takes (field_number, is_repeated, is_packed)
# as parameters and returns a sizer, which in turn takes a field value as
# a parameter and returns its encoded size.
# Varint-encoded scalar types:
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)
UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)
# sint32/sint64 are ZigZag-encoded before sizing:
SInt32Sizer = SInt64Sizer = _ModifiedSizer(
    _SignedVarintSize, wire_format.ZigZagEncode)
# Fixed-width types (4 bytes, 8 bytes, and 1-byte bools):
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)
BoolSizer = _FixedSizer(1)
def StringSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a string field."""
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  assert not is_packed  # strings are length-delimited; packing does not apply
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        # Size is measured on the UTF-8 encoding, not the character count.
        l = local_len(element.encode('utf-8'))
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = local_len(value.encode('utf-8'))
      return tag_size + local_VarintSize(l) + l
    return FieldSize
def BytesSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a bytes field."""
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  assert not is_packed  # bytes fields are length-delimited; packing does not apply
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        # Unlike strings, no encoding step: the raw length is the payload size.
        l = local_len(element)
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = local_len(value)
      return tag_size + local_VarintSize(l) + l
    return FieldSize
def GroupSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a group field."""
  # Groups are bracketed by start/end tags, hence two tags per occurrence.
  tag_size = _TagSize(field_number) * 2
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        result += element.ByteSize()
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      return tag_size + value.ByteSize()
    return FieldSize
def MessageSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a message field."""
  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        # Submessages are length-delimited: varint length prefix + payload.
        l = element.ByteSize()
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = value.ByteSize()
      return tag_size + local_VarintSize(l) + l
    return FieldSize
# --------------------------------------------------------------------
# MessageSet is special: it needs custom logic to compute its size properly.
def MessageSetItemSizer(field_number):
  """Returns a sizer for extensions of MessageSet.
  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """
  # Everything except the message payload is constant per field: the two
  # Item-group tags, the type_id tag, the type_id value (the extension's
  # field number, as a varint) and the message tag.
  static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
                 _TagSize(3))
  local_VarintSize = _VarintSize
  def FieldSize(value):
    l = value.ByteSize()
    return static_size + local_VarintSize(l) + l
  return FieldSize
# --------------------------------------------------------------------
# Map is special: it needs custom logic to compute its size properly.
def MapSizer(field_descriptor, is_message_map):
  """Returns a sizer for a map field."""
  # Can't look at field_descriptor.message_type._concrete_class because it may
  # not have been initialized yet.
  message_type = field_descriptor.message_type
  # Each map entry is serialized as an ordinary submessage.
  message_sizer = MessageSizer(field_descriptor.number, False, False)
  def FieldSize(map_value):
    total = 0
    for key in map_value:
      value = map_value[key]
      # It's wasteful to create the messages and throw them away one second
      # later since we'll do the same for the actual encode.  But there's not an
      # obvious way to avoid this within the current design without tons of code
      # duplication. For message map, value.ByteSize() should be called to
      # update the status.
      entry_msg = message_type._concrete_class(key=key, value=value)
      total += message_sizer(entry_msg)
      if is_message_map:
        value.ByteSize()
    return total
  return FieldSize
# ====================================================================
# Encoders!
def _VarintEncoder():
"""Return an encoder for a basic varint value (does not include tag)."""
local_int2byte = struct.Struct('>B').pack
def EncodeVarint(write, value, unused_deterministic=None):
bits = value & 0x7f
value >>= 7
while value:
write(local_int2byte(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_int2byte(bits))
return EncodeVarint
def _SignedVarintEncoder():
"""Return an encoder for a basic signed varint value (does not include
tag)."""
local_int2byte = struct.Struct('>B').pack
def EncodeSignedVarint(write, value, unused_deterministic=None):
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(local_int2byte(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(local_int2byte(bits))
return EncodeSignedVarint
# Module-level singletons: built once at import time and shared by all the
# encoder constructors below.
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
def _VarintBytes(value):
  """Encode the given integer as a varint and return the bytes. This is only
  called at startup time so it doesn't need to be fast."""
  chunks = []
  _EncodeVarint(chunks.append, value, True)
  return b"".join(chunks)
def TagBytes(field_number, wire_type):
  """Encode the given tag and return the bytes. Only called at startup."""
  packed_tag = wire_format.PackTag(field_number, wire_type)
  return bytes(_VarintBytes(packed_tag))
# --------------------------------------------------------------------
# As with sizers (see above), we have a number of common encoder
# implementations.
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
  """Return a constructor for an encoder for fields of a particular type.
  Args:
      wire_type: The field's wire type, for encoding tags.
      encode_value: A function which encodes an individual value, e.g.
        _EncodeVarint().
      compute_value_size: A function which computes the size of an individual
        value, e.g. _VarintSize().
  """
  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      # Packed fields are written as one length-delimited blob.
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value, deterministic):
        write(tag_bytes)
        # First pass computes the payload size for the length prefix...
        size = 0
        for element in value:
          size += compute_value_size(element)
        local_EncodeVarint(write, size, deterministic)
        # ...second pass writes the elements themselves.
        for element in value:
          encode_value(write, element, deterministic)
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value, deterministic):
        # Non-packed repeated: tag + value for every element.
        for element in value:
          write(tag_bytes)
          encode_value(write, element, deterministic)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value, deterministic):
        write(tag_bytes)
        return encode_value(write, value, deterministic)
      return EncodeField
  return SpecificEncoder
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
  """Like SimpleEncoder but additionally invokes modify_value on every value
  before passing it to encode_value. Usually modify_value is ZigZagEncode."""
  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value, deterministic):
        write(tag_bytes)
        # Two passes: size the modified values, then encode them.
        size = 0
        for element in value:
          size += compute_value_size(modify_value(element))
        local_EncodeVarint(write, size, deterministic)
        for element in value:
          encode_value(write, modify_value(element), deterministic)
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value, deterministic):
        for element in value:
          write(tag_bytes)
          encode_value(write, modify_value(element), deterministic)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value, deterministic):
        write(tag_bytes)
        return encode_value(write, modify_value(value), deterministic)
      return EncodeField
  return SpecificEncoder
def _StructPackEncoder(wire_type, format):
  """Return a constructor for an encoder for a fixed-width field.
  Args:
      wire_type: The field's wire type, for encoding tags.
      format: The format string to pass to struct.pack().
  """
  value_size = struct.calcsize(format)
  def SpecificEncoder(field_number, is_repeated, is_packed):
    # Bound to a local for faster lookup inside the closures below.
    local_struct_pack = struct.pack
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value, deterministic):
        write(tag_bytes)
        # Fixed-width elements: the length prefix is a simple multiplication.
        local_EncodeVarint(write, len(value) * value_size, deterministic)
        for element in value:
          write(local_struct_pack(format, element))
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value, unused_deterministic=None):
        for element in value:
          write(tag_bytes)
          write(local_struct_pack(format, element))
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value, unused_deterministic=None):
        write(tag_bytes)
        return write(local_struct_pack(format, value))
      return EncodeField
  return SpecificEncoder
def _FloatingPointEncoder(wire_type, format):
  """Return a constructor for an encoder for float fields.
  This is like StructPackEncoder, but catches errors that may be due to
  passing non-finite floating-point values to struct.pack, and makes a
  second attempt to encode those values.
  Args:
      wire_type: The field's wire type, for encoding tags.
      format: The format string to pass to struct.pack().
  """
  value_size = struct.calcsize(format)
  if value_size == 4:
    def EncodeNonFiniteOrRaise(write, value):
      # Remember that the serialized form uses little-endian byte order.
      if value == _POS_INF:
        write(b'\x00\x00\x80\x7F')
      elif value == _NEG_INF:
        write(b'\x00\x00\x80\xFF')
      elif value != value:  # NaN compares unequal to itself
        write(b'\x00\x00\xC0\x7F')
      else:
        # Not a non-finite value after all: re-raise the original error.
        raise
  elif value_size == 8:
    def EncodeNonFiniteOrRaise(write, value):
      if value == _POS_INF:
        write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F')
      elif value == _NEG_INF:
        write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF')
      elif value != value:  # NaN compares unequal to itself
        write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F')
      else:
        # Not a non-finite value after all: re-raise the original error.
        raise
  else:
    raise ValueError('Can\'t encode floating-point values that are '
                     '%d bytes long (only 4 or 8)' % value_size)
  def SpecificEncoder(field_number, is_repeated, is_packed):
    local_struct_pack = struct.pack
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value, deterministic):
        write(tag_bytes)
        local_EncodeVarint(write, len(value) * value_size, deterministic)
        for element in value:
          # This try/except block is going to be faster than any code that
          # we could write to check whether element is finite.
          try:
            write(local_struct_pack(format, element))
          except SystemError:
            EncodeNonFiniteOrRaise(write, element)
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value, unused_deterministic=None):
        for element in value:
          write(tag_bytes)
          try:
            write(local_struct_pack(format, element))
          except SystemError:
            EncodeNonFiniteOrRaise(write, element)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value, unused_deterministic=None):
        write(tag_bytes)
        try:
          write(local_struct_pack(format, value))
        except SystemError:
          EncodeNonFiniteOrRaise(write, value)
      return EncodeField
  return SpecificEncoder
# ====================================================================
# Here we declare an encoder constructor for each field type. These work
# very similarly to sizer constructors, described earlier.

# Varint types: int32/int64/enum share the sign-extending varint encoder.
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)

UInt32Encoder = UInt64Encoder = _SimpleEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)

# sint32/sint64 are zig-zag transformed before the varint encoding step.
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
    wire_format.ZigZagEncode)

# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
def BoolEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a boolean field."""

  def _byte(value):
    # Truthiness (not identity with True) selects the wire byte.
    return b'\x01' if value else b'\x00'

  if is_packed:
    tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
    encode_varint = _EncodeVarint

    def EncodePackedField(write, value, deterministic):
      # One tag, then the element count as the byte length (bools are one
      # byte each on the wire), then the payload.
      write(tag_bytes)
      encode_varint(write, len(value), deterministic)
      for element in value:
        write(_byte(element))
    return EncodePackedField
  if is_repeated:
    tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)

    def EncodeRepeatedField(write, value, unused_deterministic=None):
      for element in value:
        write(tag_bytes)
        write(_byte(element))
    return EncodeRepeatedField
  tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)

  def EncodeField(write, value, unused_deterministic=None):
    write(tag_bytes)
    return write(_byte(value))
  return EncodeField
def StringEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a string field."""
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  encode_varint = _EncodeVarint
  assert not is_packed  # strings are never packed

  if is_repeated:
    def EncodeRepeatedField(write, value, deterministic):
      # Each element: tag, UTF-8 byte length, UTF-8 payload.
      for element in value:
        data = element.encode('utf-8')
        write(tag)
        encode_varint(write, len(data), deterministic)
        write(data)
    return EncodeRepeatedField

  def EncodeField(write, value, deterministic):
    data = value.encode('utf-8')
    write(tag)
    encode_varint(write, len(data), deterministic)
    return write(data)
  return EncodeField
def BytesEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a bytes field."""
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  encode_varint = _EncodeVarint
  assert not is_packed  # bytes fields are never packed

  if is_repeated:
    def EncodeRepeatedField(write, value, deterministic):
      # Each element: tag, byte length, raw payload.
      for element in value:
        write(tag)
        encode_varint(write, len(element), deterministic)
        write(element)
    return EncodeRepeatedField

  def EncodeField(write, value, deterministic):
    write(tag)
    encode_varint(write, len(value), deterministic)
    return write(value)
  return EncodeField
def GroupEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a group field."""
  start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
  end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
  assert not is_packed  # groups are never packed

  if is_repeated:
    def EncodeRepeatedField(write, value, deterministic):
      # Groups are delimited by start/end tags instead of a length prefix.
      for element in value:
        write(start_tag)
        element._InternalSerialize(write, deterministic)
        write(end_tag)
    return EncodeRepeatedField

  def EncodeField(write, value, deterministic):
    write(start_tag)
    value._InternalSerialize(write, deterministic)
    return write(end_tag)
  return EncodeField
def MessageEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a message field."""
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  encode_varint = _EncodeVarint
  assert not is_packed  # sub-messages are never packed

  if is_repeated:
    def EncodeRepeatedField(write, value, deterministic):
      # Each sub-message is length-prefixed: tag, byte size, payload.
      for element in value:
        write(tag)
        encode_varint(write, element.ByteSize(), deterministic)
        element._InternalSerialize(write, deterministic)
    return EncodeRepeatedField

  def EncodeField(write, value, deterministic):
    write(tag)
    encode_varint(write, value.ByteSize(), deterministic)
    return value._InternalSerialize(write, deterministic)
  return EncodeField
# --------------------------------------------------------------------
# As before, MessageSet is special.
def MessageSetItemEncoder(field_number):
  """Encoder for extensions of MessageSet.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """
  # Everything up to the message payload is constant for a given
  # field_number, so render it once up front: the Item start-group tag,
  # the type_id field (tag + varint value), and the tag of the
  # length-delimited ``message`` field.  Order is significant.
  start_bytes = b"".join([
      TagBytes(1, wire_format.WIRETYPE_START_GROUP),
      TagBytes(2, wire_format.WIRETYPE_VARINT),
      _VarintBytes(field_number),
      TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
  end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
  local_EncodeVarint = _EncodeVarint

  def EncodeField(write, value, deterministic):
    # One item: constant prefix, message byte length, payload, end-group tag.
    write(start_bytes)
    local_EncodeVarint(write, value.ByteSize(), deterministic)
    value._InternalSerialize(write, deterministic)
    return write(end_bytes)
  return EncodeField
# --------------------------------------------------------------------
# As before, Map is special.
def MapEncoder(field_descriptor):
  """Encoder for map fields.

  (The previous docstring said "extensions of MessageSet" — a copy/paste
  from MessageSetItemEncoder; this encoder actually serializes map entries.)

  Maps always have a wire format like this:
    message MapEntry {
      key_type key = 1;
      value_type value = 2;
    }
    repeated MapEntry map = N;
  """
  # Can't look at field_descriptor.message_type._concrete_class because it may
  # not have been initialized yet.
  message_type = field_descriptor.message_type
  # Each map entry is serialized as an ordinary embedded message.
  encode_message = MessageEncoder(field_descriptor.number, False, False)

  def EncodeField(write, value, deterministic):
    # Deterministic output requires a stable key order; otherwise iterate
    # the mapping in its native order.
    value_keys = sorted(value.keys()) if deterministic else value
    for key in value_keys:
      entry_msg = message_type._concrete_class(key=key, value=value[key])
      encode_message(write, entry_msg, deterministic)
  return EncodeField
| |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import TestCase
from kmip.core.objects import ExtensionInformation
from kmip.core.objects import ExtensionName
from kmip.core.objects import ExtensionTag
from kmip.core.objects import ExtensionType
from kmip.core.utils import BytearrayStream
class TestExtensionInformation(TestCase):
    """
    A test suite for the ExtensionInformation class.

    Test encodings obtained from Section 12.2 of the KMIP 1.1 Test Cases
    documentation.
    """

    def setUp(self):
        super(TestExtensionInformation, self).setUp()

        # Fixture values; the _b/_c/_d suffix pairs each value with the
        # encoding (encoding_b/c/d) that contains it.
        self.extension_name_b = ExtensionName('ACME LOCATION')
        self.extension_name_c = ExtensionName('ACME LOCATION')
        self.extension_name_d = ExtensionName('ACME ZIP CODE')
        self.extension_tag_c = ExtensionTag(5548545)
        self.extension_tag_d = ExtensionTag(5548546)
        self.extension_type_c = ExtensionType(7)
        self.extension_type_d = ExtensionType(2)

        # TTLV byte streams: _a is an empty structure, _b carries only a
        # name, _c and _d carry name + tag + type.
        self.encoding_a = BytearrayStream(
            b'\x42\x00\xA4\x01\x00\x00\x00\x08\x42\x00\xA5\x07\x00\x00\x00'
            b'\x00')
        self.encoding_b = BytearrayStream(
            b'\x42\x00\xA4\x01\x00\x00\x00\x18\x42\x00\xA5\x07\x00\x00\x00\x0D'
            b'\x41\x43\x4D\x45\x20\x4C\x4F\x43\x41\x54\x49\x4F\x4E\x00\x00'
            b'\x00')
        self.encoding_c = BytearrayStream(
            b'\x42\x00\xA4\x01\x00\x00\x00\x38\x42\x00\xA5\x07\x00\x00\x00\x0D'
            b'\x41\x43\x4D\x45\x20\x4C\x4F\x43\x41\x54\x49\x4F\x4E\x00\x00\x00'
            b'\x42\x00\xA6\x02\x00\x00\x00\x04\x00\x54\xAA\x01\x00\x00\x00\x00'
            b'\x42\x00\xA7\x02\x00\x00\x00\x04\x00\x00\x00\x07\x00\x00\x00'
            b'\x00')
        self.encoding_d = BytearrayStream(
            b'\x42\x00\xA4\x01\x00\x00\x00\x38\x42\x00\xA5\x07\x00\x00\x00\x0D'
            b'\x41\x43\x4D\x45\x20\x5A\x49\x50\x20\x43\x4F\x44\x45\x00\x00\x00'
            b'\x42\x00\xA6\x02\x00\x00\x00\x04\x00\x54\xAA\x02\x00\x00\x00\x00'
            b'\x42\x00\xA7\x02\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00'
            b'\x00')

    def tearDown(self):
        super(TestExtensionInformation, self).tearDown()

    def _test_init(self):
        # Placeholder stub; construction is covered by the two tests below.
        pass

    def test_init_with_none(self):
        ExtensionInformation()

    def test_init_with_args(self):
        ExtensionInformation(
            extension_name=ExtensionName(),
            extension_tag=ExtensionTag(),
            extension_type=ExtensionType())

    def test_validate_with_invalid_extension_name(self):
        """
        Test that a TypeError exception is raised when an invalid
        ExtensionName is used to construct an ExtensionInformation object.
        """
        kwargs = {'extension_name': 'invalid'}
        # NOTE(review): assertRaisesRegexp is a deprecated alias of
        # assertRaisesRegex (removed in Python 3.12).
        self.assertRaisesRegexp(
            TypeError, "invalid extension name",
            ExtensionInformation, **kwargs)

    def test_validate_with_invalid_extension_tag(self):
        """
        Test that a TypeError exception is raised when an invalid
        ExtensionTag is used to construct an ExtensionInformation object.
        """
        kwargs = {'extension_tag': 'invalid'}
        self.assertRaisesRegexp(
            TypeError, "invalid extension tag",
            ExtensionInformation, **kwargs)

    def test_validate_with_invalid_extension_type(self):
        """
        Test that a TypeError exception is raised when an invalid
        ExtensionType is used to construct an ExtensionInformation object.
        """
        kwargs = {'extension_type': 'invalid'}
        self.assertRaisesRegexp(
            TypeError, "invalid extension type",
            ExtensionInformation, **kwargs)

    def _test_read(self, stream, extension_name, extension_tag,
                   extension_type):
        # Shared driver: decode `stream` and compare each attribute against
        # the expected value (None means "expect the default").
        extension_information = ExtensionInformation()
        extension_information.read(stream)

        if extension_name is None:
            extension_name = ExtensionName()

        msg = "extension name encoding mismatch"
        msg += "; expected {0}, observed {1}".format(
            extension_name,
            extension_information.extension_name)
        self.assertEqual(
            extension_name,
            extension_information.extension_name, msg)

        msg = "extension tag encoding mismatch"
        msg += "; expected {0}, observed {1}".format(
            extension_tag,
            extension_information.extension_tag)
        self.assertEqual(
            extension_tag,
            extension_information.extension_tag, msg)

        msg = "extension type encoding mismatch"
        msg += "; expected {0}, observed {1}".format(
            extension_type,
            extension_information.extension_type)
        self.assertEqual(
            extension_type,
            extension_information.extension_type, msg)

    def test_read_with_none(self):
        """
        Test that an ExtensionInformation object with no data can be read from
        a data stream.
        """
        self._test_read(self.encoding_a, None, None, None)

    def test_read_with_partial_args(self):
        """
        Test that an ExtensionInformation object with some data can be read
        from a data stream.
        """
        self._test_read(self.encoding_b, self.extension_name_b, None, None)

    def test_read_with_multiple_args(self):
        """
        Test that an ExtensionInformation object with data can be read from a
        data stream.
        """
        self._test_read(self.encoding_c, self.extension_name_c,
                        self.extension_tag_c, self.extension_type_c)

    def _test_write(self, stream_expected, extension_name, extension_tag,
                    extension_type):
        # Shared driver: encode the given attributes and compare the result
        # byte-for-byte (length first, for a clearer failure message).
        stream_observed = BytearrayStream()
        extension_information = ExtensionInformation(
            extension_name=extension_name,
            extension_tag=extension_tag,
            extension_type=extension_type)
        extension_information.write(stream_observed)

        length_expected = len(stream_expected)
        length_observed = len(stream_observed)

        msg = "encoding lengths not equal"
        msg += "; expected {0}, observed {1}".format(
            length_expected, length_observed)
        self.assertEqual(length_expected, length_observed, msg)

        msg = "encoding mismatch"
        msg += ";\nexpected:\n{0}\nobserved:\n{1}".format(
            stream_expected, stream_observed)
        self.assertEqual(stream_expected, stream_observed, msg)

    def test_write_with_none(self):
        """
        Test that an ExtensionInformation object with no data can be written
        to a data stream.
        """
        self._test_write(self.encoding_a, None, None, None)

    def test_write_with_partial_args(self):
        """
        Test that an ExtensionInformation object with some data can be written
        to a data stream.
        """
        self._test_write(self.encoding_b, self.extension_name_b, None, None)

    def test_write_with_multiple_args(self):
        """
        Test that an ExtensionInformation object with data can be written to
        a data stream.
        """
        self._test_write(self.encoding_c, self.extension_name_c,
                         self.extension_tag_c, self.extension_type_c)

    def _test_create(self, extension_name, extension_tag, extension_type):
        # Shared driver for the create() class method: raw values in,
        # wrapped attribute objects expected out.
        extension_information = ExtensionInformation.create(
            extension_name=extension_name,
            extension_tag=extension_tag,
            extension_type=extension_type)

        self.assertIsInstance(extension_information, ExtensionInformation)

        expected = ExtensionName(extension_name)
        observed = extension_information.extension_name
        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)

        expected = ExtensionTag(extension_tag)
        observed = extension_information.extension_tag
        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)

        expected = ExtensionType(extension_type)
        observed = extension_information.extension_type
        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)

    def test_create_with_none(self):
        """
        Test that an ExtensionInformation object with no data can be created
        using the create class method.
        """
        self._test_create(None, None, None)

    def test_create_with_args(self):
        """
        Test that an ExtensionInformation object with data can be created
        using the create class method.
        """
        self._test_create('ACME LOCATION', 5548545, 7)

    def test_equal_on_equal(self):
        """
        Test that the equality operator returns True when comparing two
        ExtensionInformation objects with the same internal data.
        """
        a = ExtensionInformation(
            extension_name=self.extension_name_c,
            extension_tag=self.extension_tag_c,
            extension_type=self.extension_type_c)
        b = ExtensionInformation(
            extension_name=self.extension_name_c,
            extension_tag=self.extension_tag_c,
            extension_type=self.extension_type_c)
        # Equality must be symmetric.
        self.assertTrue(a == b)
        self.assertTrue(b == a)

    def test_equal_on_equal_and_empty(self):
        """
        Test that the equality operator returns True when comparing two
        ExtensionInformation objects with no internal data.
        """
        a = ExtensionInformation()
        b = ExtensionInformation()
        self.assertTrue(a == b)
        self.assertTrue(b == a)

    def test_equal_on_not_equal(self):
        """
        Test that the equality operator returns False when comparing two
        ExtensionInformation objects with different sets of internal data.
        """
        a = ExtensionInformation(
            extension_name=self.extension_name_c,
            extension_tag=self.extension_tag_c,
            extension_type=self.extension_type_c)
        b = ExtensionInformation()
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_equal_on_type_mismatch(self):
        """
        Test that the equality operator returns False when comparing an
        ExtensionInformation object with a non-ExtensionInformation object.
        """
        a = ExtensionInformation(
            extension_name=self.extension_name_c,
            extension_tag=self.extension_tag_c,
            extension_type=self.extension_type_c)
        b = "invalid"
        self.assertFalse(a == b)
        self.assertFalse(b == a)

    def test_not_equal_on_equal(self):
        """
        Test that the inequality operator returns False when comparing two
        ExtensionInformation objects with the same internal data.
        """
        a = ExtensionInformation(
            extension_name=self.extension_name_c,
            extension_tag=self.extension_tag_c,
            extension_type=self.extension_type_c)
        b = ExtensionInformation(
            extension_name=self.extension_name_c,
            extension_tag=self.extension_tag_c,
            extension_type=self.extension_type_c)
        self.assertFalse(a != b)
        self.assertFalse(b != a)

    def test_not_equal_on_equal_and_empty(self):
        """
        Test that the inequality operator returns False when comparing two
        ExtensionInformation objects with no internal data.
        """
        a = ExtensionInformation()
        b = ExtensionInformation()
        self.assertFalse(a != b)
        self.assertFalse(b != a)

    def test_not_equal_on_not_equal(self):
        """
        Test that the inequality operator returns True when comparing two
        ExtensionInformation objects with the different sets of internal data.
        """
        a = ExtensionInformation(
            extension_name=self.extension_name_c,
            extension_tag=self.extension_tag_c,
            extension_type=self.extension_type_c)
        b = ExtensionInformation()
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_not_equal_on_type_mismatch(self):
        """
        Test that the inequality operator returns True when comparing an
        ExtensionInformation object with a non-ExtensionInformation object.
        """
        a = ExtensionInformation(
            extension_name=self.extension_name_c,
            extension_tag=self.extension_tag_c,
            extension_type=self.extension_type_c)
        b = "invalid"
        self.assertTrue(a != b)
        self.assertTrue(b != a)

    def test_repr_with_no_data(self):
        """
        Test that the representation of an ExtensionInformation object with no
        data is formatted properly and can be used by eval to create a new
        ExtensionInformation object identical to the original.
        """
        extension_information = ExtensionInformation()
        expected = "ExtensionInformation("
        expected += "extension_name=ExtensionName(value=''), "
        expected += "extension_tag=None, "
        expected += "extension_type=None)"
        observed = repr(extension_information)

        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)

        # repr() must round-trip through eval().
        expected = extension_information
        observed = eval(repr(extension_information))
        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)

    def test_repr_with_data(self):
        """
        Test that the representation of an ExtensionInformation object with
        data is formatted properly and can be used by eval to create a new
        ExtensionInformation object identical to the original.
        """
        extension_information = ExtensionInformation(
            extension_name=ExtensionName('ACME LOCATION'),
            extension_tag=ExtensionTag(5548545),
            extension_type=ExtensionType(7))
        expected = "ExtensionInformation("
        expected += "extension_name=ExtensionName(value='ACME LOCATION'), "
        expected += "extension_tag=ExtensionTag(value=5548545), "
        expected += "extension_type=ExtensionType(value=7))"
        observed = repr(extension_information)

        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)

        # repr() must round-trip through eval().
        expected = extension_information
        observed = eval(repr(extension_information))
        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)

    def test_str_with_no_data(self):
        """
        Test that the string representation of an ExtensionInformation object
        is formatted properly when there is no internal data.
        """
        extension_information = ExtensionInformation()
        expected = "ExtensionInformation("
        expected += "extension_name=ExtensionName(value=''), "
        expected += "extension_tag=None, "
        expected += "extension_type=None)"
        observed = str(extension_information)

        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)

    def test_str_with_data(self):
        """
        Test that the string representation of an ExtensionInformation object
        is formatted properly when there is internal data.
        """
        extension_information = ExtensionInformation(
            extension_name=ExtensionName('ACME LOCATION'),
            extension_tag=ExtensionTag(5548545),
            extension_type=ExtensionType(7))
        expected = "ExtensionInformation("
        expected += "extension_name=ExtensionName(value='ACME LOCATION'), "
        expected += "extension_tag=ExtensionTag(value=5548545), "
        expected += "extension_type=ExtensionType(value=7))"
        observed = str(extension_information)

        msg = "expected {0}, observed {1}".format(expected, observed)
        self.assertEqual(expected, observed, msg)
| |
from __future__ import print_function
'''
Classify sounds using database - evaluation code
Author: Scott H. Hawley
This is kind of a mixture of Keun Woo Choi's code https://github.com/keunwoochoi/music-auto_tagging-keras
and the MNIST classifier at https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py
Trained using Fraunhofer IDMT's database of monophonic guitar effects,
clips were 2 seconds long, sampled at 44100 Hz
'''
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
from keras.models import Sequential, Model
from keras.layers import Input, Dense, TimeDistributed, LSTM, Dropout, Activation
from keras.layers import Convolution2D, MaxPooling2D, Flatten
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import ELU
from keras.callbacks import ModelCheckpoint
from keras import backend
from keras.utils import np_utils
import os
from os.path import isfile
from sklearn.metrics import roc_auc_score
from timeit import default_timer as timer
from sklearn.metrics import roc_auc_score, roc_curve, auc
mono=True
def get_class_names(path="Preproc/"):   # class names are subdirectory names in Preproc/ directory
    """Return the list of class names, i.e. the entries of `path`."""
    return os.listdir(path)
def get_total_files(path="Preproc/",train_percentage=0.8):
    """Count files per class subdirectory and split the totals.

    Returns (total, train, test) file counts, where each class contributes
    int(train_percentage * n_files) to the training count and the remainder
    to the test count.
    """
    sum_total = 0
    sum_train = 0
    sum_test = 0
    for subdir in os.listdir(path):
        n_files = len(os.listdir(path + subdir))
        n_train = int(train_percentage * n_files)
        sum_total += n_files
        sum_train += n_train
        sum_test += n_files - n_train
    return sum_total, sum_train, sum_test
def get_sample_dimensions(path='Preproc/'):
    """Peek at one preprocessed file to learn the mel-spectrogram shape.

    Loads the first file of the first class subdirectory and returns its
    numpy shape tuple (all files are assumed to share it).
    """
    first_class = os.listdir(path)[0]
    first_file = os.listdir(path + first_class)[0]
    melgram = np.load(path + first_class + '/' + first_file)
    print(" get_sample_dimensions: melgram.shape = ", melgram.shape)
    return melgram.shape
def encode_class(class_name, class_names):  # makes a "one-hot" vector for each class name called
    """One-hot encode `class_name` against `class_names`.

    Returns a numpy vector with a 1 at the class's index, or None when the
    name is not a known class.
    """
    if class_name not in class_names:
        return None
    vec = np.zeros(len(class_names))
    vec[class_names.index(class_name)] = 1
    return vec
def decode_class(vec, class_names):  # generates a number from the one-hot vector
    """Invert encode_class: return the index of the largest entry.

    `class_names` is unused; it is kept for call-site symmetry with
    encode_class.
    """
    hot_index = np.argmax(vec)
    return int(hot_index)
def shuffle_XY_paths(X,Y,paths):   # generates a randomized order, keeping X&Y(&paths) together
    """Shuffle X, Y and paths with one shared random permutation.

    Args:
        X: sample inputs; axis 0 indexes samples.
        Y: labels; axis 0 indexes samples.
        paths: list of file paths parallel to X and Y.
    Returns:
        (newX, newY, newpaths): new containers holding the same items in a
        common shuffled order; the inputs are left unmodified.
    """
    assert (X.shape[0] == Y.shape[0] )
    idx = np.array(range(Y.shape[0]))
    np.random.shuffle(idx)
    # BUG FIX: the original did ``newpaths = paths`` and then assigned
    # ``newpaths[i] = paths[idx[i]]`` — writing through an alias of the very
    # list it was still reading, so whenever idx[i] < i the source entry had
    # already been overwritten, corrupting/duplicating paths.  Build genuinely
    # new containers instead (fancy indexing copies for the arrays).
    newX = X[idx]
    newY = Y[idx]
    newpaths = [paths[i] for i in idx]
    return newX, newY, newpaths
def build_datasets(train_percentage=0.8, preproc=False):
    '''
    Build the training & testing datasets.

    So we make the training & testing datasets here, and we do it separately.
    Why not just make one big dataset, shuffle, and then split into train & test?
    because we want to make sure statistics in training & testing are as similar as possible

    Args:
        train_percentage: per-class fraction of files assigned to training.
        preproc: if True, load pre-computed mel-spectrogram .npy files from
            Preproc/; otherwise load raw audio from Samples/ and compute the
            mel-spectrograms here (much slower).
    Returns:
        X_train, Y_train, paths_train, X_test, Y_test, paths_test,
        class_names, sr
        NOTE(review): ``sr`` is the sample rate of the *last* file loaded —
        assumes every file shares one rate; confirm for mixed datasets.
    '''
    if (preproc):
        path = "Preproc/"
    else:
        path = "Samples/"

    class_names = get_class_names(path=path)
    print("class_names = ",class_names)

    total_files, total_train, total_test = get_total_files(path=path, train_percentage=train_percentage)
    print("total files = ",total_files)

    nb_classes = len(class_names)

    # Sample dimensions are taken from the first file; all files are
    # expected to share this shape (see the clip below).
    mel_dims = get_sample_dimensions(path=path)

    # pre-allocate memory for speed (old method used np.concatenate, slow)
    X_train = np.zeros((total_train, mel_dims[1], mel_dims[2], mel_dims[3]))
    Y_train = np.zeros((total_train, nb_classes))
    X_test = np.zeros((total_test, mel_dims[1], mel_dims[2], mel_dims[3]))
    Y_test = np.zeros((total_test, nb_classes))
    paths_train = []
    paths_test = []

    train_count = 0
    test_count = 0
    for idx, classname in enumerate(class_names):
        # One-hot label shared by every file in this class.
        this_Y = np.array(encode_class(classname,class_names) )
        this_Y = this_Y[np.newaxis,:]
        class_files = os.listdir(path+classname)
        n_files = len(class_files)
        n_load = n_files
        n_train = int(train_percentage * n_load)
        printevery = 100
        print("")
        for idx2, infilename in enumerate(class_files[0:n_load]):
            audio_path = path + classname + '/' + infilename
            if (0 == idx2 % printevery):
                print('\r Loading class: {:14s} ({:2d} of {:2d} classes)'.format(classname,idx+1,nb_classes),
                       ", file ",idx2+1," of ",n_load,": ",audio_path,sep="")
            #start = timer()
            if (preproc):
                melgram = np.load(audio_path)
                sr = 44100
            else:
                # Compute the log-power mel-spectrogram from raw audio
                # (librosa 0.5-era API: logamplitude was later renamed).
                aud, sr = librosa.load(audio_path, mono=mono,sr=None)
                melgram = librosa.logamplitude(librosa.feature.melspectrogram(aud, sr=sr, n_mels=96),ref_power=1.0)[np.newaxis,np.newaxis,:,:]
            #end = timer()
            #print("time = ",end - start)

            melgram = melgram[:,:,:,0:mel_dims[3]]   # just in case files are differnt sizes: clip to first file size

            # First n_train files of each class go to training, rest to test.
            if (idx2 < n_train):
                # concatenate is SLOW for big datasets; use pre-allocated instead
                #X_train = np.concatenate((X_train, melgram), axis=0)
                #Y_train = np.concatenate((Y_train, this_Y), axis=0)
                X_train[train_count,:,:] = melgram
                Y_train[train_count,:] = this_Y
                paths_train.append(audio_path)     # list-appending is still fast. (??)
                train_count += 1
            else:
                X_test[test_count,:,:] = melgram
                Y_test[test_count,:] = this_Y
                #X_test = np.concatenate((X_test, melgram), axis=0)
                #Y_test = np.concatenate((Y_test, this_Y), axis=0)
                paths_test.append(audio_path)
                test_count += 1
        print("")

    print("Shuffling order of data...")
    X_train, Y_train, paths_train = shuffle_XY_paths(X_train, Y_train, paths_train)
    X_test, Y_test, paths_test = shuffle_XY_paths(X_test, Y_test, paths_test)

    return X_train, Y_train, paths_train, X_test, Y_test, paths_test, class_names, sr
def build_model(X,Y,nb_classes):
    """Assemble the CNN classifier (uncompiled).

    A stack of Conv -> BatchNorm -> activation blocks (the later ones adding
    MaxPool + Dropout) followed by a dense softmax classifier.

    NOTE(review): this uses the Keras 1.x API (positional kernel dims on
    Convolution2D, border_mode=, BatchNormalization(mode=2)) and
    channels-first input; it will not run unmodified on Keras 2+.

    Args:
        X: training inputs, shape (n, 1, mel_bins, frames); only the shape
           is read here.
        Y: training labels — unused, kept for call symmetry.
        nb_classes: number of output classes.
    Returns:
        A keras Sequential model (caller compiles it).
    """
    nb_filters = 32  # number of convolutional filters to use
    pool_size = (2, 2)  # size of pooling area for max pooling
    kernel_size = (3, 3)  # convolution kernel size
    nb_layers = 4
    input_shape = (1, X.shape[2], X.shape[3])

    model = Sequential()
    # First conv block declares the input shape and uses ReLU.
    model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                        border_mode='valid', input_shape=input_shape))
    model.add(BatchNormalization(axis=1, mode=2))
    model.add(Activation('relu'))

    # Remaining conv blocks switch to ELU and add pooling/dropout.
    for layer in range(nb_layers-1):
        model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1]))
        model.add(BatchNormalization(axis=1, mode=2))
        model.add(ELU(alpha=1.0))
        model.add(MaxPooling2D(pool_size=pool_size))
        model.add(Dropout(0.25))

    # Dense classifier head.
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation("softmax"))
    return model
if __name__ == '__main__':
    # Fixed seed so the dataset shuffle (and thus the evaluation) reproduces.
    np.random.seed(1)

    # get the data
    X_train, Y_train, paths_train, X_test, Y_test, paths_test, class_names, sr = build_datasets(preproc=True)

    # make the model
    model = build_model(X_train,Y_train, nb_classes=len(class_names))
    model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
    model.summary()

    # Initialize weights using checkpoint if it exists. (Checkpointing requires h5py)
    checkpoint_filepath = 'weights.hdf5'
    # NOTE(review): the ``else`` branch below is dead code — ``if (True)``
    # always takes the first branch; evaluation requires trained weights.
    if (True):
        print("Looking for previous weights...")
        if ( isfile(checkpoint_filepath) ):
            print ('Checkpoint file detected. Loading weights.')
            model.load_weights(checkpoint_filepath)
        else:
            print ('No checkpoint file detected. You gotta train_network first.')
            exit(1)
    else:
        print('Starting from scratch (no checkpoint)')

    print("class names = ",class_names)

    batch_size = 128
    num_pred = X_test.shape[0]

    # evaluate the model
    print("Running model.evaluate...")
    scores = model.evaluate(X_test, Y_test, verbose=1, batch_size=batch_size)
    print('Test score:', scores[0])
    print('Test accuracy:', scores[1])

    # Per-sample class probabilities (predict_proba is Keras 1.x API).
    print("Running predict_proba...")
    y_scores = model.predict_proba(X_test[0:num_pred,:,:,:],batch_size=batch_size)

    # Overall (multi-label averaged) AUC across all classes.
    auc_score = roc_auc_score(Y_test, y_scores)
    print("AUC = ",auc_score)

    n_classes = len(class_names)

    # Tally misclassifications per true class using argmax predictions.
    print(" Counting mistakes ")
    mistakes = np.zeros(n_classes)
    for i in range(Y_test.shape[0]):
        pred = decode_class(y_scores[i],class_names)
        true = decode_class(Y_test[i],class_names)
        if (pred != true):
            mistakes[true] += 1
    mistakes_sum = int(np.sum(mistakes))
    print(" Found",mistakes_sum,"mistakes out of",Y_test.shape[0],"attempts")
    print(" Mistakes by class: ",mistakes)

    # One ROC curve (and AUC) per class, plotted together.
    print("Generating ROC curves...")
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(Y_test[:, i], y_scores[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    plt.figure()
    lw = 2
    for i in range(n_classes):
        plt.plot(fpr[i], tpr[i],
             lw=lw, label='ROC curve of class {0} (area = {1:0.2f})'
             ''.format(i, roc_auc[i]))
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
    plt.show()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import fields
from horizon.utils.validators import validate_port_range
from horizon import workflows
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class AddPoolAction(workflows.Action):
    """Workflow action collecting the attributes of a new LBaaS pool."""
    name = forms.CharField(max_length=80, label=_("Name"))
    description = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Description"))
    subnet_id = forms.ChoiceField(label=_("Subnet"))
    protocol = forms.ChoiceField(label=_("Protocol"))
    lb_method = forms.ChoiceField(label=_("Load Balancing Method"))
    admin_state_up = forms.BooleanField(label=_("Admin State"),
                                        initial=True, required=False)

    def __init__(self, request, *args, **kwargs):
        """Populate the choice fields for the current tenant."""
        super(AddPoolAction, self).__init__(request, *args, **kwargs)

        tenant_id = request.user.tenant_id

        # Subnet choices come from the tenant's networks; fall back to an
        # empty list if the lookup fails.
        subnet_id_choices = [('', _("Select a Subnet"))]
        try:
            networks = api.quantum.network_list_for_tenant(request, tenant_id)
        except Exception:
            # BUG FIX: was a bare ``except:``, which would also swallow
            # SystemExit/KeyboardInterrupt; catch Exception instead.
            exceptions.handle(request,
                              _('Unable to retrieve networks list.'))
            networks = []
        for n in networks:
            for s in n['subnets']:
                subnet_id_choices.append((s.id, s.cidr))
        self.fields['subnet_id'].choices = subnet_id_choices

        protocol_choices = [('', _("Select a Protocol"))]
        protocol_choices.append(('HTTP', 'HTTP'))
        protocol_choices.append(('HTTPS', 'HTTPS'))
        self.fields['protocol'].choices = protocol_choices

        lb_method_choices = [('', _("Select a Method"))]
        lb_method_choices.append(('ROUND_ROBIN', 'ROUND_ROBIN'))
        lb_method_choices.append(('LEAST_CONNECTIONS', 'LEAST_CONNECTIONS'))
        lb_method_choices.append(('SOURCE_IP', 'SOURCE_IP'))
        self.fields['lb_method'].choices = lb_method_choices

    class Meta:
        name = _("Add New Pool")
        permissions = ('openstack.services.network',)
        help_text = _("Create Pool for current tenant.\n\n"
                      "Assign a name and description for the pool. "
                      "Choose one subnet where all members of this "
                      "pool must be on. "
                      "Select the protocol and load balancing method "
                      "for this pool. "
                      "Admin State is UP (checked) by default.")
class AddPoolStep(workflows.Step):
    """Workflow step wrapping AddPoolAction."""
    action_class = AddPoolAction
    contributes = ("name", "description", "subnet_id",
                   "protocol", "lb_method", "admin_state_up")

    def contribute(self, data, context):
        # Only propagate the context when the step actually received data.
        context = super(AddPoolStep, self).contribute(data, context)
        return context if data else None
class AddPool(workflows.Workflow):
    """Workflow that creates an LBaaS pool for the current tenant."""
    slug = "addpool"
    name = _("Add Pool")
    finalize_button_name = _("Add")
    success_message = _('Added pool "%s".')
    failure_message = _('Unable to add pool "%s".')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddPoolStep,)

    def format_status_message(self, message):
        """Insert the pool name into the success/failure template."""
        name = self.context.get('name')
        return message % name

    def handle(self, request, context):
        """Create the pool; return True on success, False otherwise."""
        try:
            # result was assigned to an unused local before; discard it
            api.lbaas.pool_create(request, **context)
            return True
        except Exception:
            # was a bare "except:", which also swallows
            # SystemExit/KeyboardInterrupt
            return False
class AddVipAction(workflows.Action):
    """Workflow action collecting the attributes for a pool VIP."""
    name = forms.CharField(max_length=80, label=_("Name"))
    description = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Description"))
    floatip_address = forms.ChoiceField(
        label=_("VIP Address from Floating IPs"),
        widget=forms.Select(attrs={'disabled': 'disabled'}),
        required=False)
    other_address = fields.IPField(required=False,
                                   initial="",
                                   version=fields.IPv4,
                                   mask=False)
    protocol_port = forms.IntegerField(label=_("Protocol Port"), min_value=1,
                                       help_text=_("Enter an integer value "
                                                   "between 1 and 65535."),
                                       validators=[validate_port_range])
    protocol = forms.ChoiceField(label=_("Protocol"))
    session_persistence = forms.ChoiceField(
        required=False, initial={}, label=_("Session Persistence"))
    cookie_name = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Cookie Name"),
        help_text=_("Required for APP_COOKIE persistence;"
                    " Ignored otherwise."))
    connection_limit = forms.IntegerField(
        min_value=-1, label=_("Connection Limit"),
        help_text=_("Maximum number of connections allowed "
                    "for the VIP or '-1' if the limit is not set"))
    admin_state_up = forms.BooleanField(
        label=_("Admin State"), initial=True, required=False)

    def __init__(self, request, *args, **kwargs):
        """Populate the choice lists and the dynamic address label."""
        super(AddVipAction, self).__init__(request, *args, **kwargs)
        # i18n fix: interpolate AFTER translation -- the old code formatted
        # the string first, so the msgid contained the subnet value and
        # could never match a translation catalog entry.
        self.fields['other_address'].label = _("Specify a free IP address"
                                               " from %s") % args[0]['subnet']
        protocol_choices = [('', _("Select a Protocol"))]
        protocol_choices.append(('HTTP', 'HTTP'))
        protocol_choices.append(('HTTPS', 'HTTPS'))
        self.fields['protocol'].choices = protocol_choices
        session_persistence_choices = [('', _("Set Session Persistence"))]
        for mode in ('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'):
            session_persistence_choices.append((mode, mode))
        self.fields[
            'session_persistence'].choices = session_persistence_choices
        # floating-IP based VIPs are not implemented yet
        floatip_address_choices = [('', _("Currently Not Supported"))]
        self.fields['floatip_address'].choices = floatip_address_choices

    def clean(self):
        """Require a cookie name when APP_COOKIE persistence is chosen."""
        cleaned_data = super(AddVipAction, self).clean()
        if (cleaned_data.get('session_persistence') == 'APP_COOKIE' and
                not cleaned_data.get('cookie_name')):
            msg = _('Cookie name is required for APP_COOKIE persistence.')
            self._errors['cookie_name'] = self.error_class([msg])
        return cleaned_data

    class Meta:
        name = _("Specify VIP")
        permissions = ('openstack.services.network',)
        help_text = _("Create a VIP for this pool. "
                      "Assign a name and description for the VIP. "
                      "Specify an IP address and port for the VIP. "
                      "Choose the protocol and session persistence "
                      "method for the VIP."
                      "Specify the max connections allowed. "
                      "Admin State is UP (checked) by default.")
class AddVipStep(workflows.Step):
    """Workflow step wrapping AddVipAction."""
    action_class = AddVipAction
    depends_on = ("pool_id", "subnet")
    contributes = ("name", "description", "floatip_address",
                   "other_address", "protocol_port", "protocol",
                   "session_persistence", "cookie_name",
                   "connection_limit", "admin_state_up")

    def contribute(self, data, context):
        # Pass the merged context straight through.
        return super(AddVipStep, self).contribute(data, context)
class AddVip(workflows.Workflow):
    """Workflow that creates a VIP on an existing pool."""
    slug = "addvip"
    name = _("Add VIP")
    finalize_button_name = _("Add")
    success_message = _('Added VIP "%s".')
    failure_message = _('Unable to add VIP "%s".')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddVipStep,)

    def format_status_message(self, message):
        """Insert the VIP name into the success/failure template."""
        name = self.context.get('name')
        return message % name

    def handle(self, request, context):
        """Create the VIP; return True on success, False otherwise."""
        # exactly one of floatip_address / other_address may be supplied
        if context['other_address'] == '':
            context['address'] = context['floatip_address']
        else:
            if not context['floatip_address'] == '':
                self.failure_message = _('Only one address can be specified. '
                                         'Unable to add VIP "%s".')
                return False
            else:
                context['address'] = context['other_address']
        try:
            pool = api.lbaas.pool_get(request, context['pool_id'])
            context['subnet_id'] = pool['subnet_id']
        except Exception:
            # was a bare "except:", which also swallows
            # SystemExit/KeyboardInterrupt
            context['subnet_id'] = None
            self.failure_message = _('Unable to retrieve the specified pool. '
                                     'Unable to add VIP "%s".')
            return False
        # translate the flat form fields into the API's nested dict
        if context['session_persistence']:
            stype = context['session_persistence']
            if stype == 'APP_COOKIE':
                cookie = context['cookie_name']
                context['session_persistence'] = {'type': stype,
                                                  'cookie_name': cookie}
            else:
                context['session_persistence'] = {'type': stype}
        else:
            context['session_persistence'] = {}
        try:
            api.lbaas.vip_create(request, **context)
            return True
        except Exception:
            return False
class AddMemberAction(workflows.Action):
    """Workflow action selecting a pool and instances to add as members."""
    pool_id = forms.ChoiceField(label=_("Pool"))
    members = forms.MultipleChoiceField(
        label=_("Member(s)"),
        required=True,
        initial=["default"],
        widget=forms.CheckboxSelectMultiple(),
        error_messages={'required':
                        _('At least one member must be specified')},
        help_text=_("Select members for this pool "))
    weight = forms.IntegerField(max_value=256, min_value=0, label=_("Weight"),
                                help_text=_("Relative part of requests this "
                                "pool member serves compared to others"))
    protocol_port = forms.IntegerField(label=_("Protocol Port"), min_value=1,
                                       help_text=_("Enter an integer value "
                                                   "between 1 and 65535."),
                                       validators=[validate_port_range])
    admin_state_up = forms.BooleanField(label=_("Admin State"),
                                        initial=True, required=False)

    def __init__(self, request, *args, **kwargs):
        """Populate the pool and member choice lists."""
        super(AddMemberAction, self).__init__(request, *args, **kwargs)
        pool_id_choices = [('', _("Select a Pool"))]
        try:
            pools = api.lbaas.pools_get(request)
        except Exception:
            # was a bare "except:", which also swallows
            # SystemExit/KeyboardInterrupt
            pools = []
            exceptions.handle(request,
                              _('Unable to retrieve pools list.'))
        pools = sorted(pools,
                       key=lambda pool: pool.name)
        for p in pools:
            pool_id_choices.append((p.id, p.name))
        self.fields['pool_id'].choices = pool_id_choices
        members_choices = []
        try:
            servers, has_more = api.nova.server_list(request)
        except Exception:
            servers = []
            exceptions.handle(request,
                              _('Unable to retrieve instances list.'))
        if len(servers) == 0:
            # no instances available: relax all requirements so the
            # dialog can still be dismissed via the Add button
            self.fields['members'].label = _("No servers available. "
                                             "Click Add to cancel.")
            self.fields['members'].required = False
            self.fields['members'].help_text = _("Select members "
                                                 "for this pool ")
            self.fields['pool_id'].required = False
            self.fields['weight'].required = False
            self.fields['protocol_port'].required = False
            return
        for m in servers:
            members_choices.append((m.id, m.name))
        self.fields['members'].choices = sorted(
            members_choices,
            key=lambda member: member[1])

    class Meta:
        name = _("Add New Member")
        permissions = ('openstack.services.network',)
        help_text = _("Add member to selected pool.\n\n"
                      "Choose one or more listed instances to be "
                      "added to the pool as member(s). "
                      "Assign a numeric weight for this member "
                      "Specify the port number the member(s) "
                      "operate on; e.g., 80.")
class AddMemberStep(workflows.Step):
    """Workflow step wrapping AddMemberAction."""
    action_class = AddMemberAction
    contributes = ("pool_id", "members", "protocol_port", "weight",
                   "admin_state_up")

    def contribute(self, data, context):
        # Pass the merged context straight through.
        return super(AddMemberStep, self).contribute(data, context)
class AddMember(workflows.Workflow):
    """Workflow that adds one or more instances to a pool as members."""
    slug = "addmember"
    name = _("Add Member")
    finalize_button_name = _("Add")
    success_message = _('Added member(s).')
    failure_message = _('Unable to add member(s).')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddMemberStep,)

    def handle(self, request, context):
        """Create one member per selected instance; stop on first failure."""
        for m in context['members']:
            params = {'device_id': m}
            try:
                plist = api.quantum.port_list(request, **params)
            except Exception:
                # was a bare "except:", which also swallows
                # SystemExit/KeyboardInterrupt
                return False
            if plist:
                # use the first fixed IP of the instance's first port
                context['address'] = plist[0].fixed_ips[0]['ip_address']
            # NOTE(review): when no port is found, 'address' keeps the value
            # from the previous loop iteration (or is absent) -- confirm
            # this is the intended behavior.
            try:
                context['member_id'] = api.lbaas.member_create(
                    request, **context).id
            except Exception:
                return False
        return True
class AddMonitorAction(workflows.Action):
    """Workflow action collecting the attributes for a health monitor."""
    # probe type; the 'switchable' widget class drives which of the
    # HTTP-specific fields below are shown in the UI
    type = forms.ChoiceField(
        label=_("Type"),
        choices=[('ping', _('PING')),
                 ('tcp', _('TCP')),
                 ('http', _('HTTP')),
                 ('https', _('HTTPS'))],
        widget=forms.Select(attrs={
            'class': 'switchable',
            'data-slug': 'type'
        }))
    delay = forms.IntegerField(
        min_value=1,
        label=_("Delay"),
        help_text=_("The minimum time in seconds between regular checks "
                    "of a member"))
    timeout = forms.IntegerField(
        min_value=1,
        label=_("Timeout"),
        help_text=_("The maximum time in seconds for a monitor to wait "
                    "for a reply"))
    max_retries = forms.IntegerField(
        max_value=10, min_value=1,
        label=_("Max Retries (1~10)"),
        help_text=_("Number of permissible failures before changing "
                    "the status of member to inactive"))
    # the next three fields are only relevant (and rendered) for
    # http/https monitor types -- see the 'switched' widget attrs
    http_method = forms.ChoiceField(
        initial="GET",
        required=False,
        choices=[('GET', _('GET'))],
        label=_("HTTP Method"),
        help_text=_("HTTP method used to check health status of a member"),
        widget=forms.Select(attrs={
            'class': 'switched',
            'data-switch-on': 'type',
            'data-type-http': _('HTTP Method'),
            'data-type-https': _('HTTP Method')
        }))
    url_path = forms.CharField(
        initial="/",
        required=False,
        max_length=80,
        label=_("URL"),
        widget=forms.TextInput(attrs={
            'class': 'switched',
            'data-switch-on': 'type',
            'data-type-http': _('URL'),
            'data-type-https': _('URL')
        }))
    # accepts "200", "200, 202" or "200-204"
    expected_codes = forms.RegexField(
        initial="200",
        required=False,
        max_length=80,
        regex=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$',
        label=_("Expected HTTP Status Codes"),
        help_text=_("Expected code may be a single value (e.g. 200), "
                    "a list of values (e.g. 200, 202), "
                    "or range of values (e.g. 200-204)"),
        widget=forms.TextInput(attrs={
            'class': 'switched',
            'data-switch-on': 'type',
            'data-type-http': _('Expected HTTP Status Codes'),
            'data-type-https': _('Expected HTTP Status Codes')
        }))
    admin_state_up = forms.BooleanField(label=_("Admin State"),
                                        initial=True, required=False)
    def __init__(self, request, *args, **kwargs):
        super(AddMonitorAction, self).__init__(request, *args, **kwargs)
    def clean(self):
        """For HTTP/HTTPS monitors the optional fields become mandatory."""
        cleaned_data = super(AddMonitorAction, self).clean()
        type_opt = cleaned_data.get('type')
        if type_opt in ['http', 'https']:
            http_method_opt = cleaned_data.get('http_method')
            url_path = cleaned_data.get('url_path')
            expected_codes = cleaned_data.get('expected_codes')
            if not http_method_opt:
                msg = _('Please choose a HTTP method')
                self._errors['http_method'] = self.error_class([msg])
            if not url_path:
                msg = _('Please specify an URL')
                self._errors['url_path'] = self.error_class([msg])
            if not expected_codes:
                msg = _('Please enter a single value (e.g. 200), '
                        'a list of values (e.g. 200, 202), '
                        'or range of values (e.g. 200-204)')
                self._errors['expected_codes'] = self.error_class([msg])
        return cleaned_data
    class Meta:
        name = _("Add New Monitor")
        permissions = ('openstack.services.network',)
        help_text = _("Create a monitor template.\n\n"
                      "Select type of monitoring. "
                      "Specify delay, timeout, and retry limits "
                      "required by the monitor. "
                      "Specify method, URL path, and expected "
                      "HTTP codes upon success.")
class AddMonitorStep(workflows.Step):
    """Workflow step wrapping AddMonitorAction."""
    action_class = AddMonitorAction
    contributes = ("type", "delay", "timeout", "max_retries",
                   "http_method", "url_path", "expected_codes",
                   "admin_state_up")

    def contribute(self, data, context):
        # Only propagate the context when the step actually received data.
        context = super(AddMonitorStep, self).contribute(data, context)
        return context if data else None
class AddMonitor(workflows.Workflow):
    """Workflow that creates a health-monitor template."""
    slug = "addmonitor"
    name = _("Add Monitor")
    finalize_button_name = _("Add")
    success_message = _('Added monitor')
    failure_message = _('Unable to add monitor')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddMonitorStep,)

    def handle(self, request, context):
        """Create the monitor; return True on success, False otherwise."""
        try:
            context['monitor_id'] = api.lbaas.pool_health_monitor_create(
                request, **context).get('id')
            return True
        except Exception:
            # was a bare "except:", which also swallows
            # SystemExit/KeyboardInterrupt
            exceptions.handle(request, _("Unable to add monitor."))
        return False
class AddPMAssociationAction(workflows.Action):
    """Workflow action choosing a monitor template to attach to a pool."""
    monitor_id = forms.ChoiceField(label=_("Monitor"))

    def __init__(self, request, *args, **kwargs):
        super(AddPMAssociationAction, self).__init__(request, *args, **kwargs)

    def populate_monitor_id_choices(self, request, context):
        """Offer only monitors not already attached to this pool."""
        # i18n fix: interpolate AFTER translation so the msgid can match
        # a translation catalog entry
        self.fields['monitor_id'].label = _("Select a monitor template "
                                            "for %s") % context['pool_name']
        monitor_id_choices = [('', _("Select a Monitor"))]
        try:
            monitors = api.lbaas.pool_health_monitors_get(request)
            for m in monitors:
                if m.id not in context['pool_monitors']:
                    monitor_id_choices.append((m.id, m.id))
        except Exception:
            # was a bare "except:", which also swallows
            # SystemExit/KeyboardInterrupt
            exceptions.handle(request,
                              _('Unable to retrieve monitors list.'))
        self.fields['monitor_id'].choices = monitor_id_choices
        return monitor_id_choices

    class Meta:
        name = _("Association Details")
        permissions = ('openstack.services.network',)
        help_text = _("Associate a health monitor with target pool.")
class AddPMAssociationStep(workflows.Step):
    """Workflow step wrapping AddPMAssociationAction."""
    action_class = AddPMAssociationAction
    depends_on = ("pool_id", "pool_name", "pool_monitors")
    contributes = ("monitor_id",)

    def contribute(self, data, context):
        # Only propagate the context when the step actually received data.
        context = super(AddPMAssociationStep, self).contribute(data, context)
        return context if data else None
class AddPMAssociation(workflows.Workflow):
    """Workflow that associates a health monitor with a pool."""
    slug = "addassociation"
    name = _("Add Association")
    finalize_button_name = _("Add")
    success_message = _('Added association.')
    failure_message = _('Unable to add association.')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (AddPMAssociationStep,)

    def handle(self, request, context):
        """Create the association; return True on success, False otherwise."""
        try:
            context['monitor_id'] = api.lbaas.pool_monitor_association_create(
                request, **context)
            return True
        except Exception:
            # was a bare "except:", which also swallows
            # SystemExit/KeyboardInterrupt
            exceptions.handle(request, _("Unable to add association."))
            return False
class DeletePMAssociationAction(workflows.Action):
    """Workflow action choosing an associated monitor to detach."""
    monitor_id = forms.ChoiceField(label=_("Monitor"))

    def __init__(self, request, *args, **kwargs):
        super(DeletePMAssociationAction, self).__init__(
            request, *args, **kwargs)

    def populate_monitor_id_choices(self, request, context):
        """Offer the monitors currently attached to this pool."""
        # i18n fix: interpolate AFTER translation so the msgid can match
        # a translation catalog entry
        self.fields['monitor_id'].label = (_("Select a health monitor of %s")
                                           % context['pool_name'])
        monitor_id_choices = [('', _("Select a Monitor"))]
        try:
            for m_id in context['pool_monitors']:
                monitor_id_choices.append((m_id, m_id))
        except Exception:
            # was a bare "except:", which also swallows
            # SystemExit/KeyboardInterrupt
            exceptions.handle(request,
                              _('Unable to retrieve monitors list.'))
        self.fields['monitor_id'].choices = monitor_id_choices
        return monitor_id_choices

    class Meta:
        name = _("Association Details")
        permissions = ('openstack.services.network',)
        help_text = _("Disassociate a health monitor from target pool. ")
class DeletePMAssociationStep(workflows.Step):
    """Workflow step wrapping DeletePMAssociationAction."""
    action_class = DeletePMAssociationAction
    depends_on = ("pool_id", "pool_name", "pool_monitors")
    contributes = ("monitor_id",)

    def contribute(self, data, context):
        # Only propagate the context when the step actually received data.
        context = super(DeletePMAssociationStep, self).contribute(
            data, context)
        return context if data else None
class DeletePMAssociation(workflows.Workflow):
    """Workflow that removes a monitor association from a pool."""
    slug = "deleteassociation"
    name = _("Delete Association")
    finalize_button_name = _("Delete")
    success_message = _('Deleted association.')
    failure_message = _('Unable to delete association.')
    success_url = "horizon:project:loadbalancers:index"
    default_steps = (DeletePMAssociationStep,)

    def handle(self, request, context):
        """Delete the association; return True on success, False otherwise."""
        try:
            context['monitor_id'] = api.lbaas.pool_monitor_association_delete(
                request, **context)
            return True
        except Exception:
            # was a bare "except:", which also swallows
            # SystemExit/KeyboardInterrupt
            exceptions.handle(request, _("Unable to delete association."))
            return False
| |
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import datetime
import uuid
from oslo.config import cfg
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import servers
from nova.api.openstack.compute.plugins.v3 import user_data
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova.network import manager
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
    """Deterministic stand-in for uuid.uuid4(); always the fixed FAKE_UUID."""
    return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
    """No-op stub used in place of db.instance_add_security_group."""
    return None
class ServersControllerCreateTest(test.TestCase):
    """Tests for the v3 servers API honoring the os-user-data extension."""
    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTest, self).setUp()
        self.flags(verbose=True,
                   enable_instance_password=True)
        # in-memory "database" shared by the stubs below
        self.instance_cache_num = 0
        self.instance_cache_by_id = {}
        self.instance_cache_by_uuid = {}
        ext_info = plugins.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        # second controller with the user-data extension blacklisted, to
        # exercise the disabled-extension code path
        CONF.set_override('extensions_blacklist', 'os-user-data',
                          'osapi_v3')
        self.no_user_data_controller = servers.ServersController(
            extension_info=ext_info)
        def instance_create(context, inst):
            # Stub for db.instance_create: fabricate a db-style instance
            # and record it in both caches.
            inst_type = flavors.get_flavor_by_flavor_id(3)
            image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
            def_image_ref = 'http://localhost/images/%s' % image_uuid
            self.instance_cache_num += 1
            instance = fake_instance.fake_db_instance(**{
                'id': self.instance_cache_num,
                'display_name': inst['display_name'] or 'test',
                'uuid': FAKE_UUID,
                'instance_type': dict(inst_type),
                'access_ip_v4': '1.2.3.4',
                'access_ip_v6': 'fead::1234',
                'image_ref': inst.get('image_ref', def_image_ref),
                'user_id': 'fake',
                'project_id': 'fake',
                'reservation_id': inst['reservation_id'],
                "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
                "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
                user_data.ATTRIBUTE_NAME: None,
                "progress": 0,
                "fixed_ips": [],
                "task_state": "",
                "vm_state": "",
                "root_device_name": inst.get('root_device_name', 'vda'),
            })
            self.instance_cache_by_id[instance['id']] = instance
            self.instance_cache_by_uuid[instance['uuid']] = instance
            return instance
        def instance_get(context, instance_id):
            """Stub for compute/api create() pulling in instance after
            scheduling
            """
            return self.instance_cache_by_id[instance_id]
        def instance_update(context, uuid, values):
            # Stub for db.instance_update backed by the uuid cache.
            instance = self.instance_cache_by_uuid[uuid]
            instance.update(values)
            return instance
        def server_update(context, instance_uuid, params):
            # Stub for db.instance_update_and_get_original; returns the
            # same object as both "old" and "new" record.
            inst = self.instance_cache_by_uuid[instance_uuid]
            inst.update(params)
            return (inst, inst)
        def fake_method(*args, **kwargs):
            pass
        def project_get_networks(context, user_id):
            return dict(id='1', host='localhost')
        def queue_get_for(context, *args):
            return 'network_topic'
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
        fake.stub_out_image_service(self.stubs)
        fakes.stub_out_nw_api(self.stubs)
        # make every generated uuid predictable
        self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
        self.stubs.Set(db, 'instance_add_security_group',
                       return_security_group)
        self.stubs.Set(db, 'project_get_networks',
                       project_get_networks)
        self.stubs.Set(db, 'instance_create', instance_create)
        self.stubs.Set(db, 'instance_system_metadata_update',
                       fake_method)
        self.stubs.Set(db, 'instance_get', instance_get)
        self.stubs.Set(db, 'instance_update', instance_update)
        self.stubs.Set(db, 'instance_update_and_get_original',
                       server_update)
        self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
                       fake_method)
    def _test_create_extra(self, params, no_image=False,
                           override_controller=None):
        """POST a server-create request with *params* merged into the body."""
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2)
        if no_image:
            server.pop('image_ref', None)
        server.update(params)
        body = dict(server=server)
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        if override_controller:
            server = override_controller.create(req, body=body).obj['server']
        else:
            server = self.controller.create(req, body=body).obj['server']
    def test_create_instance_with_user_data_disabled(self):
        """user_data must NOT reach compute when the extension is disabled."""
        params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')}
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertNotIn('user_data', kwargs)
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(
            params,
            override_controller=self.no_user_data_controller)
    def test_create_instance_with_user_data_enabled(self):
        """user_data must reach compute when the extension is enabled."""
        params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')}
        old_create = compute_api.API.create
        def create(*args, **kwargs):
            self.assertIn('user_data', kwargs)
            return old_create(*args, **kwargs)
        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(params)
    def test_create_instance_with_user_data(self):
        """A valid base64 user_data payload creates the server."""
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/flavors/3'
        value = "A random string"
        body = {
            'server': {
                'name': 'user_data_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                user_data.ATTRIBUTE_NAME: base64.b64encode(value),
            },
        }
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.create(req, body=body).obj
        server = res['server']
        self.assertEqual(FAKE_UUID, server['id'])
    def test_create_instance_with_bad_user_data(self):
        """Non-base64 user_data must be rejected with 400 Bad Request."""
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = 'http://localhost/flavors/3'
        value = "A random string"
        body = {
            'server': {
                'name': 'user_data_test',
                'image_ref': image_href,
                'flavor_ref': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                # raw (non-base64) payload on purpose
                user_data.ATTRIBUTE_NAME: value,
            },
        }
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, body=body)
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 10:08:17 2018
@author: fboers
"""
import os,sys,re,textwrap
import ast
from types import ModuleType
from pathlib import Path
from glob import glob
import numpy as np
import logging
logger = logging.getLogger('jumeg')
from jumeg.base.jumeg_base import JuMEG_Base_Basic
jb = JuMEG_Base_Basic()
__version__="2019.05.14.001"
class JuMEG_IoUtils_FunctionParserBase(object):
    """
    Base CLS to read a function from a python module file.

    Parameters
    ----------
    module_name : <None>
    prefix      : <jumeg>
    fullfile    : <None>
    extention   : <.py>
    function    : <get_args>
    package     : <None>

    Example
    -------
    command/module: my-path/jumeg/jumeg_test01.py
     name     : jumeg_test01
     fullfile : my-path/jumeg/jumeg_test01.py
     command  : dict of {"module_name":None,"prefix":"jumeg","fullfile":None,
                         "extention":".py","function":"get_args","package":None}
    """

    def __init__(self, **kwargs):
        super(JuMEG_IoUtils_FunctionParserBase, self).__init__()
        # defaults; _update_kwargs only overwrites keys listed here
        self.__command = {"module_name": None, "prefix": "jumeg",
                          "fullfile": None, "extention": ".py",
                          "function": "get_args", "package": None}
        self._isLoaded = False
        self._update_kwargs(**kwargs)
        # NOTE: set after _update_kwargs, so a "verbose" kwarg is ignored here
        self.verbose = False

    @property
    def command(self):
        """dict holding all module/function settings."""
        return self.__command

    # --- "name" / "module" / "module_name" all alias the same entry
    def _get_module_name(self):
        return self.__command["module_name"]

    def _set_module_name(self, v):
        self.__command["module_name"] = v

    name = property(_get_module_name, _set_module_name)
    module = property(_get_module_name, _set_module_name)
    module_name = property(_get_module_name, _set_module_name)

    @property
    def prefix(self):
        return self.__command["prefix"]

    @prefix.setter
    def prefix(self, v):
        self.__command["prefix"] = v

    @property
    def package(self):
        return self.__command["package"]

    @package.setter
    def package(self, v):
        # BUGFIX: the setter was misspelled "pachkage", which left the
        # "package" attribute read-only -- "obj.package = x" raised
        # AttributeError.
        self.__command["package"] = v

    # backward-compat alias for the old misspelled attribute name
    pachkage = package

    @property
    def fullfile(self):
        return self.__command["fullfile"]

    @fullfile.setter
    def fullfile(self, v):
        self._isLoaded = False  # a new file invalidates any loaded function
        self.__command["fullfile"] = v

    @property
    def function(self):
        return self.__command["function"]

    @function.setter
    def function(self, v):
        self._isLoaded = False  # a new function invalidates any loaded module
        self.__command["function"] = v

    @property
    def extention(self):
        return self.__command["extention"]

    @extention.setter
    def extention(self, v):
        self.__command["extention"] = v

    @property
    def import_name(self):
        """dotted import path: <prefix>.<module_name>"""
        return self.prefix + "." + self.name

    #-----
    def update(self, **kwargs):
        """Public alias for _update_kwargs."""
        self._update_kwargs(**kwargs)

    def _update_kwargs(self, **kwargs):
        """Overwrite command entries from kwargs; unknown keys are ignored."""
        for k in self.__command.keys():
            self.__command[k] = kwargs.get(k, self.__command[k])
        self.module = kwargs.get("module", self.module)

    def info(self):
        """Log the current command dict."""
        logger.info(jb.pp_list2str(self.command, head="JuMEG Function Command"))
class JuMEG_IoUtils_FunctionParser(JuMEG_IoUtils_FunctionParserBase):
    """
    parse a function from a python text file

    special issue:
    extract a function from a text file e.g. <get_args> in <jumeg_filter.py>
    and compile it in a new module e.g. for argparser gui,
    avoiding/excluding <import> and imports of dependencies

    Parameters
    -----------
    name     : <None>
    prefix   : <jumeg>
    fullfile : <None>
    extention: <.py>
    function : <get_args>
    start_pattern: <def>
    stop_pattern : <return>
    verbose  : <False>

    Example
    -------
    from jumeg.ioutils.jumeg_ioutils_function_parser import JuMEG_IoUtils_FunctionParser
    JFP = JuMEG_IoUtils_FunctionParser()
    JFP.fullfile = os.environ["JUMEG_PATH"]+"/jumeg/filter/jumeg_filter.py"
    JFP.function = "get_args"
    opt,parser  = JFP.apply()
    parser.print_help()
    """

    def __init__(self, **kwargs):
        super(JuMEG_IoUtils_FunctionParser, self).__init__(**kwargs)
        # NOTE(review): due to name mangling this dict is distinct from the
        # base-class __command dict, and the inherited _update_kwargs only
        # updates the base dict -- "start_pattern"/"stop_pattern" kwargs are
        # therefore ignored here; set them via the properties. TODO confirm.
        self.__command = {"module_name": None, "fullfile": None,
                          "extention": ".py", "function": "get_args",
                          "start_pattern": "def", "stop_pattern": "return"}
        self.__text = None
        self._isLoaded = False
        self._update_kwargs(**kwargs)

    @property
    def function_parser_name(self):
        """unique sys.modules key for the generated module"""
        return "JuMEG_IoUtils_FunctionParser_" + self.function

    @property
    def function_text(self):
        """extracted source code of the function, or None"""
        return self.__text

    @property
    def isLoaded(self):
        return self._isLoaded

    @property
    def start_pattern(self):
        return self.__command["start_pattern"]

    @start_pattern.setter
    def start_pattern(self, v):
        self.__command["start_pattern"] = v

    @property
    def stop_pattern(self):
        return self.__command["stop_pattern"]

    @stop_pattern.setter
    def stop_pattern(self, v):
        self.__command["stop_pattern"] = v

    def _open_txt(self):
        """Read the module file (.py) and return its lines."""
        # the context manager closes the file; the old explicit close()
        # inside the with-block was redundant
        with open(self.fullfile, "r+") as farg:
            return farg.readlines()

    def _get_function_code_from_text(self, **kwargs):
        """
        Extract the source of <self.function> from the module text.

        Returns the function source prefixed with "import sys,os,argparse\\n",
        or None if the file is empty or the function cannot be located.
        """
        self._update_kwargs(**kwargs)
        self.__text = None
        #--- read in module as txt
        lines = self._open_txt()
        if not lines:
            return
        #--- match e.g. "def get_args" at the start of a line (raw string
        #--- so "\s" is a real regex escape)
        find_start_pattern = re.compile(self.start_pattern + r'\s+' + self.function)
        #--- find function start index
        l_idx_start = None
        for idx, line in enumerate(lines):
            if find_start_pattern.match(line):
                l_idx_start = idx
                break
        if l_idx_start is None:
            # BUGFIX: the old code crashed with TypeError further down when
            # the function was not found in the file
            return
        #--- indentation of the first real code line == expected column of
        #--- the terminating stop_pattern ("return")
        l_intend_pos = None
        for line in lines[l_idx_start + 1:-1]:
            stripped = line.lstrip()
            if not len(stripped):
                continue
            if stripped.startswith("#"):
                continue
            l_intend_pos = len(line) - len(stripped)
            break
        #--- find function end: stop_pattern at that indentation
        l_idx_end = None
        idx = l_idx_start
        for line in lines[l_idx_start + 1:-1]:
            idx += 1
            if line.find(self.stop_pattern) == l_intend_pos:
                l_idx_end = idx
                break
        if l_idx_end is None:
            # BUGFIX: slicing lines[...:None + 1] raised TypeError before
            return
        self.__text = ("import sys,os,argparse\n" +
                       ''.join(lines[l_idx_start:l_idx_end + 1]))
        return self.__text

    def clear(self):
        """
        clear function module space
        """
        # BUGFIX: the old loop "while sys.getrefcount(name): del
        # sys.modules[name]" could never terminate normally (the refcount of
        # the name string is always > 0) and raised KeyError on the second
        # pass; a single guarded removal is what was intended.
        self._isLoaded = False
        sys.modules.pop(self.function_parser_name, None)

    def load_function(self):
        """
        load function from source code (text file):
        create a new module, register it in sys.modules and execute the
        extracted code inside it
        """
        if self.isLoaded:
            self.clear()
        self._isLoaded = False
        if not self._get_function_code_from_text():
            return
        self.__ModuleType = ModuleType(self.function)
        sys.modules[self.function_parser_name] = self.__ModuleType
        # NOTE: exec of file-derived code -- only use on trusted module files
        exec(textwrap.dedent(self.function_text), self.__ModuleType.__dict__)
        self._isLoaded = True

    def apply(self, *args, **kwargs):
        """
        Load the function on demand and call it.

        Result
        -------
        whatever the parsed function returns (e.g. opt,parser),
        or None if the function could not be loaded
        """
        if not self.isLoaded:
            self.load_function()
        if self.isLoaded:
            return getattr(self.__ModuleType, self.function)(*args, **kwargs)
        return None
class JuMEG_IoUtils_JuMEGModule(object):
"""
CLS find jumeg modules under jumeg sub folders in PYTHONOATH
Parameters:
-----------
stage_prefix : jumeg stage start of jumeg directory : <jumeg>
prefix : file prefix <None>
postfix : file postfix <None>
extention : file extention <".py>
permission : file permission <os.X_OK|os.R_OK>
function : function name in module <get_args>
Results:
--------
list of executable modules with fuction < get_args> in jumeg PYTHONPATH
"""
def __init__(self,stage_prefix="jumeg",prefix="jumeg",postfix=None,extention=".py",permission=os.X_OK|os.R_OK,function="get_args",**kwargs):
super(JuMEG_IoUtils_JuMEGModule, self).__init__(**kwargs)
#super().__init__()
self.stage_prefix = stage_prefix
self.prefix = prefix
self.postfix = postfix
self.extention = extention
self.permission = permission
self.function = function
self.skip_dir_list = ["old","gui","wx"]
self._module_list = []
self._jumeg_path_list=[]
@property
def module_list(self): return self._module_list
@property
def jumeg_path_list(self): return self._jumeg_path_list
def PathListItem(self,idx):
return os.path.split( self.module_list[idx] )[0]
def _is_function_in_module(self,fmodule,function=None):
"""
https://stackoverflow.com/questions/45684307/get-source-script-details-similar-to-inspect-getmembers-without-importing-the
"""
if function:
self.function=function
if not self.function: return
try:
mtxt = open(fmodule).read()
tree = ast.parse(mtxt)
for fct in tree.body:
if isinstance( fct,(ast.FunctionDef)):
if fct.name == self.function:
return True
except:
pass
def get_jumeg_path_list_from_pythonpath(self):
""" jumeg """
self._jumeg_path_list=[]
l=[]
for d in os.environ["PYTHONPATH"].split(":"):
if os.environ.get(d):
d = os.environ.get(d)
if d == ".":
d = os.getcwd()
if os.path.isdir( d + "/" + self.stage_prefix):
l.append( d + "/" + self.stage_prefix )
self._jumeg_path_list = list( set(l) ) # exclude double
self._jumeg_path_list.sort()
return self._jumeg_path_list
def ModuleListItem(self,idx):
""" jumeg.my subdirs.<jumeg_function name>"""
if not len(self.module_list): return
p,f = os.path.split( self.module_list[idx])
m = p.split("/"+self.stage_prefix + "/")[-1] + "/" + os.path.splitext(f)[0]
return m.replace("/",".")
def ModuleFileName(self,idx):
    """Return the full filename stored at position <idx> of the module list.

    Parameters:
    -----------
    idx : index into the module list
    """
    return self._module_list[idx]
def ModuleNames(self,idx=None):
    """
    get module name(s) (basename without the extension) from the module file list

    Parameters:
    -----------
    idx: if defined return only this module name from the list <None>
         else return the list of all module names

    Fix: the extension is now only stripped when it really is the suffix of
    the basename; str.replace() also removed accidental matches inside the
    name (e.g. "jumeg.python_tool.py" lost its inner ".py" too).
    """
    def _strip_ext(fname):
        base = os.path.basename(fname)
        if self.extention and base.endswith(self.extention):
            return base[:-len(self.extention)]
        return base
    if jb.is_number(idx):
        return _strip_ext(self._module_list[idx])
    return [_strip_ext(f) for f in self.module_list]
#--- get_path_and_file
# p,f=os.path.split( self.file_list[idx] )
def FindModules(self,**kwargs):
    """
    find modules /commands under jumeg with defined permissions

    Parameters:
    -----------
    stage_prefix: <jumeg>
    prefix      : <jumeg>
    postfix     : <None>
    extention   : <.py>
    permission  : <os.X_OK|os.R_OK>
    function    : <get_args>

    Results:
    ---------
    list of module names (basenames without extension)
    """
    # refresh self._module_list, then map the filenames to bare module names
    self._find_modules(**kwargs)
    return self.ModuleNames()
def update(self,**kwargs):
    """Take over search settings from kwargs (unknown keys keep their old
    value), clear the module cache and refresh the jumeg path list."""
    for key in ("stage_prefix", "prefix", "postfix",
                "extention", "permission", "function"):
        setattr(self, key, kwargs.get(key, getattr(self, key)))
    self._module_list = []
    self._jumeg_path_list = self.get_jumeg_path_list_from_pythonpath()
def _walk(self,**kwargs):
    """
    search recursive for executable modules

    Parameters:
    -----------
    stage_prefix : stage prefix <jumeg>
    prefix       : file prefix <jumeg>
    postfix      : file postfix <None>
    extention    : file extension <".py">
    permission   : file permission <os.X_OK|os.R_OK>

    Results:
    --------
    list of executable modules, full filename
    """
    # merge kwargs into the settings and refresh self._jumeg_path_list
    self.update(**kwargs)
    # NOTE(review): self.debug is not set in __init__ here -- presumably
    # provided by the base class; confirm.
    if self.debug:
        logger.debug("walk trough path list: "+ "\n".join(self._jumeg_path_list) )
    skip_dir_set = set(self.skip_dir_list)
    for p in ( self._jumeg_path_list ): # PROB: path or . or link pointing to one dir or sub, parent dir
        for root, dirs, files in os.walk(p):
            # skip any path that contains a blacklisted directory component
            if (set(root.split(os.sep)) & skip_dir_set): continue
            for f in set(files):
                # filename filters: prefix and extension
                if self.prefix:
                    if not f.startswith(self.prefix): continue
                if self.extention:
                    if not f.endswith(self.extention): continue
                # keep only files matching the permission mask that also
                # define the wanted function (parsed via ast, not imported)
                if os.access(root+"/"+f,self.permission):
                    fmodule = os.path.join(root,f)
                    if self._is_function_in_module(fmodule):
                        self._module_list.append(fmodule)
    self._module_list.sort()
    if self.debug:
        logger.debug("modules found : \n"+ "\n".join(self._module_list))
    return self._module_list
def _find_modules(self,**kwargs):
    """
    search recursive for executable modules
    exclude doubles due to e.g links

    Parameters:
    -----------
    stage_prefix : stage prefix <jumeg>
    prefix       : file prefix <jumeg>
    postfix      : file postfix <None>
    extention    : file extension <".py">
    permission   : file permission <os.X_OK|os.R_OK>

    Results:
    --------
    list of modules, full filename, exclude doubles
    """
    # merge kwargs into the settings and refresh self._jumeg_path_list
    self.update(**kwargs)
    # fdict maps file size -> mtime -> resolved path; two files with equal
    # size AND mtime are assumed to be the same file (hard/soft links).
    # NOTE(review): distinct files that coincidentally share size and mtime
    # would be collapsed into one entry -- confirm this is acceptable.
    fdict = {}
    skip_dir_set = set(self.skip_dir_list)
    for pstart in ( self._jumeg_path_list ): # PROB: path or . or link pointing to one dir or sub, parent dir
        for p in list(Path(pstart).glob('**/*' + self.extention)):
            if not p.is_file(): continue
            f = os.fspath(p.resolve()) # abs path, symlinks resolved
            #--- is executable
            if not os.access(f,self.permission): continue
            #--- not in ../gui ../test ...
            if (set(f.split(pstart)[-1].split(os.sep)) & skip_dir_set): continue
            #--- starts with jumeg
            if self.prefix:
                if not os.path.basename(f).startswith(self.prefix): continue
            #--- if module has <get_args> function
            if self._is_function_in_module(f):
                #--- get fsize for dict key
                sz = p.stat().st_size
                if not fdict.get(sz): fdict[sz]={}
                fdict[sz][ p.stat().st_mtime ] = f # st_inoino
    # --- ck double items
    # --- exclude same files dueto links or start path differences
    for sz in fdict.keys():
        for mt in fdict[sz].keys():
            self._module_list.append( fdict[sz][mt] )
    self._module_list.sort()
    return self._module_list
| |
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import textwrap
import mock
import pep8
from nova.hacking import checks
from nova import test
class HackingTestCase(test.NoDBTestCase):
    """This class tests the hacking checks in nova.hacking.checks by passing
    strings to the check methods like the pep8/flake8 parser would. The parser
    loops over each line in the file and then passes the parameters to the
    check method. The parameter names in the check method dictate what type of
    object is passed to the check method. The parameter types are::

        logical_line: A processed line with the following modifications:
            - Multi-line statements converted to a single line.
            - Stripped left and right.
            - Contents of strings replaced with "xxx" of same length.
            - Comments removed.
        physical_line: Raw line of text from the input file.
        lines: a list of the raw lines from the input file
        tokens: the tokens that contribute to this logical line
        line_number: line number in the input file
        total_lines: number of lines in the input file
        blank_lines: blank lines before this one
        indent_char: indentation character in this file (" " or "\t")
        indent_level: indentation (with tabs expanded to multiples of 8)
        previous_indent_level: indentation on previous line
        previous_logical: previous logical line
        filename: Path of the file being run through pep8

    When running a test on a check method the return will be False/None if
    there is no violation in the sample input. If there is an error a tuple is
    returned with a position in the line, and a message. So to check the result
    just assertTrue if the check is expected to fail and assertFalse if it
    should pass.
    """

    def test_virt_driver_imports(self):
        # N311 fires only when one virt driver imports from another one
        expect = (0, "N311: importing code from other virt drivers forbidden")
        self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
            "from nova.virt.libvirt import utils as libvirt_utils",
            "./nova/virt/xenapi/driver.py"))
        self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
            "import nova.virt.libvirt.utils as libvirt_utils",
            "./nova/virt/xenapi/driver.py"))
        # importing from the driver's OWN package is allowed
        self.assertIsNone(checks.import_no_virt_driver_import_deps(
            "from nova.virt.libvirt import utils as libvirt_utils",
            "./nova/virt/libvirt/driver.py"))
        self.assertIsNone(checks.import_no_virt_driver_import_deps(
            "import nova.virt.firewall",
            "./nova/virt/libvirt/firewall.py"))

    def test_virt_driver_config_vars(self):
        # importing another driver's config options is flagged
        self.assertIsInstance(checks.import_no_virt_driver_config_deps(
            "CONF.import_opt('volume_drivers', "
            "'nova.virt.libvirt.driver', group='libvirt')",
            "./nova/virt/xenapi/driver.py"), tuple)
        self.assertIsNone(checks.import_no_virt_driver_config_deps(
            "CONF.import_opt('volume_drivers', "
            "'nova.virt.libvirt.driver', group='libvirt')",
            "./nova/virt/libvirt/volume.py"))

    def test_no_vi_headers(self):
        # vi modelines are only rejected near the top/bottom of the file
        lines = ['Line 1\n', 'Line 2\n', 'Line 3\n', 'Line 4\n', 'Line 5\n',
                 'Line 6\n', 'Line 7\n', 'Line 8\n', 'Line 9\n', 'Line 10\n',
                 'Line 11\n', 'Line 12\n', 'Line 13\n', 'Line14\n', 'Line15\n']
        self.assertIsNone(checks.no_vi_headers(
            "Test string foo", 1, lines))
        self.assertEqual(len(list(checks.no_vi_headers(
            "# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
            2, lines))), 2)
        self.assertIsNone(checks.no_vi_headers(
            "# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
            6, lines))
        self.assertIsNone(checks.no_vi_headers(
            "# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
            9, lines))
        self.assertEqual(len(list(checks.no_vi_headers(
            "# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
            14, lines))), 2)
        self.assertIsNone(checks.no_vi_headers(
            "Test end string for vi",
            15, lines))

    def test_assert_true_instance(self):
        self.assertEqual(len(list(checks.assert_true_instance(
            "self.assertTrue(isinstance(e, "
            "exception.BuildAbortException))"))), 1)
        self.assertEqual(
            len(list(checks.assert_true_instance("self.assertTrue()"))), 0)

    def test_assert_equal_type(self):
        self.assertEqual(len(list(checks.assert_equal_type(
            "self.assertEqual(type(als['QuicAssist']), list)"))), 1)
        self.assertEqual(
            len(list(checks.assert_equal_type("self.assertTrue()"))), 0)

    def test_assert_equal_in(self):
        # `assertEqual(x in y, True/False)` should be assertIn/assertNotIn;
        # generator expressions with `in` must not be flagged
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual(a in b, True)"))), 1)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual('str' in 'string', True)"))), 1)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual(any(a==1 for a in b), True)"))), 0)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual(True, a in b)"))), 1)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual(True, 'str' in 'string')"))), 1)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual(True, any(a==1 for a in b))"))), 0)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual(a in b, False)"))), 1)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual('str' in 'string', False)"))), 1)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual(any(a==1 for a in b), False)"))), 0)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual(False, a in b)"))), 1)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual(False, 'str' in 'string')"))), 1)
        self.assertEqual(len(list(checks.assert_equal_in(
            "self.assertEqual(False, any(a==1 for a in b))"))), 0)

    def test_assert_equal_none(self):
        self.assertEqual(len(list(checks.assert_equal_none(
            "self.assertEqual(A, None)"))), 1)
        self.assertEqual(len(list(checks.assert_equal_none(
            "self.assertEqual(None, A)"))), 1)
        self.assertEqual(
            len(list(checks.assert_equal_none("self.assertIsNone()"))), 0)

    def test_assert_true_or_false_with_in_or_not_in(self):
        self.assertEqual(len(list(checks.assert_equal_none(
            "self.assertEqual(A, None)"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertTrue(A in B)"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertFalse(A in B)"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertTrue(A not in B)"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertFalse(A not in B)"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertTrue(A in B, 'some message')"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertFalse(A in B, 'some message')"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertTrue(A not in B, 'some message')"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertFalse(A not in B, 'some message')"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertTrue(A in 'some string with spaces')"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertTrue(A in 'some string with spaces')"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertTrue(A in ['1', '2', '3'])"))), 1)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertTrue(A in [1, 2, 3])"))), 1)
        # `in` inside a generator/boolean expression is legitimate
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertTrue(any(A > 5 for A in B))"))), 0)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertTrue(any(A > 5 for A in B), 'some message')"))), 0)
        self.assertEqual(len(list(checks.assert_true_or_false_with_in(
            "self.assertFalse(some in list1 and some2 in list2)"))), 0)

    def test_no_translate_debug_logs(self):
        self.assertEqual(len(list(checks.no_translate_debug_logs(
            "LOG.debug(_('foo'))", "nova/scheduler/foo.py"))), 1)
        self.assertEqual(len(list(checks.no_translate_debug_logs(
            "LOG.debug('foo')", "nova/scheduler/foo.py"))), 0)
        self.assertEqual(len(list(checks.no_translate_debug_logs(
            "LOG.info(_('foo'))", "nova/scheduler/foo.py"))), 0)

    def test_no_setting_conf_directly_in_tests(self):
        self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
            "CONF.option = 1", "nova/tests/test_foo.py"))), 1)
        self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
            "CONF.group.option = 1", "nova/tests/test_foo.py"))), 1)
        self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
            "CONF.option = foo = 1", "nova/tests/test_foo.py"))), 1)
        # Shouldn't fail with comparisons
        self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
            "CONF.option == 'foo'", "nova/tests/test_foo.py"))), 0)
        self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
            "CONF.option != 1", "nova/tests/test_foo.py"))), 0)
        # Shouldn't fail since not in nova/tests/
        self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
            "CONF.option = 1", "nova/compute/foo.py"))), 0)

    def test_log_translations(self):
        logs = ['audit', 'error', 'info', 'warning', 'critical', 'warn',
                'exception']
        levels = ['_LI', '_LW', '_LE', '_LC']
        debug = "LOG.debug('OK')"
        audit = "LOG.audit(_('OK'))"
        self.assertEqual(
            0, len(list(checks.validate_log_translations(debug, debug, 'f'))))
        self.assertEqual(
            0, len(list(checks.validate_log_translations(audit, audit, 'f'))))
        for log in logs:
            # untranslated literal -> one violation
            bad = 'LOG.%s("Bad")' % log
            self.assertEqual(1,
                             len(list(
                                 checks.validate_log_translations(bad,
                                                                  bad, 'f'))))
            # noqa opt-out is honored
            ok = "LOG.%s('OK') # noqa" % log
            self.assertEqual(0,
                             len(list(
                                 checks.validate_log_translations(ok,
                                                                  ok, 'f'))))
            # a variable argument cannot be checked -> no violation
            ok = "LOG.%s(variable)" % log
            self.assertEqual(0,
                             len(list(
                                 checks.validate_log_translations(ok,
                                                                  ok, 'f'))))
            for level in levels:
                ok = "LOG.%s(%s('OK'))" % (log, level)
                self.assertEqual(0,
                                 len(list(
                                     checks.validate_log_translations(ok,
                                                                      ok,
                                                                      'f'))))

    def test_no_mutable_default_args(self):
        self.assertEqual(1, len(list(checks.no_mutable_default_args(
            " def fake_suds_context(calls={}):"))))
        self.assertEqual(1, len(list(checks.no_mutable_default_args(
            "def get_info_from_bdm(virt_type, bdm, mapping=[])"))))
        # plain assignments of mutables are fine
        self.assertEqual(0, len(list(checks.no_mutable_default_args(
            "defined = []"))))
        self.assertEqual(0, len(list(checks.no_mutable_default_args(
            "defined, undefined = [], {}"))))

    def test_check_explicit_underscore_import(self):
        # NOTE(review): the checker appears to keep per-filename state --
        # the same line that is flagged below passes once an i18n import
        # has been seen for that file; confirm against nova.hacking.checks.
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "LOG.info(_('My info message'))",
            "cinder/tests/other_files.py"))), 1)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "cinder/tests/other_files.py"))), 1)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "from cinder.i18n import _",
            "cinder/tests/other_files.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "LOG.info(_('My info message'))",
            "cinder/tests/other_files.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "cinder/tests/other_files.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "from cinder.i18n import _, _LW",
            "cinder/tests/other_files2.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "cinder/tests/other_files2.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "_ = translations.ugettext",
            "cinder/tests/other_files3.py"))), 0)
        self.assertEqual(len(list(checks.check_explicit_underscore_import(
            "msg = _('My message')",
            "cinder/tests/other_files3.py"))), 0)

    def test_use_jsonutils(self):
        def __get_msg(fun):
            msg = ("N324: jsonutils.%(fun)s must be used instead of "
                   "json.%(fun)s" % {'fun': fun})
            return [(0, msg)]

        for method in ('dump', 'dumps', 'load', 'loads'):
            self.assertEqual(
                __get_msg(method),
                list(checks.use_jsonutils("json.%s(" % method,
                                          "./nova/virt/xenapi/driver.py")))
            # xenserver plugins are exempt from the jsonutils rule
            self.assertEqual(0,
                             len(list(checks.use_jsonutils(
                                 "json.%s(" % method,
                                 "./plugins/xenserver/script.py"))))
            self.assertEqual(0,
                             len(list(checks.use_jsonutils(
                                 "jsonx.%s(" % method,
                                 "./nova/virt/xenapi/driver.py"))))
        self.assertEqual(0,
                         len(list(checks.use_jsonutils(
                             "json.dumb",
                             "./nova/virt/xenapi/driver.py"))))

    # We are patching pep8 so that only the check under test is actually
    # installed.
    @mock.patch('pep8._checks',
                {'physical_line': {}, 'logical_line': {}, 'tree': {}})
    def _run_check(self, code, checker, filename=None):
        # register just <checker>, run it over <code> and return the
        # deferred (line, col, msg, ...) violation tuples
        pep8.register_check(checker)
        lines = textwrap.dedent(code).strip().splitlines(True)
        checker = pep8.Checker(filename=filename, lines=lines)
        checker.check_all()
        checker.report._deferred_print.sort()
        return checker.report._deferred_print

    def _assert_has_errors(self, code, checker, expected_errors=None,
                           filename=None):
        # compare only (line, col, code) -- drop the message text
        actual_errors = [e[:3] for e in
                         self._run_check(code, checker, filename)]
        self.assertEqual(expected_errors or [], actual_errors)

    def _assert_has_no_errors(self, code, checker, filename=None):
        self._assert_has_errors(code, checker, filename=filename)

    def test_str_unicode_exception(self):
        checker = checks.CheckForStrUnicodeExc
        code = """
               def f(a, b):
                   try:
                       p = str(a) + str(b)
                   except ValueError as e:
                       p = str(e)
                   return p
               """
        errors = [(5, 16, 'N325')]
        self._assert_has_errors(code, checker, expected_errors=errors)
        code = """
               def f(a, b):
                   try:
                       p = unicode(a) + str(b)
                   except ValueError as e:
                       p = e
                   return p
               """
        self._assert_has_no_errors(code, checker)
        code = """
               def f(a, b):
                   try:
                       p = str(a) + str(b)
                   except ValueError as e:
                       p = unicode(e)
                   return p
               """
        errors = [(5, 20, 'N325')]
        self._assert_has_errors(code, checker, expected_errors=errors)
        code = """
               def f(a, b):
                   try:
                       p = str(a) + str(b)
                   except ValueError as e:
                       try:
                           p = unicode(a) + unicode(b)
                       except ValueError as ve:
                           p = str(e) + str(ve)
                       p = e
                   return p
               """
        errors = [(8, 20, 'N325'), (8, 29, 'N325')]
        self._assert_has_errors(code, checker, expected_errors=errors)
        code = """
               def f(a, b):
                   try:
                       p = str(a) + str(b)
                   except ValueError as e:
                       try:
                           p = unicode(a) + unicode(b)
                       except ValueError as ve:
                           p = str(e) + unicode(ve)
                       p = str(e)
                   return p
               """
        errors = [(8, 20, 'N325'), (8, 33, 'N325'), (9, 16, 'N325')]
        self._assert_has_errors(code, checker, expected_errors=errors)

    def test_api_version_decorator_check(self):
        code = """
               @some_other_decorator
               @wsgi.api_version("2.5")
               def my_method():
                   pass
               """
        self._assert_has_errors(code, checks.check_api_version_decorator,
                                expected_errors=[(2, 0, "N332")])

    def test_oslo_namespace_imports_check(self):
        code = """
               from oslo.concurrency import processutils
               """
        self._assert_has_errors(code, checks.check_oslo_namespace_imports,
                                expected_errors=[(1, 0, "N333")])

    def test_oslo_namespace_imports_check_2(self):
        code = """
               from oslo import i18n
               """
        self._assert_has_errors(code, checks.check_oslo_namespace_imports,
                                expected_errors=[(1, 0, "N333")])

    def test_oslo_namespace_imports_check_3(self):
        code = """
               import oslo.messaging
               """
        self._assert_has_errors(code, checks.check_oslo_namespace_imports,
                                expected_errors=[(1, 0, "N333")])

    def test_oslo_assert_raises_regexp(self):
        code = """
               self.assertRaisesRegexp(ValueError,
                                       "invalid literal for.*XYZ'$",
                                       int,
                                       'XYZ')
               """
        self._assert_has_errors(code, checks.assert_raises_regexp,
                                expected_errors=[(1, 0, "N335")])

    def test_api_version_decorator_check_no_errors(self):
        code = """
               class ControllerClass():
                   @wsgi.api_version("2.5")
                   def my_method():
                       pass
               """
        self._assert_has_no_errors(code, checks.check_api_version_decorator)

    def test_trans_add(self):
        checker = checks.CheckForTransAdd
        code = """
               def fake_tran(msg):
                   return msg


               _ = fake_tran
               _LI = _
               _LW = _
               _LE = _
               _LC = _


               def f(a, b):
                   msg = _('test') + 'add me'
                   msg = _LI('test') + 'add me'
                   msg = _LW('test') + 'add me'
                   msg = _LE('test') + 'add me'
                   msg = _LC('test') + 'add me'
                   msg = 'add to me' + _('test')
                   return msg
               """
        errors = [(13, 10, 'N326'), (14, 10, 'N326'), (15, 10, 'N326'),
                  (16, 10, 'N326'), (17, 10, 'N326'), (18, 24, 'N326')]
        self._assert_has_errors(code, checker, expected_errors=errors)
        code = """
               def f(a, b):
                   msg = 'test' + 'add me'
                   return msg
               """
        self._assert_has_no_errors(code, checker)

    def test_dict_constructor_with_list_copy(self):
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "    dict([(i, connect_info[i])"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "    attrs = dict([(k, _from_json(v))"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "    type_names = dict((value, key) for key, value in"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "   dict((value, key) for key, value in"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "foo(param=dict((k, v) for k, v in bar.items()))"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            " dict([[i,i] for i in range(3)])"))))
        self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
            "  dd = dict([i,i] for i in range(3))"))))
        # keyword-style and attribute-access usages are fine
        self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
            "        create_kwargs = dict(snapshot=snapshot,"))))
        self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
            "      self._render_dict(xml, data_el, data.__dict__)"))))
| |
import re
import os
import sys
import sqlite3
import subprocess
import operator
import time
# our modules
import byterange
import parse_filemap
class ParsedDirectoryInfo(object):
    """Plain data holder for the metadata parsed from one replay directory.

    Bug fix: replay_start_time and replay_end_time were previously
    hard-coded to 0, silently discarding the constructor arguments.
    """
    def __init__(self, ctime=0, program_name="", log_id=0, record_pid=0,
                 parent_id=0, replay_start_time=0,
                 replay_end_time=0,
                 program_args="", replay_graph=None):
        self.ctime = ctime
        self.program_name = program_name
        # note: the attribute is named `logid`, not `log_id` like the arg
        self.logid = log_id
        self.record_pid = record_pid
        self.parent_id = parent_id
        self.replay_start_time = replay_start_time  # was: 0 (bug)
        self.replay_end_time = replay_end_time      # was: 0 (bug)
        self.program_args = program_args
        self.replay_graph = replay_graph
class ReplayLogDB(object):
'''
Class holding the operations for creating, insert, querying the replay db.
Using sqlite3 for now...we might want to change the backing db store later.
Need to change create_table, replay_id_exists, and insert_replay then.
'''
def __init__(self, omniplay_path, logdb_name="replay.db",
             logdb_dir="/replay_logdb",
             replay_table_name="replays",
             graph_table_name="graph_edges",
             start=0,
             finish=sys.maxsize):
    """Configure (but do not open) the replay db.

    Parameters
    ----------
    omniplay_path     : omniplay root directory
    logdb_name        : db filename inside logdb_dir
    logdb_dir         : replay log directory (made absolute)
    replay_table_name : table holding one row per replay
    graph_table_name  : table holding the dependency-graph edges
    start, finish     : only group ids in [start, finish) are populated

    Fix: the `finish` default used `sys.maxint`, which only exists on
    Python 2; `sys.maxsize` behaves the same here and also works on
    Python 3.
    """
    # Path of the omniplay root directory
    self.omniplay_path = omniplay_path
    # name of the logdb
    self.logdb_name = logdb_name
    # logdb directory (absolute path)
    self.logdb_dir = os.path.abspath(logdb_dir)
    # name of the table in the DB
    self.replay_table_name = replay_table_name
    self.graph_table_name = graph_table_name
    # stateful state -- set by init_cursor()/close_cursor()
    self.cursor = None
    self.conn = None
    self.start_id = start
    self.end_id = finish
    assert self.end_id >= self.start_id
def init_cursor(self):
    """Open the sqlite connection to the log db and create a cursor."""
    connection = sqlite3.connect(self.get_logdb_path())
    self.conn = connection
    self.cursor = connection.cursor()
def close_cursor(self):
    """Commit pending changes, close the connection and reset the handles."""
    self.conn.commit()
    self.conn.close()
    # mark the db as closed so init_cursor() must be called again
    self.conn = None
    self.cursor = None
def commit_transaction(self):
    """Commit the current sqlite transaction."""
    self.conn.commit()
def get_logdb_path(self):
    """Absolute path of the sqlite db file."""
    return self.logdb_dir + "/" + self.logdb_name
def get_ndx_path(self):
    """Absolute path of the "ndx" file in the log directory."""
    return self.logdb_dir + "/ndx"
def get_replay_directory(self, group_id):
    """Absolute path of the recording directory for <group_id>."""
    return self.logdb_dir + "/rec_" + str(group_id)
def create_tables(self):
    '''
    Create the replay table, the graph-edge table and two covering
    indexes, if they do not exist yet.
    '''
    c = self.cursor
    # create a table indexing the replays
    # date: time replay started in seconds since epoch
    # id: log id
    # record_pid: record pid
    # program: short program name, e.g. ls
    # args: arguments to the program, e.g. -l
    sql = '''CREATE TABLE IF NOT EXISTS {table_name} '''.format(
        table_name=self.replay_table_name)
    sql += '''(date INT, id INT, record_pid INT, parent_id INT, '''
    sql += '''replay_start_time INT, replay_end_time INT, '''
    sql += '''program TEXT, args TEXT)'''
    c.execute(sql)
    # edge table: one row per (writer syscall -> reader syscall) byte range
    sql = '''CREATE TABLE IF NOT EXISTS {table_name} '''.format(
        table_name=self.graph_table_name)
    sql += '''(write_id INT, write_pid INT, '''
    sql += '''write_sysnum INT, write_offset INT, '''
    sql += '''write_size INT, read_id INT, '''
    sql += '''read_pid INT, read_sysnum INT, '''
    sql += '''read_offset INT, read_size INT)'''
    c.execute(sql)
    # indexes for the two lookup directions (lookup_writers/lookup_readers)
    sql = '''CREATE INDEX IF NOT EXISTS read_index '''
    sql += '''on {table_name} '''.format(
        table_name=self.graph_table_name)
    sql += '''(read_id, read_pid, read_sysnum)'''
    c.execute(sql)
    sql = '''CREATE INDEX IF NOT EXISTS write_index '''
    sql += '''on {table_name} '''.format(
        table_name=self.graph_table_name)
    sql += '''(write_id, write_pid, write_sysnum)'''
    c.execute(sql)
    self.commit_transaction()
    print("Created db %s, with tables %s, %s" %
          (self.logdb_name,
           self.replay_table_name,
           self.graph_table_name))
def get_ids(self):
    '''
    Returns a sorted list of the replay IDs in the db
    '''
    query = "SELECT id from {table_name}".format(
        table_name=self.replay_table_name)
    return sorted(row[0] for row in self.cursor.execute(query))
def replay_id_exists(self, replay_id):
    '''
    Returns True if replay_id exists in the db, False otherwise
    '''
    query = "SELECT * from {table_name} WHERE id=?".format(
        table_name=self.replay_table_name)
    self.cursor.execute(query, (replay_id, ))
    return self.cursor.fetchone() is not None
def get_parent_id(self, replay_id):
    """Return the parent group id of <replay_id>, or 0 when unknown."""
    query = "SELECT parent_id from {table_name} WHERE id=?".format(
        table_name=self.replay_table_name)
    self.cursor.execute(query, (replay_id, ))
    row = self.cursor.fetchone()
    return int(row[0]) if row is not None else 0
def get_last_id(self):
    """Return the highest replay id in the db, or 0 for an empty table."""
    self.cursor.execute('''SELECT MAX(id) from {table_name}'''.format(
        table_name=self.replay_table_name))
    row = self.cursor.fetchone()
    # MAX() on an empty table yields a row containing NULL
    if row is None or row[0] is None:
        return 0
    return int(row[0])
def insert_replay(self, parsed_directory_info):
    '''
    Insert a replay (a ParsedDirectoryInfo) into the DB and commit.
    '''
    # column order must match the replay table schema:
    # (date, id, record_pid, parent_id, replay_start_time,
    #  replay_end_time, program, args)
    values = (parsed_directory_info.ctime,
              parsed_directory_info.logid,
              parsed_directory_info.record_pid,
              parsed_directory_info.parent_id,
              parsed_directory_info.replay_start_time,
              parsed_directory_info.replay_end_time,
              parsed_directory_info.program_name,
              parsed_directory_info.program_args)
    self.cursor.execute(
        '''INSERT INTO {table_name} VALUES (?,?,?,?,?,?,?,?)'''.format(
            table_name=self.replay_table_name), values)
    self.commit_transaction()
def get_all_replays(self):
    '''
    Return all rows of the replay table as a list of tuples.

    Bug fix: the old per-row unpacking assumed 6 columns, but the table
    has 8 (date, id, record_pid, parent_id, replay_start_time,
    replay_end_time, program, args), so this method raised ValueError
    for any non-empty table. The rows were re-appended unchanged anyway,
    so the unpacking is simply dropped.
    '''
    c = self.cursor
    c.execute('''SELECT * FROM {table_name}'''.format(
        table_name=self.replay_table_name))
    return c.fetchall()
def insert_graph(self, graph_edges):
    '''
    Insert dependency-graph edges into the DB (no commit here; the
    caller is expected to commit the transaction).
    '''
    cursor = self.cursor
    for edge in graph_edges:
        # map the edge object's attributes onto the table's column names
        (read_id, read_pid, read_sysnum, read_offset, read_size,
         write_id, write_pid, write_sysnum, write_offset, write_size) = \
            (edge.read_log, edge.read_pid, edge.read_sysnum,
             edge.read_offset, edge.read_size,
             edge.write_log, edge.write_pid, edge.write_sysnum,
             edge.write_offset, edge.write_size)
        # column order must match the graph table schema (write_* first)
        values = (write_id, write_pid,
                  write_sysnum, write_offset, write_size,
                  read_id, read_pid, read_sysnum,
                  read_offset, read_size)
        cursor.execute(
            '''INSERT INTO ''' +
            '''{table_name} VALUES (?,?,?,?,?,?,?,?,?,?)'''.format(
                table_name=self.graph_table_name), values)
def parse_ckpt(self, rec_dir):
    '''
    Run <omniplay>/test/parseckpt on <rec_dir> and parse its stdout.

    Returns
    -------
    (program_name, record_pid, parent_id, program_args, replay_time)
    or None when the output could not be parsed.

    Fixes:
    - Popen now uses universal_newlines=True so stdout is text and
      output.split('\\n') also works on Python 3.
    - the "time of replay" length check now returns None like every other
      parse-error branch (it previously fell through and could raise an
      uncaught IndexError).
    - two error messages applied '%' to only the second half of a
      '+'-concatenated string, which raised TypeError instead of printing.
    '''
    program_name = None
    record_pid = None
    program_args = ""
    replay_time = None
    parent_id = 0
    parse_ckpt = self.omniplay_path + "/test/parseckpt"
    cmd = ''.join([parse_ckpt, " ", rec_dir])
    process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                               universal_newlines=True)
    output = process.communicate()[0]
    # split by newline, parse for the checkpoint information
    lines = output.split('\n')
    for (line_count, line) in enumerate(lines):
        line = line.strip()
        if line.startswith("record pid:"):
            fields = line.split(" ")
            if len(fields) != 3:
                print("ERROR: parseckpt format must have changed!" +
                      "Can't get record pid")
                print("See line %d: %s (directory %s)" %
                      (line_count, line, rec_dir))
                return None
            try:
                record_pid = int(fields[2])
            except ValueError:
                print("ERROR: parseckpt format must have changed!" +
                      "Can't parse record pid")
                print("See line %d: %s (directory %s)" %
                      (line_count, line, rec_dir))
                return None
        elif line.startswith("parent record group id:"):
            fields = line.split(" ")
            if len(fields) != 5:
                print("ERROR: parseckpt format must have changed! " +
                      "Can't get parent id")
                print("See line %d: %s (directory %s)"
                      % (line_count, line, rec_dir))
                return None
            try:
                parent_id = int(fields[4])
            except ValueError:
                print("ERROR: parseckpt format must have changed! " +
                      "Can't parse parent id")
                print("See line %d: %s (directory %s)" %
                      (line_count, line, rec_dir))
                return None
        elif line.startswith("time of replay is:"):
            fields = line.split(" ")
            if len(fields) != 8:
                print("ERROR: parseckpt format must have changed!" +
                      "Can't read replay time!")
                print("See line %d: %s (directory %s)" %
                      (line_count, line, rec_dir))
                # was missing: fall-through could raise IndexError below
                return None
            try:
                replay_time = int(fields[4])
            except ValueError:
                print("ERROR: parseckpt format must have changed!" +
                      "Can't parse replay time")
                print("See line %d: %s (directory %s)" %
                      (line_count, line, rec_dir))
                return None
        elif line.startswith("Argument"):
            fields = line.split(" ")
            if len(fields) < 4:
                print("ERROR: parseckpt format must have changed!" +
                      "Can't parse arguments!")
                print("See line %d: %s (directory %s)" %
                      (line_count, line, rec_dir))
                return None
            arg_num = fields[1]
            try:
                arg_num = int(arg_num)
            except ValueError:
                print("ERROR: parseckpt format must have changed!")
                print("See line: %s (directory %s)" % (line, rec_dir))
                return None
            # argument 0 is the program itself; the rest are its args
            if arg_num == 0:
                program_name = fields[3]
            else:
                program_args = ''.join([program_args, " ", fields[3]])
    if not program_name:
        # fix: '%' previously bound only to the second string literal
        print(("ERROR: (%s) parseckpt did not have a program name. "
               "Treating this as invalid replay") % rec_dir)
        return None
    if not record_pid:
        print(("ERROR: (%s) parseckpt did not have a record pid. "
               "Treating this as invalid replay") % rec_dir)
        return None
    if not replay_time:
        print("ERROR: (%s) parseckpt did not have a replay time. Using 0"
              % rec_dir)
        replay_time = 0
    return (program_name, record_pid, parent_id, program_args, replay_time)
def get_last_modified_klog_time(self, directory):
    """Return the newest mtime among klog* files in <directory>, 0 if none."""
    newest = 0
    for name in os.listdir(directory):
        if not name.startswith("klog"):
            continue
        mtime = os.stat(directory + "/" + name).st_mtime
        if mtime > newest:
            newest = mtime
    return newest
def populate(self):
    '''
    Goes through the replay_logdb directory
    and inserts a record for replays that
    it already hasn't inserted.
    '''
    time_start = time.time()
    # get a list of replay directories
    replay_directories = os.listdir(self.logdb_dir)
    group_ids = [int(x.split("_")[1]) for x in replay_directories
                 if x.startswith("rec_")]
    last_group_id = self.get_last_id()
    # only get the directories that we havne't already inserted
    group_ids = sorted(filter(lambda x: x > last_group_id, group_ids))
    # Only populate between a certain range
    group_ids = sorted(filter(lambda x: x >= self.start_id, group_ids))
    group_ids = sorted(filter(lambda x: x < self.end_id, group_ids))
    replay_directories = []
    for group_id in group_ids:
        replay_directories.append(''.join([self.logdb_dir,
                                           "/rec_", str(group_id)]))
    # filter out everything that is not a directory
    # NOTE(review): on Python 3 filter() returns an iterator, so the
    # print() below would show a filter object; the file otherwise uses
    # Python 2 idioms -- confirm the target interpreter.
    replay_directories = filter(lambda x: os.path.isdir(x),
                                replay_directories)
    if self.cursor is None:
        print("Error: cursor is not inited, could not populate db.")
        print("Error: Please init the cursor before callling this method")
        return
    print(replay_directories)
    for directory in replay_directories:
        ## parse ckpt
        # get ID from logdb
        logid = 0
        try:
            fields = directory.split("_")
            logid = int(fields[-1])
        except:
            # 0 for default
            logid = 0
            print("Could not get group id from directory " +
                  "%s, treating as invalid replay directory" % directory)
            continue
        # see if id in db
        if self.replay_id_exists(logid):
            print("Skipping %s because it's already in the db" % directory)
            continue
        # parse_directory() is defined elsewhere in this class
        parsed_directory_info = self.parse_directory(logid, directory)
        if parsed_directory_info is None:
            # the db file and the ndx file live in the same directory and
            # are expected to fail parsing; anything else is reported
            if directory != self.get_logdb_path() and \
               directory != self.get_ndx_path():
                print("Could not parse %s, treating as invalid replay"
                      % directory)
            continue
        assert(parsed_directory_info is not None)
        assert(parsed_directory_info.replay_graph is not None)
        self.insert_replay(parsed_directory_info)
        self.insert_graph(parsed_directory_info.replay_graph)
        # commit the insert
        self.commit_transaction()
        print("Inserted replay id %d, parent %d" %
              (parsed_directory_info.logid,
               parsed_directory_info.parent_id))
    time_end = time.time()
    print("Time it took to populate the db: %f seconds" %
          (time_end - time_start))
def lookup_writers(self, read_byterange, copy_meta=False):
    """Find the write byteranges that produced the data covered by a read.

    Queries the dependency-graph table for every edge whose read side
    matches (group_id, pid, syscall) of `read_byterange`, then walks the
    rows in read order, mapping the requested [offset, offset + size)
    window onto the corresponding write-side ranges.

    Parameters
    ----------
    read_byterange : byterange.ByteRange
        The read whose writers should be located.
    copy_meta : bool
        If True, propagate a copy of the read's metadata to the results.

    Returns
    -------
    list of byterange.ByteRange
        One range per writing syscall that overlaps the window.
    """
    # BUG FIX: the original applied .format() only to the final
    # "WHERE ..." string literal (method call binds tighter than +), so
    # the literal text "{table_name}" was sent to SQLite.  Build the full
    # query string first, then substitute the table name.
    query = ("SELECT write_id, write_pid, write_sysnum, "
             "write_offset, write_size, "
             "read_id, read_pid, read_sysnum, read_offset, "
             "read_size from {table_name} "
             "WHERE read_id=? AND read_pid=? AND read_sysnum=?").format(
                 table_name=self.graph_table_name)
    self.cursor.execute(query, (read_byterange.group_id,
                                read_byterange.pid,
                                read_byterange.syscall))
    rows = self.cursor.fetchall()
    # sort by read (group_id, pid, sysnum, offset)
    # XXX there's probably some way to do this in SQL
    # but not going to right now
    rows = sorted(rows, key=operator.itemgetter(5, 6, 7, 8))
    byteranges = []
    offset = read_byterange.offset
    size = read_byterange.size
    meta = read_byterange.meta.copy() if copy_meta else {}
    for row in rows:
        (write_id, write_pid, write_sysnum, write_offset, write_size,
         read_id, read_pid, read_sysnum, read_offset, read_size) = row
        # the returned range from the DB may be larger than what we care for
        if offset >= read_offset and offset < read_offset + read_size:
            diff = offset - read_offset
            if offset + size <= read_offset + read_size:
                # remaining window is fully covered by this row
                byteranges.append(byterange.ByteRange(write_id,
                                                      write_pid,
                                                      write_sysnum,
                                                      write_offset + diff,
                                                      size,
                                                      meta=meta))
                # XXX this looks wrong (stops after first full cover)
                break
            else:
                # window extends past this row: consume the overlapping
                # prefix and keep scanning the following rows
                diff_size = read_offset + read_size - offset
                offset = read_offset + read_size
                size = size - diff_size
                byteranges.append(byterange.ByteRange(write_id,
                                                      write_pid,
                                                      write_sysnum,
                                                      write_offset + diff,
                                                      diff_size,
                                                      meta=meta))
    return byteranges
def lookup_readers(self, write_byterange, copy_meta=False):
    """Find the read byteranges that consumed the data of a write.

    Mirror image of lookup_writers: queries the graph table for edges
    whose write side matches (group_id, pid, syscall) of
    `write_byterange`, then maps the requested window onto the read-side
    ranges.

    Parameters
    ----------
    write_byterange : byterange.ByteRange
        The write whose readers should be located.
    copy_meta : bool
        If True, propagate a copy of the write's metadata to the results.

    Returns
    -------
    list of byterange.ByteRange
        One range per reading syscall that overlaps the window.
    """
    # BUG FIX: as in lookup_writers, .format() previously applied only to
    # the last concatenated literal, leaving "{table_name}" unformatted
    # in the SQL text.  Format the fully assembled query instead.
    query = ("SELECT write_id, write_pid, write_sysnum, "
             "write_offset, write_size, "
             "read_id, read_pid, read_sysnum, "
             "read_offset, read_size from {table_name} "
             "WHERE write_id=? AND write_pid=? AND write_sysnum=?").format(
                 table_name=self.graph_table_name)
    self.cursor.execute(query, (write_byterange.group_id,
                                write_byterange.pid,
                                write_byterange.syscall))
    rows = self.cursor.fetchall()
    # sort by write (group_id, pid, sysnum, offset)
    # XXX there's probably some way to do this in SQL but not going to right now
    rows = sorted(rows, key=operator.itemgetter(0, 1, 2, 3))
    byteranges = []
    offset = write_byterange.offset
    size = write_byterange.size
    meta = write_byterange.meta.copy() if copy_meta else {}
    for row in rows:
        (write_id, write_pid, write_sysnum, write_offset, write_size,
         read_id, read_pid, read_sysnum, read_offset, read_size) = row
        # the returned range from the DB may be larger than what we care for
        if offset >= write_offset and offset < write_offset + write_size:
            diff = offset - write_offset
            if offset + size <= write_offset + write_size:
                # remaining window is fully covered by this row
                byteranges.append(byterange.ByteRange(read_id,
                                                      read_pid,
                                                      read_sysnum,
                                                      read_offset + diff,
                                                      size,
                                                      meta=meta))
            else:
                # window extends past this row: consume the overlapping
                # prefix and continue with the next row
                diff_size = write_offset + write_size - offset
                offset = write_offset + write_size
                size = size - diff_size
                byteranges.append(byterange.ByteRange(read_id,
                                                      read_pid,
                                                      read_sysnum,
                                                      read_offset + diff,
                                                      diff_size,
                                                      meta=meta))
    return byteranges
def parse_directory(self, logid, logdb_dir):
    '''
    Calls the parseckpt program and parses its output.

    Parses the checkpoint in `logdb_dir`, then runs the parseklog tool on
    every "klog.*" file in the directory and collects the file and pipe
    dependency edges for replay group `logid`.

    Returns a ParsedDirectoryInfo (ckpt ctime, program name, log id,
    record pid, parent id, replay start/end times, args, graph edges).
    Returns None if it can't parse the log directory
    '''
    # A usable replay directory must exist and contain a checkpoint file.
    if not os.path.isdir(logdb_dir):
        #print("%s is not a directory" % logdb_dir)
        return None
    ckpt = logdb_dir + "/ckpt"
    if not os.path.isfile(ckpt):
        print("No ckpt in directory %s, skipping" % logdb_dir)
        return None
    # get a list of entries in the replay directory
    klog_directories = os.listdir(logdb_dir)
    # keep only the kernel log entries ("klog.<pid>")
    klog_directories = filter(lambda x: x.startswith("klog"), klog_directories)
    # Gets the full path
    klog_directories = map(lambda x: ''.join([logdb_dir, "/", x]), klog_directories)
    # get the time the ckpt was last modified
    # (ideally we would want the creation time, but it doesn't seem like
    # it's easy to do in Python)
    ctime = int(os.stat(ckpt).st_ctime)
    # execute parseckpt
    ckpt_info = self.parse_ckpt(logdb_dir)
    if not ckpt_info:
        # can't parse the ckpt, just skip this replay
        return None
    (program_name, record_pid, parent_id, program_args, replay_time) = ckpt_info
    replay_endtime = self.get_last_modified_klog_time(logdb_dir)
    graph_edges = []
    pipeInfo = parse_filemap.PipeInfo(self)
    for directory in klog_directories:
        # First, figure out the record pid
        # (klog file names end in ".<pid>")
        fields = directory.split(".")
        pid = int(fields[-1])
        # Now, parse the output of the parseklog file
        cmd = ''.join([self.omniplay_path, "/test/parseklog ", directory, " -g"])
        logproc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        graphoutput = logproc.communicate()[0]
        lines = graphoutput.split('\n')
        for line in lines:
            line = line.strip()
            # plain file edge:
            #   <id> <pid> <sysnum> {<id>, <pid>, <sysnum>, <offset>, <size>}
            match = re.match("^([0-9]+) ([0-9]+) ([0-9]+) {([0-9]+), ([0-9]+), ([0-9]+), ([0-9]+), ([0-9]+)}", line)
            if match is not None:
                graph_edges.append(parse_filemap.GraphEdge(logid, pid,
                    int(match.group(1)), int(match.group(2)),
                    int(match.group(3)), int(match.group(4)),
                    int(match.group(5)), int(match.group(6)),
                    int(match.group(7)), int(match.group(8))))
            else:
                # pipe edge with a full source tuple; a zero in the 5th
                # field marks an "ordered" pipe handled by PipeInfo
                match = re.match("^pipe: ([0-9]+) ([0-9]+) ([0-9]+) {([0-9]+), ([0-9]+), ([0-9]+), ([0-9]+), ([0-9]+)}", line)
                if match is not None:
                    if (int(match.group(5)) == 0):
                        pipeInfo.add_ordered_pipe(logid, pid,
                            int(match.group(1)), int(match.group(2)),
                            int(match.group(3)), int(match.group(4)),
                            int(match.group(6)), int(match.group(7)),
                            int(match.group(8)))
                    else:
                        graph_edges.append(parse_filemap.GraphEdge(logid, pid,
                            int(match.group(1)), int(match.group(2)),
                            int(match.group(3)), int(match.group(4)),
                            int(match.group(5)), int(match.group(6)),
                            int(match.group(7)), int(match.group(8))))
                else:
                    match = re.match("^pipe: ([0-9]+), ([0-9]+), ([0-9]+) {([0-9]+)} {([0-9]+)}", line)
                    if match is not None:
                        #pipe: writer_id, pipe_id, sysnum {size} {start_clock}
                        pipeInfo.add_pipe(logid, int(match.group(2)), pid,
                            int(match.group(3)), int(match.group(1)),
                            int(match.group(4)), int(match.group(5)))
        # NOTE(review): only negative return codes (process killed by a
        # signal) are treated as failure; a nonzero exit status from
        # parseklog is silently ignored -- confirm this is intended.
        if logproc.returncode < 0:
            print("parseklog for %s failed with %d" %
                  (directory, logproc.returncode))
            return None
    # add pipe information to graph edges
    pipeInfo.compute_pipes(graph_edges)
    return ParsedDirectoryInfo(ctime, program_name, logid,
                               record_pid, parent_id,
                               replay_time, replay_endtime,
                               program_args, graph_edges)
def get_program_args(self, group_id):
    """Return the recorded program invocation string for a replay group.

    Parameters
    ----------
    group_id : int
        Replay group id (the `id` column of the replay table).

    Returns
    -------
    str or None
        None if the group is unknown or has no program recorded;
        the bare program name if no arguments were recorded;
        otherwise the program name concatenated with its args column
        (the stored args text carries any needed separator).
    """
    # Fetch both columns in one query instead of two round-trips for the
    # same row, preserving the original per-column None handling.
    self.cursor.execute(
        "SELECT program, args from {table_name} WHERE id=?".format(
            table_name=self.replay_table_name
        ), (group_id,)
    )
    fetched = self.cursor.fetchone()
    if fetched is None:
        return None
    (program, args) = fetched
    if program is None:
        return None
    elif args is None:
        return program
    else:
        return program + args
| |
"""Gaussian Mixture Model."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
import numpy as np
from scipy import linalg
from .base import BaseMixture, _check_shape
from ..externals.six.moves import zip
from ..utils import check_array
from ..utils.validation import check_is_fitted
###############################################################################
# Gaussian mixture shape checkers used by the GaussianMixture class
def _check_weights(weights, n_components):
    """Validate the user-provided 'weights'.

    Parameters
    ----------
    weights : array-like, shape (n_components,)
        The proportions of components of each mixture.

    n_components : int
        Number of components.

    Returns
    -------
    weights : array, shape (n_components,)
    """
    weights = check_array(weights, dtype=[np.float64, np.float32],
                          ensure_2d=False)
    _check_shape(weights, (n_components,), 'weights')

    # every weight must lie in the closed interval [0, 1]
    out_of_range = np.any(weights < 0.) or np.any(weights > 1.)
    if out_of_range:
        raise ValueError("The parameter 'weights' should be in the range "
                         "[0, 1], but got max value %.5f, min value %.5f"
                         % (np.min(weights), np.max(weights)))

    # the weights must form a probability distribution (sum to one)
    total = np.sum(weights)
    if not np.allclose(np.abs(1. - total), 0.):
        raise ValueError("The parameter 'weights' should be normalized, "
                         "but got sum(weights) = %.5f" % total)
    return weights
def _check_means(means, n_components, n_features):
    """Validate the provided 'means'.

    Parameters
    ----------
    means : array-like, shape (n_components, n_features)
        The centers of the current components.

    n_components : int
        Number of components.

    n_features : int
        Number of features.

    Returns
    -------
    means : array, (n_components, n_features)
    """
    # coerce to a float array, then verify the expected 2d shape
    checked = check_array(means, dtype=[np.float64, np.float32],
                          ensure_2d=False)
    _check_shape(checked, (n_components, n_features), 'means')
    return checked
def _check_precision_positivity(precision, covariance_type):
"""Check a precision vector is positive-definite."""
if np.any(np.less_equal(precision, 0.0)):
raise ValueError("'%s precision' should be "
"positive" % covariance_type)
def _check_precision_matrix(precision, covariance_type):
"""Check a precision matrix is symmetric and positive-definite."""
if not (np.allclose(precision, precision.T) and
np.all(linalg.eigvalsh(precision) > 0.)):
raise ValueError("'%s precision' should be symmetric, "
"positive-definite" % covariance_type)
def _check_precisions_full(precisions, covariance_type):
    """Check the precision matrices are symmetric and positive-definite.

    Raises ValueError (via _check_precision_matrix) on the first matrix
    that fails the check.
    """
    # _check_precision_matrix returns None; the original rebinding of its
    # result (and the unused enumerate index) were misleading dead code.
    for prec in precisions:
        _check_precision_matrix(prec, covariance_type)
def _check_precisions(precisions, covariance_type, n_components, n_features):
    """Validate user provided precisions.

    Parameters
    ----------
    precisions : array-like,
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)

    covariance_type : string

    n_components : int
        Number of components.

    n_features : int
        Number of features.

    Returns
    -------
    precisions : array
    """
    # BUG FIX: was `covariance_type is 'full'` -- identity comparison on
    # strings relies on CPython interning and is not guaranteed; use ==.
    precisions = check_array(precisions, dtype=[np.float64, np.float32],
                             ensure_2d=False,
                             allow_nd=covariance_type == 'full')

    precisions_shape = {'full': (n_components, n_features, n_features),
                        'tied': (n_features, n_features),
                        'diag': (n_components, n_features),
                        'spherical': (n_components,)}
    _check_shape(precisions, precisions_shape[covariance_type],
                 '%s precision' % covariance_type)

    # dispatch to the shape-specific positive-definiteness check
    _check_precisions = {'full': _check_precisions_full,
                         'tied': _check_precision_matrix,
                         'diag': _check_precision_positivity,
                         'spherical': _check_precision_positivity}
    _check_precisions[covariance_type](precisions, covariance_type)
    return precisions
###############################################################################
# Gaussian mixture parameters estimators (used by the M-Step)
# Shared user-facing message raised when a near-empty component makes a
# covariance estimate non positive-definite during the M-step.
ESTIMATE_PRECISION_ERROR_MESSAGE = ("The algorithm has diverged because of "
                                    "too few samples per components. Try to "
                                    "decrease the number of components, "
                                    "or increase reg_covar.")
def _estimate_gaussian_precisions_cholesky_full(resp, X, nk, means, reg_covar):
"""Estimate the full precision matrices.
Parameters
----------
resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
precisions_chol : array, shape (n_components, n_features, n_features)
The cholesky decomposition of the precision matrix.
"""
n_components, n_features = means.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for k in range(n_components):
diff = X - means[k]
covariance = np.dot(resp[:, k] * diff.T, diff) / nk[k]
covariance.flat[::n_features + 1] += reg_covar
try:
cov_chol = linalg.cholesky(covariance, lower=True)
except linalg.LinAlgError:
raise ValueError(ESTIMATE_PRECISION_ERROR_MESSAGE)
precisions_chol[k] = linalg.solve_triangular(cov_chol,
np.eye(n_features),
lower=True).T
return precisions_chol
def _estimate_gaussian_precisions_cholesky_tied(resp, X, nk, means, reg_covar):
"""Estimate the tied precision matrix.
Parameters
----------
resp : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
precisions_chol : array, shape (n_features, n_features)
The cholesky decomposition of the precision matrix.
"""
n_samples, n_features = X.shape
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(nk * means.T, means)
covariances = avg_X2 - avg_means2
covariances /= n_samples
covariances.flat[::len(covariances) + 1] += reg_covar
try:
cov_chol = linalg.cholesky(covariances, lower=True)
except linalg.LinAlgError:
raise ValueError(ESTIMATE_PRECISION_ERROR_MESSAGE)
precisions_chol = linalg.solve_triangular(cov_chol, np.eye(n_features),
lower=True).T
return precisions_chol
def _estimate_gaussian_precisions_cholesky_diag(resp, X, nk, means, reg_covar):
"""Estimate the diagonal precision matrices.
Parameters
----------
responsibilities : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
precisions_chol : array, shape (n_components, n_features)
The cholesky decomposition of the precision matrix.
"""
avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
avg_means2 = means ** 2
avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
covariances = avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(ESTIMATE_PRECISION_ERROR_MESSAGE)
return 1. / np.sqrt(covariances)
def _estimate_gaussian_precisions_cholesky_spherical(resp, X, nk, means,
reg_covar):
"""Estimate the spherical precision matrices.
Parameters
----------
responsibilities : array-like, shape (n_samples, n_components)
X : array-like, shape (n_samples, n_features)
nk : array-like, shape (n_components,)
means : array-like, shape (n_components, n_features)
reg_covar : float
Returns
-------
precisions_chol : array, shape (n_components,)
The cholesky decomposition of the precision matrix.
"""
avg_X2 = np.dot(resp.T, X * X) / nk[:, np.newaxis]
avg_means2 = means ** 2
avg_X_means = means * np.dot(resp.T, X) / nk[:, np.newaxis]
covariances = (avg_X2 - 2 * avg_X_means + avg_means2 + reg_covar).mean(1)
if np.any(np.less_equal(covariances, 0.0)):
raise ValueError(ESTIMATE_PRECISION_ERROR_MESSAGE)
return 1. / np.sqrt(covariances)
def _estimate_gaussian_parameters(X, resp, reg_covar, covariance_type):
    """Estimate the Gaussian distribution parameters.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The input data array.

    resp : array-like, shape (n_samples, n_features)
        The responsibilities for each data sample in X.

    reg_covar : float
        The regularization added to the diagonal of the covariance matrices.

    covariance_type : {'full', 'tied', 'diag', 'spherical'}
        The type of precision matrices.

    Returns
    -------
    nk : array, shape (n_components,)
        The numbers of data samples in the current components.

    means : array, shape (n_components, n_features)
        The centers of the current components.

    precisions_cholesky : array
        The cholesky decomposition of sample precisions of the current
        components. The shape depends of the covariance_type.
    """
    # soft counts per component; the eps keeps later divisions safe when
    # a component has (almost) no responsibility mass
    nk = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
    means = np.dot(resp.T, X) / nk[:, np.newaxis]
    # dispatch the precision estimation to the covariance-type kernel
    estimators = {"full": _estimate_gaussian_precisions_cholesky_full,
                  "tied": _estimate_gaussian_precisions_cholesky_tied,
                  "diag": _estimate_gaussian_precisions_cholesky_diag,
                  "spherical": _estimate_gaussian_precisions_cholesky_spherical}
    precs_chol = estimators[covariance_type](resp, X, nk, means, reg_covar)
    return nk, means, precs_chol
###############################################################################
# Gaussian mixture probability estimators
def _estimate_log_gaussian_prob_full(X, means, precisions_chol):
"""Estimate the log Gaussian probability for 'full' precision.
Parameters
----------
X : array-like, shape (n_samples, n_features)
means : array-like, shape (n_components, n_features)
precisions_chol : array-like, shape (n_components, n_features, n_features)
Cholesky decompositions of the precision matrices.
Returns
-------
log_prob : array, shape (n_samples, n_components)
"""
n_samples, n_features = X.shape
n_components, _ = means.shape
log_prob = np.empty((n_samples, n_components))
for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
log_det = -2. * np.sum(np.log(np.diagonal(prec_chol)))
y = np.dot(X - mu, prec_chol)
log_prob[:, k] = -.5 * (n_features * np.log(2. * np.pi) + log_det +
np.sum(np.square(y), axis=1))
return log_prob
def _estimate_log_gaussian_prob_tied(X, means, precision_chol):
"""Estimate the log Gaussian probability for 'tied' precision.
Parameters
----------
X : array-like, shape (n_samples, n_features)
means : array-like, shape (n_components, n_features)
precision_chol : array-like, shape (n_features, n_features)
Cholesky decomposition of the precision matrix.
Returns
-------
log_prob : array-like, shape (n_samples, n_components)
"""
n_samples, n_features = X.shape
n_components, _ = means.shape
log_prob = np.empty((n_samples, n_components))
log_det = -2. * np.sum(np.log(np.diagonal(precision_chol)))
for k, mu in enumerate(means):
y = np.dot(X - mu, precision_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
log_prob = -.5 * (n_features * np.log(2. * np.pi) + log_det + log_prob)
return log_prob
def _estimate_log_gaussian_prob_diag(X, means, precisions_chol):
"""Estimate the log Gaussian probability for 'diag' precision.
Parameters
----------
X : array-like, shape (n_samples, n_features)
means : array-like, shape (n_components, n_features)
precisions_chol : array-like, shape (n_components, n_features)
Cholesky decompositions of the precision matrices.
Returns
-------
log_prob : array-like, shape (n_samples, n_components)
"""
n_samples, n_features = X.shape
precisions = precisions_chol ** 2
log_prob = -.5 * (n_features * np.log(2. * np.pi) -
np.sum(np.log(precisions), 1) +
np.sum((means ** 2 * precisions), 1) -
2. * np.dot(X, (means * precisions).T) +
np.dot(X ** 2, precisions.T))
return log_prob
def _estimate_log_gaussian_prob_spherical(X, means, precisions_chol):
"""Estimate the log Gaussian probability for 'spherical' precision.
Parameters
----------
X : array-like, shape (n_samples, n_features)
means : array-like, shape (n_components, n_features)
precisions_chol : array-like, shape (n_components, )
Cholesky decompositions of the precision matrices.
Returns
-------
log_prob : array-like, shape (n_samples, n_components)
"""
n_samples, n_features = X.shape
precisions = precisions_chol ** 2
log_prob = -.5 * (n_features * np.log(2 * np.pi) -
n_features * np.log(precisions) +
np.sum(means ** 2, 1) * precisions -
2 * np.dot(X, means.T * precisions) +
np.outer(np.sum(X ** 2, axis=1), precisions))
return log_prob
class GaussianMixture(BaseMixture):
    """Gaussian Mixture.

    Representation of a Gaussian mixture model probability distribution.
    This class allows to estimate the parameters of a Gaussian mixture
    distribution.

    Parameters
    ----------
    n_components : int, defaults to 1.
        The number of mixture components.

    covariance_type : {'full', 'tied', 'diag', 'spherical'},
        defaults to 'full'.
        String describing the type of covariance parameters to use.
        Must be one of::

        'full' (each component has its own general covariance matrix),
        'tied' (all components share the same general covariance matrix),
        'diag' (each component has its own diagonal covariance matrix),
        'spherical' (each component has its own single variance),

    tol : float, defaults to 1e-3.
        The convergence threshold. EM iterations will stop when the
        log_likelihood average gain is below this threshold.

    reg_covar : float, defaults to 0.
        Non-negative regularization added to the diagonal of covariance.
        Allows to assure that the covariance matrices are all positive.

    max_iter : int, defaults to 100.
        The number of EM iterations to perform.

    n_init : int, defaults to 1.
        The number of initializations to perform. The best results is kept.

    init_params : {'kmeans', 'random'}, defaults to 'kmeans'.
        The method used to initialize the weights, the means and the
        precisions.
        Must be one of::

        'kmeans' : responsibilities are initialized using kmeans.
        'random' : responsibilities are initialized randomly.

    weights_init : array-like, shape (n_components, ), optional
        The user-provided initial weights, defaults to None.
        If it None, weights are initialized using the `init_params` method.

    means_init : array-like, shape (n_components, n_features), optional
        The user-provided initial means, defaults to None,
        If it None, means are initialized using the `init_params` method.

    precisions_init : array-like, optional.
        The user-provided initial precisions (inverse of the covariance
        matrices), defaults to None.
        If it None, precisions are initialized using the 'init_params' method.
        The shape depends on 'covariance_type'::

        (n_components,) if 'spherical',
        (n_features, n_features) if 'tied',
        (n_components, n_features) if 'diag',
        (n_components, n_features, n_features) if 'full'

    random_state : RandomState or an int seed, defaults to None.
        A random number generator instance.

    warm_start : bool, default to False.
        If 'warm_start' is True, the solution of the last fitting is used as
        initialization for the next call of fit(). This can speed up
        convergence when fit is called several time on similar problems.

    verbose : int, default to 0.
        Enable verbose output. If 1 then it prints the current
        initialization and each iteration step. If greater than 1 then
        it prints also the log probability and the time needed
        for each step.

    Attributes
    ----------
    weights_ : array, shape (n_components,)
        The weights of each mixture components.

    means_ : array, shape (n_components, n_features)
        The mean of each mixture component.

    covariances_ : array
        The covariance of each mixture component.
        The shape depends on `covariance_type`::

        (n_components,) if 'spherical',
        (n_features, n_features) if 'tied',
        (n_components, n_features) if 'diag',
        (n_components, n_features, n_features) if 'full'

    precisions_ : array
        The precision matrices for each component in the mixture. A precision
        matrix is the inverse of a covariance matrix. A covariance matrix is
        symmetric positive definite so the mixture of Gaussian can be
        equivalently parameterized by the precision matrices. Storing the
        precision matrices instead of the covariance matrices makes it more
        efficient to compute the log-likelihood of new samples at test time.
        The shape depends on `covariance_type`::

        (n_components,) if 'spherical',
        (n_features, n_features) if 'tied',
        (n_components, n_features) if 'diag',
        (n_components, n_features, n_features) if 'full'

    precisions_cholesky_ : array
        The cholesky decomposition of the precision matrices of each mixture
        component. A precision matrix is the inverse of a covariance matrix.
        A covariance matrix is symmetric positive definite so the mixture of
        Gaussian can be equivalently parameterized by the precision matrices.
        Storing the precision matrices instead of the covariance matrices makes
        it more efficient to compute the log-likelihood of new samples at test
        time. The shape depends on `covariance_type`::

        (n_components,) if 'spherical',
        (n_features, n_features) if 'tied',
        (n_components, n_features) if 'diag',
        (n_components, n_features, n_features) if 'full'

    converged_ : bool
        True when convergence was reached in fit(), False otherwise.

    n_iter_ : int
        Number of step used by the best fit of EM to reach the convergence.
    """

    def __init__(self, n_components=1, covariance_type='full', tol=1e-3,
                 reg_covar=1e-6, max_iter=100, n_init=1, init_params='kmeans',
                 weights_init=None, means_init=None, precisions_init=None,
                 random_state=None, warm_start=False,
                 verbose=0, verbose_interval=10):
        super(GaussianMixture, self).__init__(
            n_components=n_components, tol=tol, reg_covar=reg_covar,
            max_iter=max_iter, n_init=n_init, init_params=init_params,
            random_state=random_state, warm_start=warm_start,
            verbose=verbose, verbose_interval=verbose_interval)
        self.covariance_type = covariance_type
        self.weights_init = weights_init
        self.means_init = means_init
        self.precisions_init = precisions_init

    def _check_parameters(self, X):
        """Check the Gaussian mixture parameters are well defined."""
        _, n_features = X.shape
        if self.covariance_type not in ['spherical', 'tied', 'diag', 'full']:
            raise ValueError("Invalid value for 'covariance_type': %s "
                             "'covariance_type' should be in "
                             "['spherical', 'tied', 'diag', 'full']"
                             % self.covariance_type)

        if self.weights_init is not None:
            self.weights_init = _check_weights(self.weights_init,
                                               self.n_components)

        if self.means_init is not None:
            self.means_init = _check_means(self.means_init,
                                           self.n_components, n_features)

        if self.precisions_init is not None:
            self.precisions_init = _check_precisions(self.precisions_init,
                                                     self.covariance_type,
                                                     self.n_components,
                                                     n_features)

    def _initialize(self, X, resp):
        """Initialization of the Gaussian mixture parameters.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        resp : array-like, shape (n_samples, n_components)
        """
        n_samples, _ = X.shape

        weights, means, precisions_cholesky = _estimate_gaussian_parameters(
            X, resp, self.reg_covar, self.covariance_type)
        weights /= n_samples

        # user-provided initial values take precedence over the estimates
        self.weights_ = (weights if self.weights_init is None
                         else self.weights_init)
        self.means_ = means if self.means_init is None else self.means_init

        if self.precisions_init is None:
            self.precisions_cholesky_ = precisions_cholesky
        # BUG FIX: was `self.covariance_type is 'full'` / `is 'tied'` --
        # identity comparison on strings relies on interning; use ==.
        elif self.covariance_type == 'full':
            self.precisions_cholesky_ = np.array(
                [linalg.cholesky(prec_init, lower=True)
                 for prec_init in self.precisions_init])
        elif self.covariance_type == 'tied':
            self.precisions_cholesky_ = linalg.cholesky(self.precisions_init,
                                                        lower=True)
        else:
            self.precisions_cholesky_ = self.precisions_init

    def _e_step(self, X):
        # returns (mean per-sample log-likelihood, responsibilities)
        log_prob_norm, _, log_resp = self._estimate_log_prob_resp(X)
        return np.mean(log_prob_norm), np.exp(log_resp)

    def _m_step(self, X, resp):
        self.weights_, self.means_, self.precisions_cholesky_ = (
            _estimate_gaussian_parameters(X, resp, self.reg_covar,
                                          self.covariance_type))
        self.weights_ /= X.shape[0]

    def _estimate_log_prob(self, X):
        # dispatch to the covariance-type specific log-probability kernel
        return {"full": _estimate_log_gaussian_prob_full,
                "tied": _estimate_log_gaussian_prob_tied,
                "diag": _estimate_log_gaussian_prob_diag,
                "spherical": _estimate_log_gaussian_prob_spherical
                }[self.covariance_type](X, self.means_,
                                        self.precisions_cholesky_)

    def _estimate_log_weights(self):
        return np.log(self.weights_)

    def _check_is_fitted(self):
        check_is_fitted(self, ['weights_', 'means_', 'precisions_cholesky_'])

    def _get_parameters(self):
        return self.weights_, self.means_, self.precisions_cholesky_

    def _set_parameters(self, params):
        self.weights_, self.means_, self.precisions_cholesky_ = params

        # Attributes computation: derive precisions_ and covariances_
        # from the stored cholesky factors.
        _, n_features = self.means_.shape

        # BUG FIX: string identity comparisons (`is 'full'`, `is 'tied'`)
        # replaced with value equality.
        if self.covariance_type == 'full':
            self.precisions_ = np.empty(self.precisions_cholesky_.shape)
            self.covariances_ = np.empty(self.precisions_cholesky_.shape)
            for k, prec_chol in enumerate(self.precisions_cholesky_):
                self.precisions_[k] = np.dot(prec_chol, prec_chol.T)
                cov_chol = linalg.solve_triangular(prec_chol,
                                                   np.eye(n_features))
                self.covariances_[k] = np.dot(cov_chol.T, cov_chol)

        elif self.covariance_type == 'tied':
            self.precisions_ = np.dot(self.precisions_cholesky_,
                                      self.precisions_cholesky_.T)
            cov_chol = linalg.solve_triangular(self.precisions_cholesky_,
                                               np.eye(n_features))
            self.covariances_ = np.dot(cov_chol.T, cov_chol)
        else:
            # diag / spherical: precisions are elementwise squares
            self.precisions_ = self.precisions_cholesky_ ** 2
            self.covariances_ = 1. / self.precisions_

    def _n_parameters(self):
        """Return the number of free parameters in the model."""
        _, n_features = self.means_.shape
        if self.covariance_type == 'full':
            cov_params = self.n_components * n_features * (n_features + 1) / 2.
        elif self.covariance_type == 'diag':
            cov_params = self.n_components * n_features
        elif self.covariance_type == 'tied':
            cov_params = n_features * (n_features + 1) / 2.
        elif self.covariance_type == 'spherical':
            cov_params = self.n_components
        mean_params = n_features * self.n_components
        return int(cov_params + mean_params + self.n_components - 1)

    def bic(self, X):
        """Bayesian information criterion for the current model on the input X.

        Parameters
        ----------
        X : array of shape (n_samples, n_dimensions)

        Returns
        -------
        bic : float
            The lower the better.
        """
        return (-2 * self.score(X) * X.shape[0] +
                self._n_parameters() * np.log(X.shape[0]))

    def aic(self, X):
        """Akaike information criterion for the current model on the input X.

        Parameters
        ----------
        X : array of shape(n_samples, n_dimensions)

        Returns
        -------
        aic : float
            The lower the better.
        """
        return -2 * self.score(X) * X.shape[0] + 2 * self._n_parameters()
| |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
from pandas.types.missing import isnull, notnull
from pandas.types.common import is_scalar
from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, _ensure_index, InvalidIndexError
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.index as _index
from pandas.util.decorators import Appender
from pandas.sparse.array import (make_sparse, _sparse_array_op, SparseArray,
_make_index)
from pandas._sparse import BlockIndex, IntIndex
import pandas._sparse as splib
from pandas.sparse.scipy_sparse import (_sparse_series_to_coo,
_coo_to_sparse_series)
# Keyword substitutions used by Appender-decorated docstrings in this module.
_shared_doc_kwargs = dict(klass='SparseSeries',
                          axes_single_arg="{0, 'index'}")
# -----------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
                  **eval_kwargs):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.

    str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
    present for compatibility.
    """
    def wrapper(self, other):
        if isinstance(other, Series):
            # a dense Series is converted to sparse with our fill value
            if not isinstance(other, SparseSeries):
                other = other.to_sparse(fill_value=self.fill_value)
            return _sparse_series_op(self, other, op, op_name)
        elif isinstance(other, DataFrame):
            return NotImplemented
        elif is_scalar(other):
            with np.errstate(all='ignore'):
                new_values = op(self.values, other)
            return self._constructor(new_values,
                                     index=self.index,
                                     name=self.name)
        else:  # pragma: no cover
            raise TypeError('operation with %s not supported' % type(other))

    # keep the dunder name on the wrapper itself ...
    wrapper.__name__ = name
    # ... but pass the bare op name (e.g. `__add__` -> `add`) through to
    # _sparse_series_op; `op_name` is resolved lazily by the closure above
    op_name = name[2:-2] if name.startswith("__") else name
    return wrapper
def _sparse_series_op(left, right, op, name):
    """Align two sparse series on the union of their indexes, apply `op`,
    and wrap the result with a name matched from the operands."""
    left, right = left.align(right, join='outer', copy=False)
    result = _sparse_array_op(left.values, right.values, op, name,
                              series=True)
    return left._constructor(result,
                             index=left.index,
                             name=_maybe_match_name(left, right))
class SparseSeries(Series):
    """Data structure for labeled, sparse floating point data
    Parameters
    ----------
    data : {array-like, Series, SparseSeries, dict}
    kind : {'block', 'integer'}
    fill_value : float
        Code for missing value. Defaults depends on dtype.
        0 for int dtype, False for bool dtype, and NaN for other dtypes
    sparse_index : {BlockIndex, IntIndex}, optional
        Only if you have one. Mainly used internally
    Notes
    -----
    SparseSeries objects are immutable via the typical Python means. If you
    must change values, convert to dense, make your changes, then convert back
    to sparse
    """
    # subtype marker consulted by _set_subtyp and pickling
    _subtyp = 'sparse_series'
    def __init__(self, data=None, index=None, sparse_index=None, kind='block',
                 fill_value=None, name=None, dtype=None, copy=False,
                 fastpath=False):
        # we are called internally, so short-circuit
        if fastpath:
            # data is an ndarray, index is defined
            if not isinstance(data, SingleBlockManager):
                data = SingleBlockManager(data, index, fastpath=True)
            if copy:
                data = data.copy()
        else:
            # normal construction: normalize the many accepted ``data``
            # types down to a SingleBlockManager or array + sparse metadata
            if data is None:
                data = []
            if isinstance(data, Series) and name is None:
                name = data.name
            if isinstance(data, SparseArray):
                if index is not None:
                    assert (len(index) == len(data))
                sparse_index = data.sp_index
                if fill_value is None:
                    fill_value = data.fill_value
                data = np.asarray(data)
            elif isinstance(data, SparseSeries):
                if index is None:
                    index = data.index.view()
                if fill_value is None:
                    fill_value = data.fill_value
                # extract the SingleBlockManager
                data = data._data
            elif isinstance(data, (Series, dict)):
                if index is None:
                    index = data.index.view()
                data = Series(data)
                res = make_sparse(data, kind=kind, fill_value=fill_value)
                data, sparse_index, fill_value = res
            elif isinstance(data, (tuple, list, np.ndarray)):
                # array-like
                if sparse_index is None:
                    res = make_sparse(data, kind=kind, fill_value=fill_value)
                    data, sparse_index, fill_value = res
                else:
                    assert (len(data) == sparse_index.npoints)
            elif isinstance(data, SingleBlockManager):
                if dtype is not None:
                    data = data.astype(dtype)
                if index is None:
                    index = data.index.view()
                else:
                    data = data.reindex(index, copy=False)
            else:
                # scalar ``data``: broadcast it across the (required) index
                length = len(index)
                if data == fill_value or (isnull(data) and isnull(fill_value)):
                    # scalar equals the fill value -> store no points at all
                    if kind == 'block':
                        sparse_index = BlockIndex(length, [], [])
                    else:
                        sparse_index = IntIndex(length, [])
                    data = np.array([])
                else:
                    # scalar differs from fill value -> every position stored
                    if kind == 'block':
                        locs, lens = ([0], [length]) if length else ([], [])
                        sparse_index = BlockIndex(length, locs, lens)
                    else:
                        # NOTE(review): IntIndex is handed the index labels
                        # here rather than integer positions -- confirm this
                        # matches IntIndex's expected arguments
                        sparse_index = IntIndex(length, index)
                    v = data
                    data = np.empty(length)
                    data.fill(v)
        if index is None:
            index = com._default_index(sparse_index.length)
        index = _ensure_index(index)
        # create/copy the manager
        if isinstance(data, SingleBlockManager):
            if copy:
                data = data.copy()
        else:
            # create a sparse array
            if not isinstance(data, SparseArray):
                data = SparseArray(data, sparse_index=sparse_index,
                                   fill_value=fill_value, dtype=dtype,
                                   copy=copy)
            data = SingleBlockManager(data, index)
        generic.NDFrame.__init__(self, data)
        self.index = index
        self.name = name
    @property
    def values(self):
        """ return the array """
        return self.block.values
    def __array__(self, result=None):
        """ the array interface, return my values """
        return self.block.values
    def get_values(self):
        """ same as values """
        return self.block.to_dense().view()
    @property
    def block(self):
        # the single block held by the internal SingleBlockManager
        return self._data._block
    @property
    def fill_value(self):
        # value standing in for the non-stored ("missing") entries
        return self.block.fill_value
    @fill_value.setter
    def fill_value(self, v):
        self.block.fill_value = v
    @property
    def sp_index(self):
        # the SparseIndex (BlockIndex or IntIndex) describing stored points
        return self.block.sp_index
    @property
    def sp_values(self):
        # ndarray of only the explicitly stored (non-fill) values
        return self.values.sp_values
    @property
    def npoints(self):
        # number of explicitly stored points
        return self.sp_index.npoints
    @classmethod
    def from_array(cls, arr, index=None, name=None, copy=False,
                   fill_value=None, fastpath=False):
        """
        Simplified alternate constructor
        """
        return cls(arr, index=index, name=name, copy=copy,
                   fill_value=fill_value, fastpath=fastpath)
    @property
    def _constructor(self):
        # used by pandas internals to rebuild instances of this type
        return SparseSeries
    @property
    def _constructor_expanddim(self):
        # 2-D counterpart used when expanding dimensions
        from pandas.sparse.api import SparseDataFrame
        return SparseDataFrame
    @property
    def kind(self):
        # 'block' or 'integer'; implicitly None for any other index type
        if isinstance(self.sp_index, BlockIndex):
            return 'block'
        elif isinstance(self.sp_index, IntIndex):
            return 'integer'
    def as_sparse_array(self, kind=None, fill_value=None, copy=False):
        """ return my self as a sparse array, do not copy by default """
        if fill_value is None:
            fill_value = self.fill_value
        if kind is None:
            kind = self.kind
        return SparseArray(self.values, sparse_index=self.sp_index,
                           fill_value=fill_value, kind=kind, copy=copy)
    def __len__(self):
        return len(self.block)
    @property
    def shape(self):
        return self._data.shape
    def __unicode__(self):
        # currently, unicode is same as repr...fixes infinite loop
        series_rep = Series.__unicode__(self)
        rep = '%s\n%s' % (series_rep, repr(self.sp_index))
        return rep
    def __array_wrap__(self, result, context=None):
        """
        Gets called prior to a ufunc (and after)
        See SparseArray.__array_wrap__ for detail.
        """
        if isinstance(context, tuple) and len(context) == 3:
            ufunc, args, domain = context
            # apply the ufunc to the fill values as well so the compressed
            # entries stay consistent with the stored ones
            args = [getattr(a, 'fill_value', a) for a in args]
            with np.errstate(all='ignore'):
                fill_value = ufunc(self.fill_value, *args[1:])
        else:
            fill_value = self.fill_value
        return self._constructor(result, index=self.index,
                                 sparse_index=self.sp_index,
                                 fill_value=fill_value,
                                 copy=False).__finalize__(self)
    def __array_finalize__(self, obj):
        """
        Gets called after any ufunc or other array operations, necessary
        to pass on the index.
        """
        self.name = getattr(obj, 'name', None)
        self.fill_value = getattr(obj, 'fill_value', None)
    def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
                filter_type=None, **kwds):
        """ perform a reduction operation """
        # computed on the dense values; numeric_only and filter_type are
        # accepted for API compatibility but unused here
        return op(self.get_values(), skipna=skipna, **kwds)
    def __getstate__(self):
        # pickling
        return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
                    fill_value=self.fill_value, name=self.name)
    def _unpickle_series_compat(self, state):
        # support unpickling of legacy (pre-block-manager) pickle state
        nd_state, own_state = state
        # recreate the ndarray
        data = np.empty(nd_state[1], dtype=nd_state[2])
        np.ndarray.__setstate__(data, nd_state)
        index, fill_value, sp_index = own_state[:3]
        name = None
        if len(own_state) > 3:
            name = own_state[3]
        # create a sparse array
        if not isinstance(data, SparseArray):
            data = SparseArray(data, sparse_index=sp_index,
                               fill_value=fill_value, copy=False)
        # recreate
        data = SingleBlockManager(data, index, fastpath=True)
        generic.NDFrame.__init__(self, data)
        self._set_axis(0, index)
        self.name = name
    def __iter__(self):
        """ forward to the array """
        return iter(self.values)
    def _set_subtyp(self, is_all_dates):
        # distinguish time-series-like sparse series for pandas internals
        if is_all_dates:
            object.__setattr__(self, '_subtyp', 'sparse_time_series')
        else:
            object.__setattr__(self, '_subtyp', 'sparse_series')
    def _ixs(self, i, axis=0):
        """
        Return the i-th value or values in the SparseSeries by location
        Parameters
        ----------
        i : int, slice, or sequence of integers
        Returns
        -------
        value : scalar (int) or Series (slice, sequence)
        """
        label = self.index[i]
        if isinstance(label, Index):
            # multiple positions selected -> return a sub-series
            return self.take(i, axis=axis, convert=True)
        else:
            return self._get_val_at(i)
    def _get_val_at(self, loc):
        """ forward to the array """
        return self.block.values._get_val_at(loc)
    def __getitem__(self, key):
        try:
            return self.index.get_value(self, key)
        except InvalidIndexError:
            pass
        except KeyError:
            if isinstance(key, (int, np.integer)):
                return self._get_val_at(key)
            elif key is Ellipsis:
                return self
            raise Exception('Requested index not in this series!')
        except TypeError:
            # Could not hash item, must be array-like?
            pass
        # fall through: treat the key as an array-like indexer
        key = _values_from_object(key)
        if self.index.nlevels > 1 and isinstance(key, tuple):
            # to handle MultiIndex labels
            key = self.index.get_loc(key)
        return self._constructor(self.values[key],
                                 index=self.index[key]).__finalize__(self)
    def _get_values(self, indexer):
        try:
            return self._constructor(self._data.get_slice(indexer),
                                     fastpath=True).__finalize__(self)
        except Exception:
            # slicing the manager failed; fall back to __getitem__
            return self[indexer]
    def _set_with_engine(self, key, value):
        # delegates to set_value, which rebuilds the sparse data
        return self.set_value(key, value)
    def abs(self):
        """
        Return an object with absolute value taken. Only applicable to objects
        that are all numeric
        Returns
        -------
        abs: type of caller
        """
        return self._constructor(np.abs(self.values),
                                 index=self.index).__finalize__(self)
    def get(self, label, default=None):
        """
        Returns value occupying requested label, default to specified
        missing value if not present. Analogous to dict.get
        Parameters
        ----------
        label : object
            Label value looking for
        default : object, optional
            Value to return if label not in index
        Returns
        -------
        y : scalar
        """
        if label in self.index:
            loc = self.index.get_loc(label)
            return self._get_val_at(loc)
        else:
            return default
    def get_value(self, label, takeable=False):
        """
        Retrieve single value at passed index label
        Parameters
        ----------
        index : label
        takeable : interpret the index as indexers, default False
        Returns
        -------
        value : scalar value
        """
        loc = label if takeable is True else self.index.get_loc(label)
        return self._get_val_at(loc)
    def set_value(self, label, value, takeable=False):
        """
        Quickly set single value at passed label. If label is not contained, a
        new object is created with the label placed at the end of the result
        index
        Parameters
        ----------
        label : object
            Partial indexing with MultiIndex not allowed
        value : object
            Scalar value
        takeable : interpret the index as indexers, default False
        Notes
        -----
        This method *always* returns a new object. It is not particularly
        efficient but is provided for API compatibility with Series
        Returns
        -------
        series : SparseSeries
        """
        values = self.to_dense()
        # if the label doesn't exist, we will create a new object here
        # and possibily change the index
        new_values = values.set_value(label, value, takeable=takeable)
        if new_values is not None:
            values = new_values
        new_index = values.index
        values = SparseArray(values, fill_value=self.fill_value,
                             kind=self.kind)
        self._data = SingleBlockManager(values, new_index)
        self._index = new_index
    def _set_values(self, key, value):
        # this might be inefficient as we have to recreate the sparse array
        # rather than setting individual elements, but have to convert
        # the passed slice/boolean that's in dense space into a sparse indexer
        # not sure how to do that!
        if isinstance(key, Series):
            key = key.values
        values = self.values.to_dense()
        values[key] = _index.convert_scalar(values, value)
        values = SparseArray(values, fill_value=self.fill_value,
                             kind=self.kind)
        self._data = SingleBlockManager(values, self.index)
    def to_dense(self, sparse_only=False):
        """
        Convert SparseSeries to (dense) Series
        """
        if sparse_only:
            # only the explicitly stored points, with their labels
            int_index = self.sp_index.to_int_index()
            index = self.index.take(int_index.indices)
            return Series(self.sp_values, index=index, name=self.name)
        else:
            return Series(self.values.to_dense(), index=self.index,
                          name=self.name)
    @property
    def density(self):
        # fraction of positions that are explicitly stored
        r = float(self.sp_index.npoints) / float(self.sp_index.length)
        return r
    def copy(self, deep=True):
        """
        Make a copy of the SparseSeries. Only the actual sparse values need to
        be copied
        """
        new_data = self._data
        if deep:
            new_data = self._data.copy()
        return self._constructor(new_data, sparse_index=self.sp_index,
                                 fill_value=self.fill_value).__finalize__(self)
    def reindex(self, index=None, method=None, copy=True, limit=None,
                **kwargs):
        """
        Conform SparseSeries to new Index
        See Series.reindex docstring for general behavior
        Returns
        -------
        reindexed : SparseSeries
        """
        new_index = _ensure_index(index)
        if self.index.equals(new_index):
            # same labels: short-circuit, honoring the ``copy`` flag
            if copy:
                return self.copy()
            else:
                return self
        return self._constructor(self._data.reindex(new_index, method=method,
                                                    limit=limit, copy=copy),
                                 index=new_index).__finalize__(self)
    def sparse_reindex(self, new_index):
        """
        Conform sparse values to new SparseIndex
        Parameters
        ----------
        new_index : {BlockIndex, IntIndex}
        Returns
        -------
        reindexed : SparseSeries
        """
        if not isinstance(new_index, splib.SparseIndex):
            raise TypeError('new index must be a SparseIndex')
        block = self.block.sparse_reindex(new_index)
        new_data = SingleBlockManager(block, self.index)
        return self._constructor(new_data, index=self.index,
                                 sparse_index=new_index,
                                 fill_value=self.fill_value).__finalize__(self)
    def take(self, indices, axis=0, convert=True, *args, **kwargs):
        """
        Sparse-compatible version of ndarray.take
        Returns
        -------
        taken : ndarray
        """
        convert = nv.validate_take_with_convert(convert, args, kwargs)
        new_values = SparseArray.take(self.values, indices)
        new_index = self.index.take(indices)
        return self._constructor(new_values,
                                 index=new_index).__finalize__(self)
    def cumsum(self, axis=0, *args, **kwargs):
        """
        Cumulative sum of values. Preserves locations of NaN values
        Returns
        -------
        cumsum : SparseSeries if `self` has a null `fill_value` and a
                 generic Series otherwise
        """
        nv.validate_cumsum(args, kwargs)
        new_array = SparseArray.cumsum(self.values)
        if isinstance(new_array, SparseArray):
            return self._constructor(
                new_array, index=self.index,
                sparse_index=new_array.sp_index).__finalize__(self)
        # TODO: gh-12855 - return a SparseSeries here
        return Series(new_array, index=self.index).__finalize__(self)
    @Appender(generic._shared_docs['isnull'])
    def isnull(self):
        # apply isnull to stored values and to the fill value separately
        arr = SparseArray(isnull(self.values.sp_values),
                          sparse_index=self.values.sp_index,
                          fill_value=isnull(self.fill_value))
        return self._constructor(arr, index=self.index).__finalize__(self)
    @Appender(generic._shared_docs['isnotnull'])
    def isnotnull(self):
        # complement of isnull, computed the same way
        arr = SparseArray(notnull(self.values.sp_values),
                          sparse_index=self.values.sp_index,
                          fill_value=notnull(self.fill_value))
        return self._constructor(arr, index=self.index).__finalize__(self)
    def dropna(self, axis=0, inplace=False, **kwargs):
        """
        Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
        """
        # TODO: make more efficient
        # validates the axis argument; the result is not otherwise used
        axis = self._get_axis_number(axis or 0)
        dense_valid = self.to_dense().valid()
        if inplace:
            raise NotImplementedError("Cannot perform inplace dropna"
                                      " operations on a SparseSeries")
        if isnull(self.fill_value):
            return dense_valid
        else:
            dense_valid = dense_valid[dense_valid != self.fill_value]
            return dense_valid.to_sparse(fill_value=self.fill_value)
    @Appender(generic._shared_docs['shift'] % _shared_doc_kwargs)
    def shift(self, periods, freq=None, axis=0):
        if periods == 0:
            return self.copy()
        # no special handling of fill values yet
        if not isnull(self.fill_value):
            shifted = self.to_dense().shift(periods, freq=freq,
                                            axis=axis)
            return shifted.to_sparse(fill_value=self.fill_value,
                                     kind=self.kind)
        if freq is not None:
            # shift the index itself; sparse values stay in place
            return self._constructor(
                self.sp_values, sparse_index=self.sp_index,
                index=self.index.shift(periods, freq),
                fill_value=self.fill_value).__finalize__(self)
        # shift the stored positions, clipping those that fall off the ends
        int_index = self.sp_index.to_int_index()
        new_indices = int_index.indices + periods
        start, end = new_indices.searchsorted([0, int_index.length])
        new_indices = new_indices[start:end]
        new_sp_index = _make_index(len(self), new_indices, self.sp_index)
        arr = self.values._simple_new(self.sp_values[start:end].copy(),
                                      new_sp_index, fill_value=np.nan)
        return self._constructor(arr, index=self.index).__finalize__(self)
    def combine_first(self, other):
        """
        Combine Series values, choosing the calling Series's values
        first. Result index will be the union of the two indexes
        Parameters
        ----------
        other : Series
        Returns
        -------
        y : Series
        """
        if isinstance(other, SparseSeries):
            other = other.to_dense()
        dense_combined = self.to_dense().combine_first(other)
        return dense_combined.to_sparse(fill_value=self.fill_value)
    def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
        """
        Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.
        Use row_levels and column_levels to determine the row and column
        coordinates respectively. row_levels and column_levels are the names
        (labels) or numbers of the levels. {row_levels, column_levels} must be
        a partition of the MultiIndex level names (or numbers).
        .. versionadded:: 0.16.0
        Parameters
        ----------
        row_levels : tuple/list
        column_levels : tuple/list
        sort_labels : bool, default False
            Sort the row and column labels before forming the sparse matrix.
        Returns
        -------
        y : scipy.sparse.coo_matrix
        rows : list (row labels)
        columns : list (column labels)
        Examples
        --------
        >>> from numpy import nan
        >>> s = Series([3.0, nan, 1.0, 3.0, nan, nan])
        >>> s.index = MultiIndex.from_tuples([(1, 2, 'a', 0),
                                              (1, 2, 'a', 1),
                                              (1, 1, 'b', 0),
                                              (1, 1, 'b', 1),
                                              (2, 1, 'b', 0),
                                              (2, 1, 'b', 1)],
                                              names=['A', 'B', 'C', 'D'])
        >>> ss = s.to_sparse()
        >>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
                                         column_levels=['C', 'D'],
                                         sort_labels=True)
        >>> A
        <3x4 sparse matrix of type '<class 'numpy.float64'>'
                with 3 stored elements in COOrdinate format>
        >>> A.todense()
        matrix([[ 0., 0., 1., 3.],
        [ 3., 0., 0., 0.],
        [ 0., 0., 0., 0.]])
        >>> rows
        [(1, 1), (1, 2), (2, 1)]
        >>> columns
        [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
        """
        A, rows, columns = _sparse_series_to_coo(self, row_levels,
                                                 column_levels,
                                                 sort_labels=sort_labels)
        return A, rows, columns
    @classmethod
    def from_coo(cls, A, dense_index=False):
        """
        Create a SparseSeries from a scipy.sparse.coo_matrix.
        .. versionadded:: 0.16.0
        Parameters
        ----------
        A : scipy.sparse.coo_matrix
        dense_index : bool, default False
            If False (default), the SparseSeries index consists of only the
            coords of the non-null entries of the original coo_matrix.
            If True, the SparseSeries index consists of the full sorted
            (row, col) coordinates of the coo_matrix.
        Returns
        -------
        s : SparseSeries
        Examples
        ---------
        >>> from scipy import sparse
        >>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
                               shape=(3, 4))
        >>> A
        <3x4 sparse matrix of type '<class 'numpy.float64'>'
                with 3 stored elements in COOrdinate format>
        >>> A.todense()
        matrix([[ 0., 0., 1., 2.],
        [ 3., 0., 0., 0.],
        [ 0., 0., 0., 0.]])
        >>> ss = SparseSeries.from_coo(A)
        >>> ss
        0 2 1
        3 2
        1 0 3
        dtype: float64
        BlockIndex
        Block locations: array([0], dtype=int32)
        Block lengths: array([3], dtype=int32)
        """
        return _coo_to_sparse_series(A, dense_index=dense_index)
# Module-level registration, runs once at import time:
# overwrite series methods with unaccelerated versions
ops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False,
                                   **ops.series_special_funcs)
ops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False,
                                **ops.series_flex_funcs)
# overwrite basic arithmetic to use SparseSeries version
# force methods to overwrite previous definitions.
ops.add_special_arithmetic_methods(SparseSeries, _arith_method,
                                   comp_method=_arith_method,
                                   bool_method=None, use_numexpr=False,
                                   force=True)
# backwards compatibility
class SparseTimeSeries(SparseSeries):
    """Deprecated alias of SparseSeries, kept only for backwards
    compatibility; emits a FutureWarning on construction."""
    def __init__(self, *args, **kwargs):
        # deprecation TimeSeries, #10890
        warnings.warn("SparseTimeSeries is deprecated. Please use "
                      "SparseSeries", FutureWarning, stacklevel=2)
        super(SparseTimeSeries, self).__init__(*args, **kwargs)
| |
from __future__ import unicode_literals
from psycopg2.extras import Inet
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
    """PostgreSQL-specific SQL generation helpers."""
    def unification_cast_sql(self, output_field):
        """Return an SQL template casting a UNION branch to the field type."""
        internal_type = output_field.get_internal_type()
        if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"):
            # PostgreSQL will resolve a union as type 'text' if input types are
            # 'unknown'.
            # http://www.postgresql.org/docs/9.4/static/typeconv-union-case.html
            # These fields cannot be implicitly cast back in the default
            # PostgreSQL configuration so we need to explicitly cast them.
            # We must also remove components of the type within brackets:
            # varchar(255) -> varchar.
            return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0]
        return '%s'
    def date_extract_sql(self, lookup_type, field_name):
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        if lookup_type == 'week_day':
            # For consistency across backends, we return Sunday=1, Saturday=7.
            return "EXTRACT('dow' FROM %s) + 1" % field_name
        else:
            return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
    def date_trunc_sql(self, lookup_type, field_name):
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
    def _convert_field_to_tz(self, field_name, tzname):
        """Wrap ``field_name`` in AT TIME ZONE when USE_TZ is enabled."""
        if settings.USE_TZ:
            field_name = "%s AT TIME ZONE %%s" % field_name
            params = [tzname]
        else:
            params = []
        return field_name, params
    def datetime_cast_date_sql(self, field_name, tzname):
        # cast to date after converting into the target time zone
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        sql = '(%s)::date' % field_name
        return sql, params
    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        # time-zone-convert first, then reuse the date extraction SQL
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        sql = self.date_extract_sql(lookup_type, field_name)
        return sql, params
    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
        return sql, params
    def deferrable_sql(self):
        # defer constraint checking until transaction commit
        return " DEFERRABLE INITIALLY DEFERRED"
    def lookup_cast(self, lookup_type, internal_type=None):
        lookup = '%s'
        # Cast text lookups to text to allow things like filter(x__contains=4)
        if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
                           'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
            if internal_type in ('IPAddressField', 'GenericIPAddressField'):
                lookup = "HOST(%s)"
            else:
                lookup = "%s::text"
        # Use UPPER(x) for case-insensitive lookups; it's faster.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            lookup = 'UPPER(%s)' % lookup
        return lookup
    def last_insert_id(self, cursor, table_name, pk_name):
        # Use pg_get_serial_sequence to get the underlying sequence name
        # from the table name and column name (available since PostgreSQL 8)
        cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
            self.quote_name(table_name), pk_name))
        return cursor.fetchone()[0]
    def no_limit_value(self):
        # PostgreSQL needs no sentinel value; LIMIT is simply omitted
        return None
    def prepare_sql_script(self, sql):
        # psycopg2 can execute a multi-statement string as one script
        return [sql]
    def quote_name(self, name):
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name
    def set_time_zone_sql(self):
        return "SET TIME ZONE %s"
    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return a list of SQL statements that empty ``tables``."""
        if tables:
            # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
            # us to truncate tables referenced by a foreign key in any other
            # table.
            tables_sql = ', '.join(
                style.SQL_FIELD(self.quote_name(table)) for table in tables)
            if allow_cascade:
                sql = ['%s %s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    tables_sql,
                    style.SQL_KEYWORD('CASCADE'),
                )]
            else:
                sql = ['%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    tables_sql,
                )]
            # also reset the sequences so new rows start from 1 again
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []
    def sequence_reset_by_name_sql(self, style, sequences):
        # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
        # to reset sequence indices
        sql = []
        for sequence_info in sequences:
            table_name = sequence_info['table']
            column_name = sequence_info['column']
            if not (column_name and len(column_name) > 0):
                # This will be the case if it's an m2m using an autogenerated
                # intermediate table (see BaseDatabaseIntrospection.sequence_list)
                column_name = 'id'
            sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" %
                       (style.SQL_KEYWORD('SELECT'),
                        style.SQL_TABLE(self.quote_name(table_name)),
                        style.SQL_FIELD(column_name))
                       )
        return sql
    def tablespace_sql(self, tablespace, inline=False):
        # ``inline`` means the clause is attached to an index definition
        if inline:
            return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
        else:
            return "TABLESPACE %s" % self.quote_name(tablespace)
    def sequence_reset_sql(self, style, model_list):
        """Return SQL setting each model's sequence to its current max pk."""
        from django.db import models
        output = []
        qn = self.quote_name
        for model in model_list:
            # Use `coalesce` to set the sequence for each model to the max pk value if there are records,
            # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
            # if there are records (as the max pk value is already in use), otherwise set it to false.
            # Use pg_get_serial_sequence to get the underlying sequence name from the table name
            # and column name (available since PostgreSQL 8)
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    output.append(
                        "%s setval(pg_get_serial_sequence('%s','%s'), "
                        "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
                            style.SQL_KEYWORD('SELECT'),
                            style.SQL_TABLE(qn(model._meta.db_table)),
                            style.SQL_FIELD(f.column),
                            style.SQL_FIELD(qn(f.column)),
                            style.SQL_FIELD(qn(f.column)),
                            style.SQL_KEYWORD('IS NOT'),
                            style.SQL_KEYWORD('FROM'),
                            style.SQL_TABLE(qn(model._meta.db_table)),
                        )
                    )
                    break  # Only one AutoField is allowed per model, so don't bother continuing.
            for f in model._meta.many_to_many:
                if not f.remote_field.through:
                    # auto-created m2m table: its pk column is always 'id'
                    output.append(
                        "%s setval(pg_get_serial_sequence('%s','%s'), "
                        "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % (
                            style.SQL_KEYWORD('SELECT'),
                            style.SQL_TABLE(qn(f.m2m_db_table())),
                            style.SQL_FIELD('id'),
                            style.SQL_FIELD(qn('id')),
                            style.SQL_FIELD(qn('id')),
                            style.SQL_KEYWORD('IS NOT'),
                            style.SQL_KEYWORD('FROM'),
                            style.SQL_TABLE(qn(f.m2m_db_table()))
                        )
                    )
        return output
    def prep_for_iexact_query(self, x):
        # no transform needed: iexact is handled with UPPER() in lookup_cast
        return x
    def max_name_length(self):
        """
        Returns the maximum length of an identifier.
        Note that the maximum length of an identifier is 63 by default, but can
        be changed by recompiling PostgreSQL after editing the NAMEDATALEN
        macro in src/include/pg_config_manual.h .
        This implementation simply returns 63, but can easily be overridden by a
        custom database backend that inherits most of its behavior from this one.
        """
        return 63
    def distinct_sql(self, fields):
        # PostgreSQL supports DISTINCT ON (field, ...)
        if fields:
            return 'DISTINCT ON (%s)' % ', '.join(fields)
        else:
            return 'DISTINCT'
    def last_executed_query(self, cursor, sql, params):
        # http://initd.org/psycopg/docs/cursor.html#cursor.query
        # The query attribute is a Psycopg extension to the DB API 2.0.
        if cursor.query is not None:
            return cursor.query.decode('utf-8')
        return None
    def return_insert_id(self):
        # RETURNING lets INSERT hand back the new pk without a second query
        return "RETURNING %s", ()
    def bulk_insert_sql(self, fields, num_values):
        items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
        return "VALUES " + ", ".join([items_sql] * num_values)
    def adapt_datefield_value(self, value):
        # psycopg2 adapts date objects natively
        return value
    def adapt_datetimefield_value(self, value):
        # psycopg2 adapts datetime objects natively
        return value
    def adapt_timefield_value(self, value):
        # psycopg2 adapts time objects natively
        return value
    def adapt_ipaddressfield_value(self, value):
        # wrap in psycopg2's Inet adapter for inet/cidr columns
        if value:
            return Inet(value)
        return None
| |
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
import os
import sys
from argparse import ArgumentParser
from io import TextIOBase
import django
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style, no_style
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
class CommandError(Exception):
    """
    Exception class indicating a problem while executing a management
    command.
    If this exception is raised during the execution of a management
    command, it will be caught and turned into a nicely-printed error
    message to the appropriate output stream (i.e., stderr); as a
    result, raising this exception (with a sensible description of the
    error) is the preferred way to indicate that something has gone
    wrong in the execution of a command.
    """
    # No behavior of its own: a distinguished type (e.g. raised by
    # CommandParser.error() when a command is called programmatically).
    pass
class SystemCheckError(CommandError):
    """
    The system check framework detected unrecoverable errors.
    """
    # Subclass of CommandError so it is reported through the same error path.
    pass
class CommandParser(ArgumentParser):
    """
    Customized ArgumentParser class to improve some error messages and prevent
    SystemExit in several occasions, as SystemExit is unacceptable when a
    command is called programmatically.
    """
    def __init__(self, cmd, **kwargs):
        # keep a reference to the owning command so error() can tell whether
        # it was invoked from the command line
        self.cmd = cmd
        super().__init__(**kwargs)
    def parse_args(self, args=None, namespace=None):
        # Catch missing argument for a better error message
        # NOTE(review): the ``any(...)`` clause never changes the outcome --
        # it is short-circuited when ``args`` is truthy and vacuously False
        # when ``args`` is empty; with ``args=None`` it would raise TypeError.
        # Confirm callers always pass a list before relying on this.
        if (hasattr(self.cmd, 'missing_args_message') and
                not (args or any(not arg.startswith('-') for arg in args))):
            self.error(self.cmd.missing_args_message)
        return super().parse_args(args, namespace)
    def error(self, message):
        # raise instead of exiting when invoked programmatically, so callers
        # can catch CommandError rather than SystemExit
        if self.cmd._called_from_command_line:
            super().error(message)
        else:
            raise CommandError("Error: %s" % message)
def handle_default_options(options):
    """
    Include any default options that all commands should accept here
    so that ManagementUtility can handle them before searching for
    user commands.
    """
    # Apply --settings and --pythonpath to the process environment.
    settings_module = options.settings
    if settings_module:
        os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
    extra_path = options.pythonpath
    if extra_path:
        sys.path.insert(0, extra_path)
class OutputWrapper(TextIOBase):
    """
    Wrapper around stdout/stderr
    """
    @property
    def style_func(self):
        # callable applied to every message before writing (e.g. colorizer)
        return self._style_func
    @style_func.setter
    def style_func(self, style_func):
        # only colorize when writing to a real terminal; otherwise fall
        # back to the identity function
        if style_func and self.isatty():
            self._style_func = style_func
        else:
            self._style_func = lambda x: x
    def __init__(self, out, style_func=None, ending='\n'):
        self._out = out
        # NOTE(review): the ``style_func`` argument is accepted but ignored
        # here -- the style is always reset to the identity/None; confirm
        # this is intentional before relying on the parameter.
        self.style_func = None
        self.ending = ending
    def __getattr__(self, name):
        # delegate everything else (flush, fileno, ...) to the wrapped stream
        return getattr(self._out, name)
    def isatty(self):
        return hasattr(self._out, 'isatty') and self._out.isatty()
    def write(self, msg, style_func=None, ending=None):
        # append the configured line ending unless already present
        ending = self.ending if ending is None else ending
        if ending and not msg.endswith(ending):
            msg += ending
        style_func = style_func or self.style_func
        self._out.write(style_func(msg))
class BaseCommand:
    """
    The base class from which all management commands ultimately
    derive.
    Use this class if you want access to all of the mechanisms which
    parse the command-line arguments and work out what code to call in
    response; if you don't need to change any of that behavior,
    consider using one of the subclasses defined in this file.
    If you are interested in overriding/customizing various aspects of
    the command-parsing and -execution behavior, the normal flow works
    as follows:
    1. ``django-admin`` or ``manage.py`` loads the command class
       and calls its ``run_from_argv()`` method.
    2. The ``run_from_argv()`` method calls ``create_parser()`` to get
       an ``ArgumentParser`` for the arguments, parses them, performs
       any environment changes requested by options like
       ``pythonpath``, and then calls the ``execute()`` method,
       passing the parsed arguments.
    3. The ``execute()`` method attempts to carry out the command by
       calling the ``handle()`` method with the parsed arguments; any
       output produced by ``handle()`` will be printed to standard
       output and, if the command is intended to produce a block of
       SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
    4. If ``handle()`` or ``execute()`` raised any exception (e.g.
       ``CommandError``), ``run_from_argv()`` will instead print an error
       message to ``stderr``.
    Thus, the ``handle()`` method is typically the starting point for
    subclasses; many built-in commands and command types either place
    all of their logic in ``handle()``, or perform some additional
    parsing work in ``handle()`` and then delegate from it to more
    specialized methods as needed.
    Several attributes affect behavior at various steps along the way:
    ``help``
        A short description of the command, which will be printed in
        help messages.
    ``output_transaction``
        A boolean indicating whether the command outputs SQL
        statements; if ``True``, the output will automatically be
        wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
        ``False``.
    ``requires_migrations_checks``
        A boolean; if ``True``, the command prints a warning if the set of
        migrations on disk don't match the migrations in the database.
    ``requires_system_checks``
        A boolean; if ``True``, entire Django project will be checked for errors
        prior to executing the command. Default value is ``True``.
        To validate an individual application's models
        rather than all applications' models, call
        ``self.check(app_configs)`` from ``handle()``, where ``app_configs``
        is the list of application's configuration provided by the
        app registry.
    ``leave_locale_alone``
        A boolean indicating whether the locale set in settings should be
        preserved during the execution of the command instead of translations
        being deactivated.
        Default value is ``False``.
        Make sure you know what you are doing if you decide to change the value
        of this option in your custom command if it creates database content
        that is locale-sensitive and such content shouldn't contain any
        translations (like it happens e.g. with django.contrib.auth
        permissions) as activating any locale might cause unintended effects.
    """
    # Metadata about this command.
    help = ''
    # Configuration shortcuts that alter various logic.
    _called_from_command_line = False  # consulted by CommandParser.error()
    output_transaction = False  # Whether to wrap the output in a "BEGIN; COMMIT;"
    leave_locale_alone = False
    requires_migrations_checks = False
    requires_system_checks = True
def __init__(self, stdout=None, stderr=None, no_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color:
self.style = no_style()
else:
self.style = color_style()
self.stderr.style_func = self.style.ERROR
def get_version(self):
"""
Return the Django version, which should be correct for all built-in
Django commands. User-supplied commands can override this method to
return their own version.
"""
return django.get_version()
    def create_parser(self, prog_name, subcommand):
        """
        Create and return the ``ArgumentParser`` which will be used to
        parse the arguments to this command.

        ``prog_name`` is typically the invoking script name (e.g. manage.py)
        and ``subcommand`` the management command name; together they form
        the program name shown in usage/help text.
        """
        parser = CommandParser(
            self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
            description=self.help or None,
        )
        # Options shared by every management command.
        parser.add_argument('--version', action='version', version=self.get_version())
        parser.add_argument(
            '-v', '--verbosity', action='store', dest='verbosity', default=1,
            type=int, choices=[0, 1, 2, 3],
            help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output',
        )
        parser.add_argument(
            '--settings',
            help=(
                'The Python path to a settings module, e.g. '
                '"myproject.settings.main". If this isn\'t provided, the '
                'DJANGO_SETTINGS_MODULE environment variable will be used.'
            ),
        )
        parser.add_argument(
            '--pythonpath',
            help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".',
        )
        parser.add_argument('--traceback', action='store_true', help='Raise on CommandError exceptions')
        parser.add_argument(
            '--no-color', action='store_true', dest='no_color', default=False,
            help="Don't colorize the command output.",
        )
        # Let subclasses register their own command-specific arguments.
        self.add_arguments(parser)
        return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
    def run_from_argv(self, argv):
        """
        Set up any environment changes requested (e.g., Python path
        and Django settings), then run this command. If the
        command raises a ``CommandError``, intercept it and print it sensibly
        to stderr. If the ``--traceback`` option is present or the raised
        ``Exception`` is not ``CommandError``, raise it.
        """
        self._called_from_command_line = True
        # argv[0] is the program name, argv[1] the subcommand name.
        parser = self.create_parser(argv[0], argv[1])
        options = parser.parse_args(argv[2:])
        cmd_options = vars(options)
        # Move positional args out of options to mimic legacy optparse
        args = cmd_options.pop('args', ())
        handle_default_options(options)
        try:
            self.execute(*args, **cmd_options)
        except Exception as e:
            # With --traceback, or for non-CommandError exceptions, propagate
            # the raw exception instead of printing a friendly message.
            if options.traceback or not isinstance(e, CommandError):
                raise
            # SystemCheckError takes care of its own formatting.
            if isinstance(e, SystemCheckError):
                self.stderr.write(str(e), lambda x: x)
            else:
                self.stderr.write('%s: %s' % (e.__class__.__name__, e))
            # Exit with a non-zero status so shells see the failure.
            sys.exit(1)
        finally:
            try:
                connections.close_all()
            except ImproperlyConfigured:
                # Ignore if connections aren't setup at this point (e.g. no
                # configured settings).
                pass
    def execute(self, *args, **options):
        """
        Try to execute this command, performing system checks if needed (as
        controlled by the ``requires_system_checks`` attribute, except if
        force-skipped).
        """
        if options['no_color']:
            self.style = no_style()
            self.stderr.style_func = None
        # Allow callers (e.g. call_command) to redirect the output streams.
        if options.get('stdout'):
            self.stdout = OutputWrapper(options['stdout'])
        if options.get('stderr'):
            self.stderr = OutputWrapper(options['stderr'], self.stderr.style_func)
        saved_locale = None
        if not self.leave_locale_alone:
            # Deactivate translations, because django-admin creates database
            # content like permissions, and those shouldn't contain any
            # translations.
            from django.utils import translation
            saved_locale = translation.get_language()
            translation.deactivate_all()
        try:
            if self.requires_system_checks and not options.get('skip_checks'):
                self.check()
            if self.requires_migrations_checks:
                self.check_migrations()
            output = self.handle(*args, **options)
            if output:
                if self.output_transaction:
                    # Wrap raw SQL output in the target database's
                    # BEGIN/COMMIT statements.
                    connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
                    output = '%s\n%s\n%s' % (
                        self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()),
                        output,
                        self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()),
                    )
                self.stdout.write(output)
        finally:
            # Restore the locale that was active before execute() ran.
            if saved_locale is not None:
                translation.activate(saved_locale)
        return output
    def _run_checks(self, **kwargs):
        """Delegate to the system check framework (overridable hook)."""
        return checks.run_checks(**kwargs)
    def check(self, app_configs=None, tags=None, display_num_errors=False,
              include_deployment_checks=False, fail_level=checks.ERROR):
        """
        Use the system check framework to validate entire Django project.
        Raise CommandError for any serious message (error or critical errors).
        If there are only light messages (like warnings), print them to stderr
        and don't raise an exception.
        """
        all_issues = self._run_checks(
            app_configs=app_configs,
            tags=tags,
            include_deployment_checks=include_deployment_checks,
        )
        header, body, footer = "", "", ""
        visible_issue_count = 0  # excludes silenced warnings
        if all_issues:
            # Bucket the issues by severity level, dropping silenced ones.
            debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
            infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
            warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
            errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()]
            criticals = [e for e in all_issues if checks.CRITICAL <= e.level and not e.is_silenced()]
            # Most severe group first in the rendered report.
            sorted_issues = [
                (criticals, 'CRITICALS'),
                (errors, 'ERRORS'),
                (warnings, 'WARNINGS'),
                (infos, 'INFOS'),
                (debugs, 'DEBUGS'),
            ]
            for issues, group_name in sorted_issues:
                if issues:
                    visible_issue_count += len(issues)
                    # Serious issues are styled as errors, the rest as warnings.
                    formatted = (
                        self.style.ERROR(str(e))
                        if e.is_serious()
                        else self.style.WARNING(str(e))
                        for e in issues)
                    formatted = "\n".join(sorted(formatted))
                    body += '\n%s:\n%s\n' % (group_name, formatted)
            if visible_issue_count:
                header = "System check identified some issues:\n"
        if display_num_errors:
            if visible_issue_count:
                footer += '\n'
            footer += "System check identified %s (%s silenced)." % (
                "no issues" if visible_issue_count == 0 else
                "1 issue" if visible_issue_count == 1 else
                "%s issues" % visible_issue_count,
                len(all_issues) - visible_issue_count,
            )
        # Any unsilenced issue at/above fail_level aborts the command.
        if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues):
            msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
            raise SystemCheckError(msg)
        else:
            msg = header + body + footer
        if msg:
            if visible_issue_count:
                self.stderr.write(msg, lambda x: x)
            else:
                self.stdout.write(msg)
def check_migrations(self):
"""
Print a warning if the set of migrations on disk don't match the
migrations in the database.
"""
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
except MigrationSchemaMissing:
self.stdout.write(self.style.NOTICE(
"\nNot checking migrations as it is not possible to access/create the django_migrations table."
))
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
apps_waiting_migration = sorted(set(migration.app_label for migration, backwards in plan))
self.stdout.write(
self.style.NOTICE(
"\nYou have %(unpplied_migration_count)s unapplied migration(s). "
"Your project may not work properly until you apply the "
"migrations for app(s): %(apps_waiting_migration)s." % {
"unpplied_migration_count": len(plan),
"apps_waiting_migration": ", ".join(apps_waiting_migration),
}
)
)
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
class AppCommand(BaseCommand):
    """
    A management command which takes one or more installed application labels
    as arguments, and does something with each of them.

    Rather than implementing ``handle()``, subclasses must implement
    ``handle_app_config()``, which will be called once for each application.
    """
    missing_args_message = "Enter at least one application label."

    def add_arguments(self, parser):
        parser.add_argument('args', metavar='app_label', nargs='+', help='One or more application label.')

    def handle(self, *app_labels, **options):
        """Resolve each label to an AppConfig and delegate to handle_app_config()."""
        from django.apps import apps
        try:
            app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
        except (LookupError, ImportError) as e:
            raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
        output = []
        for app_config in app_configs:
            app_output = self.handle_app_config(app_config, **options)
            if app_output:
                output.append(app_output)
        return '\n'.join(output)

    def handle_app_config(self, app_config, **options):
        """
        Perform the command's actions for app_config, an AppConfig instance
        corresponding to an application label given on the command line.
        """
        # Bug fix: the implicit string concatenation was missing a space, so
        # the message rendered as "...must providea handle_app_config()...".
        raise NotImplementedError(
            "Subclasses of AppCommand must provide "
            "a handle_app_config() method.")
class LabelCommand(BaseCommand):
    """
    A management command which takes one or more arbitrary arguments
    (labels) on the command line, and does something with each of
    them.

    Rather than implementing ``handle()``, subclasses must implement
    ``handle_label()``, which will be called once for each label.

    If the arguments should be names of installed applications, use
    ``AppCommand`` instead.
    """
    label = 'label'
    missing_args_message = "Enter at least one %s." % label

    def add_arguments(self, parser):
        parser.add_argument('args', metavar=self.label, nargs='+')

    def handle(self, *labels, **options):
        """Invoke handle_label() per label and join the non-empty results."""
        results = []
        for item in labels:
            produced = self.handle_label(item, **options)
            if produced:
                results.append(produced)
        return '\n'.join(results)

    def handle_label(self, label, **options):
        """
        Perform the command's actions for ``label``, which will be the
        string as given on the command line.
        """
        raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
| |
from __future__ import annotations
import json
import logging
import os
import os.path
from datetime import datetime
from tlz import first, merge
from tornado import escape
from tornado.websocket import WebSocketHandler
from dask.utils import format_bytes, format_time
from ...diagnostics.websocket import WebsocketPlugin
from ...metrics import time
from ...utils import log_errors
from ..utils import RequestHandler, redirect
# Helper callables exposed to the rendered templates, keyed by function name.
ns = {
    func.__name__: func
    for func in [format_bytes, format_time, datetime.fromtimestamp, time]
}

# Relative path from the rendered pages back to the static asset root.
rel_path_statics = {"rel_path_statics": "../../.."}

logger = logging.getLogger(__name__)
class Workers(RequestHandler):
    """Render the overview page listing the scheduler's workers."""

    def get(self):
        with log_errors():
            context = merge(
                self.server.__dict__,
                self.server.__pdict__,
                ns,
                self.extra,
                rel_path_statics,
            )
            self.render(
                "workers.html",
                title="Workers",
                scheduler=self.server,
                **context
            )
class Worker(RequestHandler):
    """Render the detail page for a single worker address."""

    def get(self, worker):
        worker = escape.url_unescape(worker)
        # Unknown worker addresses get a plain 404 rather than a render error.
        if worker not in self.server.workers:
            self.send_error(404)
            return
        with log_errors():
            context = merge(
                self.server.__dict__,
                self.server.__pdict__,
                ns,
                self.extra,
                rel_path_statics,
            )
            self.render(
                "worker.html",
                title="Worker: " + worker,
                scheduler=self.server,
                Worker=worker,
                **context
            )
class Task(RequestHandler):
    """Render the detail page for a single task key."""

    def get(self, task):
        task = escape.url_unescape(task)
        # Unknown task keys get a plain 404 rather than a render error.
        if task not in self.server.tasks:
            self.send_error(404)
            return
        with log_errors():
            context = merge(
                self.server.__dict__,
                self.server.__pdict__,
                ns,
                self.extra,
                rel_path_statics,
            )
            self.render(
                "task.html",
                title="Task: " + task,
                Task=task,
                scheduler=self.server,
                **context
            )
class Logs(RequestHandler):
    """Render the scheduler's own log records."""

    def get(self):
        with log_errors():
            entries = self.server.get_logs()
            self.render(
                "logs.html",
                title="Logs",
                logs=entries,
                **merge(self.extra, rel_path_statics)
            )
class WorkerLogs(RequestHandler):
    """Fetch and render the log records of one worker."""

    async def get(self, worker):
        with log_errors():
            worker = escape.url_unescape(worker)
            response = await self.server.get_worker_logs(workers=[worker])
            self.render(
                "logs.html",
                title="Logs: " + worker,
                logs=response[worker],
                **merge(self.extra, rel_path_statics)
            )
class WorkerCallStacks(RequestHandler):
    """Render call stacks for all tasks currently processing on a worker."""

    async def get(self, worker):
        with log_errors():
            worker = escape.url_unescape(worker)
            keys = self.server.processing[worker]
            stacks = await self.server.get_call_stack(keys=keys)
            self.render(
                "call-stack.html",
                title="Call Stacks: " + worker,
                call_stack=stacks,
                **merge(self.extra, rel_path_statics)
            )
class TaskCallStack(RequestHandler):
    """Render the call stack of a single task, when it is actively running."""

    async def get(self, key):
        with log_errors():
            key = escape.url_unescape(key)
            stack = await self.server.get_call_stack(keys=[key])
            if not stack:
                self.write(
                    "<p>Task not actively running. "
                    "It may be finished or not yet started</p>"
                )
            else:
                self.render(
                    "call-stack.html",
                    title="Call Stack: " + key,
                    call_stack=stack,
                    **merge(self.extra, rel_path_statics)
                )
class IndividualPlots(RequestHandler):
    """Serve a JSON mapping of individual-plot titles to their URLs."""

    def get(self):
        try:
            from bokeh.server.tornado import BokehTornado

            # First Bokeh application mounted on the HTTP server; raises
            # StopIteration (handled below) when none exists.
            bokeh_application = first(
                app
                for app in self.server.http_application.applications
                if isinstance(app, BokehTornado)
            )
            individual_bokeh = {
                uri.strip("/").replace("-", " ").title(): uri
                for uri in bokeh_application.app_paths
                if uri.lstrip("/").startswith("individual-")
                and not uri.endswith(".json")
            }
            # Static HTML plots shipped alongside the dashboard.
            static_dir = os.path.join(os.path.dirname(__file__), "..", "static")
            individual_static = {}
            for fname in os.listdir(static_dir):
                if fname.lstrip("/").startswith("individual-") and fname.endswith(".html"):
                    label = (
                        fname.strip("/").replace(".html", "").replace("-", " ").title()
                    )
                    individual_static[label] = "/statics/" + fname
            self.write({**individual_bokeh, **individual_static})
        except (ImportError, StopIteration):
            # No bokeh installed, or no Bokeh application mounted.
            self.write({})
class EventstreamHandler(WebSocketHandler):
    """Push scheduler events to websocket clients via WebsocketPlugin."""

    def initialize(self, dask_server=None, extra=None):
        self.server = dask_server
        self.extra = extra or {}
        self.plugin = WebsocketPlugin(self, self.server)
        self.server.add_plugin(self.plugin)

    def send(self, name, data):
        data["name"] = name
        # Drop bytes objects for now
        for key in [k for k, v in data.items() if isinstance(v, bytes)]:
            del data[key]
        self.write_message(data)

    def open(self):
        # Replay the current worker set so a new client starts fully populated.
        for worker in self.server.workers:
            self.plugin.add_worker(self.server, worker)

    def on_message(self, message):
        payload = json.loads(message)
        if payload["name"] == "ping":
            self.send("pong", {"timestamp": str(datetime.now())})

    def on_close(self):
        self.server.remove_plugin(name=self.plugin.name)
# (url pattern, handler class, handler kwargs) triples for the scheduler's
# "info" dashboard pages plus the websocket event stream.
routes: list[tuple] = [
    (r"info", redirect("info/main/workers.html"), {}),
    (r"info/main/workers.html", Workers, {}),
    (r"info/worker/(.*).html", Worker, {}),
    (r"info/task/(.*).html", Task, {}),
    (r"info/main/logs.html", Logs, {}),
    (r"info/call-stacks/(.*).html", WorkerCallStacks, {}),
    (r"info/call-stack/(.*).html", TaskCallStack, {}),
    (r"info/logs/(.*).html", WorkerLogs, {}),
    (r"individual-plots.json", IndividualPlots, {}),
    (r"eventstream", EventstreamHandler, {}),
]
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AffineLinearOperator bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.linalg import linear_operator
# Public API of this module.
__all__ = [
    "AffineLinearOperator",
]
class AffineLinearOperator(bijector.Bijector):
  """Compute `Y = g(X; shift, scale) = scale @ X + shift`.

  `shift` is a numeric `Tensor` and `scale` is a `LinearOperator`.

  If `X` is a scalar then the forward transformation is: `scale * X + shift`
  where `*` denotes the scalar product.

  Note: we don't always simply transpose `X` (but write it this way for
  brevity). Actually the input `X` undergoes the following transformation
  before being premultiplied by `scale`:

  1. If there are no sample dims, we call `X = tf.expand_dims(X, 0)`, i.e.,
     `new_sample_shape = [1]`. Otherwise do nothing.
  2. The sample shape is flattened to have one dimension, i.e.,
     `new_sample_shape = [n]` where `n = tf.reduce_prod(old_sample_shape)`.
  3. The sample dim is cyclically rotated left by 1, i.e.,
     `new_shape = [B1,...,Bb, k, n]` where `n` is as above, `k` is the
     event_shape, and `B1,...,Bb` are the batch shapes for each of `b` batch
     dimensions.

  (For more details see `shape.make_batch_of_event_sample_matrices`.)

  The result of the above transformation is that `X` can be regarded as a batch
  of matrices where each column is a draw from the distribution. After
  premultiplying by `scale`, we take the inverse of this procedure. The input
  `Y` also undergoes the same transformation before/after premultiplying by
  `inv(scale)`.

  Example Use:

  ```python
  linalg = tf.linalg

  x = [1., 2, 3]

  shift = [-1., 0., 1]
  diag = [1., 2, 3]
  scale = linalg.LinearOperatorDiag(diag)
  affine = AffineLinearOperator(shift, scale)
  # In this case, `forward` is equivalent to:
  # y = scale @ x + shift
  y = affine.forward(x)  # [0., 4, 10]

  shift = [2., 3, 1]
  tril = [[1., 0, 0],
          [2, 1, 0],
          [3, 2, 1]]
  scale = linalg.LinearOperatorLowerTriangular(tril)
  affine = AffineLinearOperator(shift, scale)
  # In this case, `forward` is equivalent to:
  # np.squeeze(np.matmul(tril, np.expand_dims(x, -1)), -1) + shift
  y = affine.forward(x)  # [3., 7, 11]
  ```

  """

  def __init__(self,
               shift=None,
               scale=None,
               event_ndims=1,
               validate_args=False,
               name="affine_linear_operator"):
    """Instantiates the `AffineLinearOperator` bijector.

    Args:
      shift: Floating-point `Tensor`.
      scale: Subclass of `LinearOperator`. Represents the (batch) positive
        definite matrix `M` in `R^{k x k}`.
      event_ndims: Scalar `integer` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `event_ndims` is not 0 or 1.
      TypeError: if `scale` is not a `LinearOperator`.
      TypeError: if `shift.dtype` does not match `scale.dtype`.
      ValueError: if not `scale.is_non_singular`.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args
    graph_parents = []
    with self._name_scope("init", values=[shift]):
      # Prefer static (graph-construction-time) validation of event_ndims;
      # fall back to a runtime assertion when its value is not yet known.
      event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
      if tensor_util.constant_value(event_ndims) is not None:
        event_ndims = tensor_util.constant_value(event_ndims)
        if event_ndims not in (0, 1):
          raise ValueError("event_ndims({}) was not 0 or 1".format(event_ndims))
      else:
        if validate_args:
          # Shape tool will catch if event_ndims is negative.
          event_ndims = control_flow_ops.with_dependencies(
              [check_ops.assert_less(
                  event_ndims, 2, message="event_ndims must be 0 or 1")],
              event_ndims)
        graph_parents += [event_ndims]
      # In the absence of `loc` and `scale`, we'll assume `dtype` is `float32`.
      dtype = dtypes.float32
      if shift is not None:
        shift = ops.convert_to_tensor(shift, name="shift")
        graph_parents += [shift]
        dtype = shift.dtype.base_dtype
      self._shift = shift
      if scale is not None:
        if (shift is not None and
            shift.dtype.base_dtype != scale.dtype.base_dtype):
          raise TypeError(
              "shift.dtype({}) is incompatible with scale.dtype({}).".format(
                  shift.dtype, scale.dtype))
        if not isinstance(scale, linear_operator.LinearOperator):
          raise TypeError("scale is not an instance of tf.LinearOperator")
        if validate_args and not scale.is_non_singular:
          raise ValueError("Scale matrix must be non-singular.")
        graph_parents += scale.graph_parents
        # `scale` acts on matrices, so everything except the trailing two
        # dimensions is treated as batch.
        if scale.tensor_rank is not None:
          batch_ndims = scale.tensor_rank - 2
        else:
          batch_ndims = scale.tensor_rank_tensor() - 2
          graph_parents += [batch_ndims]
        if scale.dtype is not None:
          dtype = scale.dtype.base_dtype
      else:
        batch_ndims = 0  # We won't need shape inference when scale is None.
      self._scale = scale
      self._shaper = _DistributionShape(
          batch_ndims=batch_ndims,
          event_ndims=event_ndims,
          validate_args=validate_args)
      super(AffineLinearOperator, self).__init__(
          event_ndims=event_ndims,
          graph_parents=graph_parents,
          is_constant_jacobian=True,
          dtype=dtype,
          validate_args=validate_args,
          name=name)

  @property
  def shift(self):
    """The `shift` `Tensor` in `Y = scale @ X + shift`."""
    return self._shift

  @property
  def scale(self):
    """The `scale` `LinearOperator` in `Y = scale @ X + shift`."""
    return self._scale

  def _forward(self, x):
    y = x
    if self.scale is not None:
      # Reshape samples into a batch of column matrices so the
      # LinearOperator can premultiply them.
      y, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
          y, expand_batch_dim=False)
      with ops.control_dependencies(self._maybe_collect_assertions() if
                                    self.validate_args else []):
        y = self.scale.matmul(y)
      y = self._shaper.undo_make_batch_of_event_sample_matrices(
          y, sample_shape, expand_batch_dim=False)
    if self.shift is not None:
      y += self.shift
    return y

  def _inverse(self, y):
    x = y
    if self.shift is not None:
      x -= self.shift
    if self.scale is not None:
      x, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
          x, expand_batch_dim=False)
      # Solve fails if the op is singular so we may safely skip this assertion.
      x = self.scale.solve(x)
      x = self._shaper.undo_make_batch_of_event_sample_matrices(
          x, sample_shape, expand_batch_dim=False)
    return x

  def _inverse_log_det_jacobian(self, y):
    # The Jacobian is constant, so it is simply the negated forward term.
    return -self._forward_log_det_jacobian(y)

  def _forward_log_det_jacobian(self, x):  # pylint: disable=unused-argument
    if self.scale is None:
      return constant_op.constant(0, dtype=x.dtype.base_dtype)
    with ops.control_dependencies(self._maybe_collect_assertions() if
                                  self.validate_args else []):
      return self.scale.log_abs_determinant()

  def _maybe_collect_assertions(self):
    # Not every LinearOperator implements assert_non_singular; treat the
    # missing implementation as "nothing to assert".
    try:
      return [self.scale.assert_non_singular()]
    except NotImplementedError:
      pass
    return []
| |
#!/usr/bin/python
import participantCollection
import re
import datetime
import pyperclip
# Module-level state for the daily challenge post (Python 2 script).
currentMonthIndex = datetime.date.today().month
#TODO: need to figure out how to get total days in current month...
# NOTE(review): calendar.monthrange(year, month)[1] would give the real day
# count instead of the hard-coded 30 — confirm before relying on this.
currentMonthTotalDays = 30
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[currentMonthIndex]
# Wraps December -> January.
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}[nextMonthIndex]
currentDayOfMonthIndex = datetime.date.today().day
# TODO: testing...
# WARNING: the next line overrides the real day-of-month with a fixed test
# value; remove it before running for real.
currentDayOfMonthIndex = 29
# TODO: more...
currentDayOfMonthName = {1:'first', 2:'second', 3:'third', 4:'fourth', 5:'fifth', 6:'sixth', 7:'seventh', 8:'eighth', 9:'ninth', 10:'tenth', 11:'eleventh', 12:'twelfth', 13:'thirteenth', 14:'fourteenth', 15:'fifteenth', 16:'sixteenth', 17:'seventeenth', 18:'eighteenth', 19:'nineteenth', 20:'twentieth', 21:'twenty-first', 22:'twenty-second', 23:'twenty-third', 24:'twenty-fourth', 25:'twenty-fifth', 26:'twenty-sixth', 27:'twenty-seventh', 28:'twenty-eighth', 29:'twenty-ninth', 30:'thirtieth', 31:'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}[datetime.date.today().weekday()]
# Shadows the imported module name on purpose (existing script convention).
participantCollection = participantCollection.ParticipantCollection()
numberStillIn = participantCollection.sizeOfParticipantsWhoAreStillIn()
initialNumber = participantCollection.size()
# NOTE(review): integer division under Python 2 — 100*n/m truncates before
# round(); confirm that is the intended percentage behavior.
percentStillIn = int(round(100*numberStillIn/initialNumber,0))
# print "There are currently **" + str(numberStillIn) + " out of " + str(initialNumber) +"** original participants. That's **" + str(int(round(100*numberStillIn/initialNumber,0))) + "%** Here is the list of participants still with the challenge:\n"
def stringToPrintLegacy():
    """Legacy single-string rendering of the participant status blurb."""
    text = "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
    # Plain-text placeholders; str.replace matches the old re.sub behavior.
    text = text.replace('NUMBER_STILL_IN', str(numberStillIn))
    text = text.replace('INITIAL_NUMBER', str(initialNumber))
    text = text.replace('PERCENT_STILL_IN', str(percentStillIn))
    for participant in participantCollection.participantsWhoAreStillIn():
        entry = "/u/" + participant.name
        if not participant.hasCheckedIn:
            # "~" marks participants who have not yet checked in.
            entry += " ~"
        text += entry + "\n\n"
    return text
def templateForParticipants():
    """Render the roster of still-in participants, one blank-line-separated entry each."""
    chunks = []
    for participant in participantCollection.participantsWhoAreStillIn():
        entry = "/u/" + participant.name
        if not participant.hasCheckedIn:
            # "~" marks participants who have not yet checked in.
            entry += " ~"
        chunks.append(entry + "\n\n")
    return "".join(chunks)
def templateForParticipantsOnFinalDay():
    """Render the final-day roster: checked-in participants, then at-risk ones."""
    lines = []
    lines.append("These participants have checked in at least once in the last 15 days:\n")
    lines.append("\n")
    for participant in participantCollection.participantsWhoAreStillInAndHaveCheckedIn():
        lines.append("/u/" + participant.name + "\n")
    lines.append("\n")
    lines.append("These participants have not reported a relapse, so they are still in the running, but **if they do not check in by the end of today, they will be removed from the list, and will not be considered victorious**:\n")
    lines.append("\n")
    for participant in participantCollection.participantsWhoAreStillInAndHaveNotCheckedIn():
        lines.append("/u/" + participant.name + " ~\n")
    lines.append("\n")
    return "".join(lines)
def templateFor1():
    """Build the first-of-month post body (prints debug markers around the build)."""
    print('1\n\n')
    print("=============================================================")
    segments = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. We will no longer be accepting new signups. Best of luck to everyone here!\n",
        "\n",
        "Here's how this thing works:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n",
        "\n",
        "Here are our **INITIAL_NUMBER** original participants:\n\n",
    ]
    result = "".join(segments) + templateForParticipants()
    print("=============================================================")
    return result
def templateFor2to9():
    """Build the post body used on days 2 through 9 of the month."""
    print('2 to 9\n\n')
    segments = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n",
        "\n",
        "Guidelines:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n",
    ]
    return "".join(segments) + templateForParticipants()
def templateFor10to14():
    """Build the post body for days 10-14, including the check-in countdown."""
    print('10 to 14\n\n')
    segments = [
        "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n",
        "\n",
        "**THE COUNTDOWN: Attention everyone!** You have " + str(15-currentDayOfMonthIndex) + " days to make an update comment (if you haven't already) to be counted as an active participant! **Otherwise your name will be REMOVED from the list** on CURRENT_MONTH_INDEX/15!!\n",
        "\n",
        "Guidelines:\n",
        "\n",
        "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n",
        "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n",
        "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n",
        '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n',
        "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n",
        "\n",
        "Good luck!\n",
        "\n",
        "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n",
        "\n",
        "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n",
    ]
    return "".join(segments) + templateForParticipants()
def templateFor15():
print '15\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "**THIS IS YOUR LAST DAY TO CHECK IN** (if you haven't already) **BEFORE YOUR NAME IS REMOVED FROM THE LIST!** Check in by posting a brief comment.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads. If it is still there by CURRENT_MONTH_NAME 15th, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateFor16toPenultimate():
print '16 to penultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the CURRENT_DAY_OF_MONTH_NAME day of the Stay Clean: CURRENT_MONTH_NAME challenge. Keep fighting the good fight!\n"
answer += "\n"
answer += "If you think you should still be on this list but aren't, you probably got removed in the great purge of CURRENT_MONTH_NAME 15th because you never checked in. However, if you let me know you're still with it I might re-add you.\n"
answer += "\n"
answer += "Guidelines:\n"
answer += "\n"
answer += "- At the end of this post is a list of people who have signed up for the challenge, and who are still in the running. That means that they have not needed to reset because of a relapse or slip.\n"
answer += "- Please check in with the group in the comments as often as you want! Feel free to share thoughts, feelings, experiences, progress, wisdom, encouragement and whatever else!\n"
answer += "- **IMPORTANT: if you relapse, please post a comment to that effect here** and I will remove your name from the list. We will not judge you or shame you, we have all been there.\n"
answer += '- If you have a "~" after your name, you have yet to check in on any update threads since CURRENT_MONTH_NAME 15. If it is still there by CURRENT_MONTH_NAME CURRENT_MONTH_TOTAL_DAYS, you will be removed from the list, in order to keep the numbers as realistic as possible.\n'
answer += "- We will not be accepting any new participants, but even if you're not on the list, please feel free to check in in the update threads anyway! Also, stay tuned to catch the NEXT_MONTH_NAME thread!\n"
answer += "\n"
answer += "Good luck!\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**. Here is the list of participants still with the challenge:\n\n"
answer += templateForParticipants()
return answer
def templateForUltimate():
print 'Ultimate\n\n'
answer = ""
answer += "**Daily news:** This is CURRENT_DAY_OF_WEEK_NAME, CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX, the last day of the Stay Clean: CURRENT_MONTH_NAME challenge. This is it, folks, the day we've been waiting for... the final day of the challenge. I'll be making a congratulatory post tomorrow to honor the victors. I'm really proud of everyone who signed up for this challenge. Quitting porn is difficult, especially in an era where porn is always as close as a few keystrokes, and triggers are absolutely everywhere. Everybody who gave it their best shot deserves to take a minute right now to feel good about themselves.\n"
answer += "\n"
answer += "For a chart of relapse data, check out [this Google Spreadsheet](https://docs.google.com/spreadsheets/d/1fnRMkDqFAJpsWHaZt8duMkZIPBCtUy0IfGFmlIfvOII/edit#gid=0).\n"
answer += "\n"
#TODO: need to do the part where it lists the checked in and non-checked in participants separately.
answer += "There are currently **NUMBER_STILL_IN out of INITIAL_NUMBER** original participants. That's **PERCENT_STILL_IN%**.\n\n"
answer += templateForParticipantsOnFinalDay()
return answer
def templateToUse():
    """Pick the post template matching today's day of the month.

    All branches return, so the ranges partition the month:
    1 / 2-9 / 10-14 / 15 / 16-penultimate / last day.
    """
    day = currentDayOfMonthIndex
    if day == 1:
        return templateFor1()
    elif 2 <= day <= 9:
        return templateFor2to9()
    elif 10 <= day <= 14:
        return templateFor10to14()
    elif day == 15:
        return templateFor15()
    elif 16 <= day <= currentMonthPenultimateDayIndex:
        return templateFor16toPenultimate()
    else:
        return templateForUltimate()
def stringToPrint():
    """Fill the selected template's ALL_CAPS placeholders with today's values.

    Uses str.replace instead of re.sub: the placeholders are plain literals,
    and re.sub would misinterpret regex escape sequences (e.g. backslashes)
    appearing in the substituted values.
    """
    # Order matters only in that no placeholder is a substring of an earlier
    # one; the original ordering is preserved.
    substitutions = [
        ('NUMBER_STILL_IN', str(numberStillIn)),
        ('INITIAL_NUMBER', str(initialNumber)),
        ('PERCENT_STILL_IN', str(percentStillIn)),
        ('CURRENT_MONTH_INDEX', str(currentMonthIndex)),
        ('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays)),
        ('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex)),
        ('CURRENT_MONTH_NAME', currentMonthName),
        ('NEXT_MONTH_INDEX', str(nextMonthIndex)),
        ('NEXT_MONTH_NAME', nextMonthName),
        ('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex)),
        ('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName),
        ('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName),
    ]
    answer = templateToUse()
    for placeholder, value in substitutions:
        answer = answer.replace(placeholder, value)
    return answer
# Render today's post, echo it between separator rules, and copy it to the
# system clipboard so it can be pasted straight into reddit.
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
# print re.sub('FOO', 'there', 'hello FOO yall')
# for participant in participantCollection.participantsWhoAreStillIn():
# if participant.hasCheckedIn:
# print "/u/" + participant.name
# else:
# print "/u/" + participant.name + " ~"
# print ""
| |
# Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public API of this module: only the graph-component factory is exported.
__all__ = [
    'create_graph_component_from_pipeline_func',
]
import inspect
from collections import OrderedDict
from typing import Callable, Mapping, Optional
from . import _components
from . import structures
from ._structures import TaskSpec, ComponentSpec, OutputSpec, GraphInputReference, TaskOutputArgument, GraphImplementation, GraphSpec
from ._naming import _make_name_unique_by_adding_index
from ._python_op import _extract_component_interface
from ._components import _create_task_factory_from_component_spec
def create_graph_component_from_pipeline_func(
    pipeline_func: Callable,
    output_component_file: str = None,
    embed_component_specs: bool = False,
    annotations: Optional[Mapping[str, str]] = None,
) -> Callable:
    """Convert a python pipeline function into a reusable graph component.

    .. warning::
        This feature is experimental. The pipeline function may only call
        loaded/python component factories and wire their outputs to inputs;
        DSL features such as conditions and loops are not supported.

    Args:
        pipeline_func: Python function to convert.
        output_component_file: If given, the component definition is also
            written to this path (e.g. ``component.yaml``) so it can be
            published for sharing.
        embed_component_specs: Embed full component definitions instead of
            referencing them, making the graph component self-contained.
            Default is False.
        annotations: Optional arbitrary key-value metadata attached to the
            component specification.

    Returns:
        A task-factory function with the same parameters as ``pipeline_func``.
        Its ``.component_spec`` attribute holds the generated spec; calling it
        returns a task object whose outputs are addressed as
        ``task.outputs["Output name"]``.

    Example::

        producer_op = load_component_from_file('producer/component.yaml')
        processor_op = load_component_from_file('processor/component.yaml')

        def pipeline1(pipeline_param_1: int):
            producer_task = producer_op()
            processor_task = processor_op(pipeline_param_1, producer_task.outputs['Output 2'])
            return OrderedDict([
                ('Pipeline output 1', producer_task.outputs['Output 1']),
                ('Pipeline output 2', processor_task.outputs['Output 2']),
            ])

        create_graph_component_from_pipeline_func(pipeline1, output_component_file='pipeline.component.yaml')
    """
    graph_spec = create_graph_component_spec_from_pipeline_func(
        pipeline_func, embed_component_specs)

    if annotations:
        graph_spec.metadata = structures.MetadataSpec(annotations=annotations)

    if output_component_file:
        from pathlib import Path
        from ._yaml_utils import dump_yaml
        serialized = dump_yaml(graph_spec.to_dict())
        Path(output_component_file).write_text(serialized)

    return _create_task_factory_from_component_spec(graph_spec)
def create_graph_component_spec_from_pipeline_func(
        pipeline_func: Callable,
        embed_component_specs: bool = False) -> ComponentSpec:
    """Build a graph ComponentSpec by tracing a python pipeline function.

    The function is called once with symbolic graph-input arguments while the
    container-task constructor is temporarily replaced, so every component
    invocation is recorded as a task with a deterministic id.
    """
    component_spec = _extract_component_interface(pipeline_func)
    # Checking the function parameters - they should not have file passing annotations.
    input_specs = component_spec.inputs or []
    for input in input_specs:
        if input._passing_style:
            raise TypeError(
                'Graph component function parameter "{}" cannot have file-passing annotation "{}".'
                .format(input.name, input._passing_style))

    task_map = OrderedDict()  # Preserving task order
    from ._components import _create_task_spec_from_component_and_arguments

    def task_construction_handler(
            component_spec,
            arguments,
            component_ref,
    ):
        # Invoked instead of the real constructor for every task created
        # while pipeline_func runs; records the task in task_map.
        task = _create_task_spec_from_component_and_arguments(
            component_spec=component_spec,
            arguments=arguments,
            component_ref=component_ref,
        )
        # Rewriting task ids so that they're same every time
        task_id = task.component_ref.spec.name or "Task"
        task_id = _make_name_unique_by_adding_index(task_id, task_map.keys(),
                                                    ' ')
        # Re-point output references at the stable task id instead of the
        # in-memory task object.
        for output_ref in task.outputs.values():
            output_ref.task_output.task_id = task_id
            output_ref.task_output.task = None
        task_map[task_id] = task
        # Remove the component spec from component reference unless it will make the reference empty or unless explicitly asked by the user
        if not embed_component_specs and any([
                task.component_ref.name, task.component_ref.url,
                task.component_ref.digest
        ]):
            task.component_ref.spec = None

        return task  # The handler is a transformation function, so it must pass the task through.

    # Preparing the pipeline_func arguments
    # TODO: The key should be original parameter name if different
    pipeline_func_args = {
        input.name: GraphInputReference(input_name=input.name).as_argument()
        for input in input_specs
    }

    try:
        # Setting the handler to fix and catch the tasks.
        # FIX: The handler only hooks container component creation
        old_handler = _components._container_task_constructor
        _components._container_task_constructor = task_construction_handler
        # Calling the pipeline_func with GraphInputArgument instances as arguments
        pipeline_func_result = pipeline_func(**pipeline_func_args)
    finally:
        # Always restore the original constructor, even if pipeline_func raised.
        _components._container_task_constructor = old_handler

    # Getting graph outputs
    output_names = [output.name for output in (component_spec.outputs or [])]

    # A single output named 'Output' is the single-return-value convention;
    # normalize it to a one-element list.
    if len(output_names) == 1 and output_names[
            0] == 'Output':  # TODO: Check whether the NamedTuple syntax was used
        pipeline_func_result = [pipeline_func_result]

    if isinstance(pipeline_func_result, tuple) and hasattr(
            pipeline_func_result,
            '_asdict'):  # collections.namedtuple and typing.NamedTuple
        pipeline_func_result = pipeline_func_result._asdict()

    if isinstance(pipeline_func_result, dict):
        if output_names:
            if set(output_names) != set(pipeline_func_result.keys()):
                raise ValueError(
                    'Returned outputs do not match outputs specified in the function signature: {} = {}'
                    .format(
                        str(set(pipeline_func_result.keys())),
                        str(set(output_names))))

    # Normalize the returned outputs into an ordered name -> value map.
    if pipeline_func_result is None:
        graph_output_value_map = {}
    elif isinstance(pipeline_func_result, dict):
        graph_output_value_map = OrderedDict(pipeline_func_result)
    elif isinstance(pipeline_func_result, (list, tuple)):
        if output_names:
            if len(pipeline_func_result) != len(output_names):
                raise ValueError(
                    'Expected {} values from pipeline function, but got {}.'
                    .format(len(output_names), len(pipeline_func_result)))
            graph_output_value_map = OrderedDict(
                (name_value[0], name_value[1])
                for name_value in zip(output_names, pipeline_func_result))
        else:
            # No declared outputs: derive the names from the task outputs.
            graph_output_value_map = OrderedDict(
                (output_value.task_output.output_name, output_value)
                for output_value in pipeline_func_result
            )  # TODO: Fix possible name non-uniqueness (e.g. use task id as prefix or add index to non-unique names)
    else:
        raise TypeError('Pipeline must return outputs as tuple or OrderedDict.')

    # Checking the pipeline_func output object types
    for output_name, output_value in graph_output_value_map.items():
        if not isinstance(output_value, TaskOutputArgument):
            raise TypeError(
                'Only TaskOutputArgument instances should be returned from graph component, but got "{}" = "{}".'
                .format(output_name, str(output_value)))

    # Infer the component outputs from the returned task outputs when the
    # function signature did not declare any.
    if not component_spec.outputs and graph_output_value_map:
        component_spec.outputs = [
            OutputSpec(name=output_name, type=output_value.task_output.type)
            for output_name, output_value in graph_output_value_map.items()
        ]

    component_spec.implementation = GraphImplementation(
        graph=GraphSpec(
            tasks=task_map,
            output_values=graph_output_value_map,
        ))
    return component_spec
| |
# Code Authors: Pan Ji, University of Adelaide, pan.ji@adelaide.edu.au
# Tong Zhang, Australian National University, tong.zhang@anu.edu.au
# Copyright Reserved!
import tensorflow as tf
import numpy as np
from tensorflow.contrib import layers
import scipy.io as sio
from scipy.sparse.linalg import svds
from sklearn import cluster
from sklearn.preprocessing import normalize
from munkres import Munkres
class ConvAE(object):
    """Convolutional auto-encoder with a self-expressive layer (DSC-Net).

    Images are encoded by three conv layers; each latent code is then
    re-expressed as a linear combination of the other codes in the batch via
    the learned ``Coef`` matrix, and the combined codes are decoded back to
    image space. ``Coef`` is later used as the clustering affinity.
    """
    def __init__(self, n_input, kernel_size, n_hidden, reg_constant1 = 1.0, re_constant2 = 1.0, batch_size = 200, reg = None, \
        denoise = False, model_path = None, restore_path = None, \
        logs_path = '/home/pan/workspace-eclipse/deep-subspace-clustering/models_face/logs'):
        # n_input: [height, width] of input images; kernel_size / n_hidden:
        # per-layer conv kernel sizes and feature-map counts;
        # reg_constant1: weight of the ||Coef||^2 regularizer;
        # re_constant2: weight of the self-expression loss;
        # reg: optional tf regularizer applied to conv weights.
        self.n_input = n_input
        self.kernel_size = kernel_size
        self.n_hidden = n_hidden
        self.batch_size = batch_size
        self.reg = reg
        self.model_path = model_path
        self.restore_path = restore_path
        self.iter = 0
        #input required to be fed
        self.x = tf.placeholder(tf.float32, [None, n_input[0], n_input[1], 1])
        self.learning_rate = tf.placeholder(tf.float32, [])
        weights = self._initialize_weights()
        if denoise == False:
            x_input = self.x
            latent, shape = self.encoder(x_input, weights)
        else:
            # Denoising variant: corrupt the input with Gaussian noise
            # (stddev 0.2) before encoding.
            x_input = tf.add(self.x, tf.random_normal(shape=tf.shape(self.x),
                                                      mean = 0,
                                                      stddev = 0.2,
                                                      dtype=tf.float32))
            latent, shape = self.encoder(x_input, weights)
        # Self-expressive layer: z_c = Coef * z reconstructs every flattened
        # latent code from the other codes in the batch.
        z = tf.reshape(latent, [batch_size, -1])
        Coef = weights['Coef']
        z_c = tf.matmul(Coef,z)
        self.Coef = Coef
        latent_c = tf.reshape(z_c, tf.shape(latent))
        self.z = z
        self.x_r = self.decoder(latent_c, weights, shape)
        # l_2 reconstruction loss
        self.reconst_cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.x_r, self.x), 2.0))
        tf.summary.scalar("recons_loss", self.reconst_cost)
        # Frobenius-norm regularizer on the coefficient matrix.
        self.reg_losses = tf.reduce_sum(tf.pow(self.Coef,2.0))
        tf.summary.scalar("reg_loss", reg_constant1 * self.reg_losses )
        # Self-expression loss ||z_c - z||^2.
        self.selfexpress_losses = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(z_c, z), 2.0))
        tf.summary.scalar("selfexpress_loss", re_constant2 * self.selfexpress_losses )
        self.loss = self.reconst_cost + reg_constant1 * self.reg_losses + re_constant2 * self.selfexpress_losses
        self.merged_summary_op = tf.summary.merge_all()
        self.optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate).minimize(self.loss) #GradientDescentOptimizer #AdamOptimizer
        self.init = tf.global_variables_initializer()
        self.sess = tf.InteractiveSession()
        self.sess.run(self.init)
        # Coef is excluded from checkpoints: it is batch-specific and is
        # re-learned during fine-tuning for every experiment.
        self.saver = tf.train.Saver([v for v in tf.trainable_variables() if not (v.name.startswith("Coef"))])
        self.summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

    def _initialize_weights(self):
        """Create all encoder/decoder conv weights, biases, and Coef."""
        all_weights = dict()
        all_weights['enc_w0'] = tf.get_variable("enc_w0", shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]],
            initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
        all_weights['enc_b0'] = tf.Variable(tf.zeros([self.n_hidden[0]], dtype = tf.float32))
        all_weights['enc_w1'] = tf.get_variable("enc_w1", shape=[self.kernel_size[1], self.kernel_size[1], self.n_hidden[0],self.n_hidden[1]],
            initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
        all_weights['enc_b1'] = tf.Variable(tf.zeros([self.n_hidden[1]], dtype = tf.float32))
        all_weights['enc_w2'] = tf.get_variable("enc_w2", shape=[self.kernel_size[2], self.kernel_size[2], self.n_hidden[1],self.n_hidden[2]],
            initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
        all_weights['enc_b2'] = tf.Variable(tf.zeros([self.n_hidden[2]], dtype = tf.float32))
        # Self-expression coefficients, initialized near (but not at) zero.
        all_weights['Coef'] = tf.Variable(1.0e-4 * tf.ones([self.batch_size, self.batch_size],tf.float32), name = 'Coef')
        # Decoder weights mirror the encoder shapes in reverse order.
        all_weights['dec_w0'] = tf.get_variable("dec_w0", shape=[self.kernel_size[2], self.kernel_size[2], self.n_hidden[1],self.n_hidden[2]],
            initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
        all_weights['dec_b0'] = tf.Variable(tf.zeros([self.n_hidden[1]], dtype = tf.float32))
        all_weights['dec_w1'] = tf.get_variable("dec_w1", shape=[self.kernel_size[1], self.kernel_size[1], self.n_hidden[0],self.n_hidden[1]],
            initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
        all_weights['dec_b1'] = tf.Variable(tf.zeros([self.n_hidden[0]], dtype = tf.float32))
        all_weights['dec_w2'] = tf.get_variable("dec_w2", shape=[self.kernel_size[0], self.kernel_size[0],1, self.n_hidden[0]],
            initializer=layers.xavier_initializer_conv2d(),regularizer = self.reg)
        all_weights['dec_b2'] = tf.Variable(tf.zeros([1], dtype = tf.float32))
        return all_weights

    # Building the encoder
    def encoder(self,x, weights):
        """Three stride-2 conv+relu layers; also returns pre-layer shapes
        needed by the transposed-conv decoder."""
        shapes = []
        # Encoder Hidden layer with relu activation #1
        shapes.append(x.get_shape().as_list())
        layer1 = tf.nn.bias_add(tf.nn.conv2d(x, weights['enc_w0'], strides=[1,2,2,1],padding='SAME'),weights['enc_b0'])
        layer1 = tf.nn.relu(layer1)
        shapes.append(layer1.get_shape().as_list())
        layer2 = tf.nn.bias_add(tf.nn.conv2d(layer1, weights['enc_w1'], strides=[1,2,2,1],padding='SAME'),weights['enc_b1'])
        layer2 = tf.nn.relu(layer2)
        shapes.append(layer2.get_shape().as_list())
        layer3 = tf.nn.bias_add(tf.nn.conv2d(layer2, weights['enc_w2'], strides=[1,2,2,1],padding='SAME'),weights['enc_b2'])
        layer3 = tf.nn.relu(layer3)
        return  layer3, shapes

    # Building the decoder
    def decoder(self,z, weights, shapes):
        """Mirror of the encoder: three stride-2 transposed convs, restoring
        the shapes recorded during encoding."""
        # Encoder Hidden layer with relu activation #1
        shape_de1 = shapes[2]
        layer1 = tf.add(tf.nn.conv2d_transpose(z, weights['dec_w0'], tf.stack([tf.shape(self.x)[0],shape_de1[1],shape_de1[2],shape_de1[3]]),\
            strides=[1,2,2,1],padding='SAME'),weights['dec_b0'])
        layer1 = tf.nn.relu(layer1)
        shape_de2 = shapes[1]
        layer2 = tf.add(tf.nn.conv2d_transpose(layer1, weights['dec_w1'], tf.stack([tf.shape(self.x)[0],shape_de2[1],shape_de2[2],shape_de2[3]]),\
            strides=[1,2,2,1],padding='SAME'),weights['dec_b1'])
        layer2 = tf.nn.relu(layer2)
        shape_de3= shapes[0]
        layer3 = tf.add(tf.nn.conv2d_transpose(layer2, weights['dec_w2'], tf.stack([tf.shape(self.x)[0],shape_de3[1],shape_de3[2],shape_de3[3]]),\
            strides=[1,2,2,1],padding='SAME'),weights['dec_b2'])
        layer3 = tf.nn.relu(layer3)
        return layer3

    def partial_fit(self, X, lr): #
        """Run one optimization step; returns (reconstruction cost, Coef)."""
        cost, summary, _, Coef = self.sess.run((self.reconst_cost, self.merged_summary_op, self.optimizer, self.Coef), feed_dict = {self.x: X, self.learning_rate: lr})#
        self.summary_writer.add_summary(summary, self.iter)
        self.iter = self.iter + 1
        return cost, Coef

    def initlization(self):
        # Re-initialize all variables (note: name kept as-is; callers use it).
        self.sess.run(self.init)

    def reconstruct(self,X):
        """Return the decoder's reconstruction of X."""
        return self.sess.run(self.x_r, feed_dict = {self.x:X})

    def transform(self, X):
        """Return the flattened latent codes for X."""
        return self.sess.run(self.z, feed_dict = {self.x:X})

    def save_model(self):
        # Saves all trainable variables except Coef (see saver construction).
        save_path = self.saver.save(self.sess,self.model_path)
        print ("model saved in file: %s" % save_path)

    def restore(self):
        # Restore pre-trained auto-encoder weights (Coef is not restored).
        self.saver.restore(self.sess, self.restore_path)
        print ("model restored")
def best_map(L1, L2):
    """Relabel clustering output to best match the ground truth.

    L1: ground-truth labels; L2: predicted cluster labels.
    Returns L2 rewritten with the ground-truth label values under the
    overlap-maximizing one-to-one label assignment (Hungarian algorithm).
    """
    # scipy's C-backed Hungarian solver replaces the pure-python `munkres`
    # package used previously; both minimize total cost, so the overlap
    # counts are negated.
    from scipy.optimize import linear_sum_assignment
    Label1 = np.unique(L1)
    nClass1 = len(Label1)
    Label2 = np.unique(L2)
    nClass2 = len(Label2)
    nClass = np.maximum(nClass1, nClass2)
    # G[i, j] = number of samples with true label Label1[i] and predicted
    # label Label2[j]; square-padded with zeros when class counts differ.
    G = np.zeros((nClass, nClass))
    for i in range(nClass1):
        ind_cla1 = (L1 == Label1[i]).astype(float)
        for j in range(nClass2):
            ind_cla2 = (L2 == Label2[j]).astype(float)
            G[i, j] = np.sum(ind_cla2 * ind_cla1)
    # Rows of G.T index predicted classes; c[j] is the matched true class.
    _, c = linear_sum_assignment(-G.T)
    newL2 = np.zeros(L2.shape)
    for i in range(nClass2):
        newL2[L2 == Label2[i]] = Label1[c[i]]
    return newL2
def thrC(C, ro):
    """Threshold each column of C, keeping only its largest entries.

    For every column, the top-magnitude entries whose cumulative absolute sum
    first exceeds `ro` times the column's l1 mass are kept; the rest are
    zeroed. `ro >= 1` returns C unchanged (same object).
    """
    if ro < 1:
        N = C.shape[1]
        Cp = np.zeros((N, N))
        S = np.abs(np.sort(-np.abs(C), axis=0))   # column magnitudes, descending
        Ind = np.argsort(-np.abs(C), axis=0)      # matching row indices
        for i in range(N):
            cL1 = np.sum(S[:, i]).astype(float)
            if cL1 == 0:
                # All-zero column: nothing to keep. (The original loop never
                # terminated here and indexed past the end of S.)
                continue
            csum = 0
            t = 0
            # Accumulate until the kept mass strictly exceeds ro * cL1;
            # guaranteed to stop because the full sum cL1 > ro * cL1.
            while csum <= ro * cL1:
                csum = csum + S[t, i]
                t = t + 1
            Cp[Ind[0:t, i], i] = C[Ind[0:t, i], i]
    else:
        Cp = C
    return Cp
def build_aff(C):
    """Build a symmetric affinity matrix from coefficients C.

    Each column of |C| is scaled by its largest entry (plus a small epsilon
    to avoid division by zero), then the result is symmetrized by adding its
    transpose.
    """
    scaled = np.abs(C)
    column_peaks = scaled.max(axis=0)
    scaled = scaled / (column_peaks + 1e-6)
    return scaled + scaled.T
def post_proC(C, K, d, alpha):
    """Spectral clustering on the learned coefficient matrix.

    C: coefficient matrix, K: number of clusters, d: per-subspace dimension,
    alpha: exponent sharpening the affinity. Returns (labels, affinity),
    with labels in 1..K.
    """
    sym = 0.5 * (C + C.T)
    rank = d * K + 1
    U, S, _ = svds(sym, rank, v0=np.ones(sym.shape[0]))
    # svds returns singular values in ascending order; flip to descending
    # and scale the singular vectors by sqrt of the singular values.
    U = U[:, ::-1]
    S = np.sqrt(S[::-1])
    U = U.dot(np.diag(S))
    U = normalize(U, norm='l2', axis=1)
    Z = U.dot(U.T)
    Z = Z * (Z > 0)
    affinity = np.abs(Z ** alpha)
    affinity = affinity / affinity.max()
    affinity = 0.5 * (affinity + affinity.T)
    spectral = cluster.SpectralClustering(n_clusters=K, eigen_solver='arpack',
                                          affinity='precomputed',
                                          assign_labels='discretize')
    spectral.fit(affinity)
    grp = spectral.fit_predict(affinity) + 1
    return grp, affinity
def err_rate(gt_s, s):
    """Fraction of samples misclassified after optimal label alignment."""
    aligned = best_map(gt_s, s)
    mismatches = np.sum(gt_s[:] != aligned[:])
    return mismatches.astype(float) / gt_s.shape[0]
def build_laplacian(C):
    """Row-normalized transition matrix of the symmetrized |C| graph.

    Symmetrizes the absolute coefficients, then left-multiplies by the
    inverse of the diagonal column-sum (degree) matrix.
    """
    sym = 0.5 * (np.abs(C) + np.abs(C.T))
    inv_degree = np.diag(1.0 / np.sum(sym, axis=0))
    return inv_degree.dot(sym)
def test_face(Img, Label, CAE, num_class):
    """Fine-tune the pre-trained auto-encoder and cluster face subjects.

    Slides a window of `num_class` consecutive subjects (64 images each)
    over the dataset; for each window the model is restored, fine-tuned,
    and its coefficient matrix clustered. Returns (mean error, median
    error) over all windows.
    """
    # Coefficient-pruning threshold; shrinks as the number of classes grows,
    # floored at 0.1.
    alpha = max(0.4 - (num_class-1)/10 * 0.1, 0.1)
    print alpha

    acc_= []
    for i in range(0,39-num_class):
        # 64 images per subject: take a contiguous block of num_class subjects.
        face_10_subjs = np.array(Img[64*i:64*(i+num_class),:])
        face_10_subjs = face_10_subjs.astype(float)
        label_10_subjs = np.array(Label[64*i:64*(i+num_class)])
        # Shift labels so they start at 1 for this window.
        label_10_subjs = label_10_subjs - label_10_subjs.min() + 1
        label_10_subjs = np.squeeze(label_10_subjs)

        CAE.initlization()
        CAE.restore() # restore from pre-trained model

        # More classes -> more fine-tuning steps.
        max_step = 50 + num_class*25# 100+num_class*20
        display_step = max_step
        lr = 1.0e-3
        # fine-tune network
        epoch = 0
        while epoch < max_step:
            epoch = epoch + 1
            cost, Coef = CAE.partial_fit(face_10_subjs, lr)#
            if epoch % display_step == 0:
                # NOTE(review): batch_size here is a module-level global set
                # in the __main__ block, not a parameter — confirm.
                print "epoch: %.1d" % epoch, "cost: %.8f" % (cost/float(batch_size))

        # Prune small coefficients, then spectrally cluster what remains.
        Coef = thrC(Coef,alpha)
        y_x, _ = post_proC(Coef, label_10_subjs.max(), 10, 3.5)
        missrate_x = err_rate(label_10_subjs, y_x)
        acc_x = 1 - missrate_x
        print "experiment: %d" % i, "our accuracy: %.4f" % acc_x
        acc_.append(acc_x)

    acc_ = np.array(acc_)
    m = np.mean(acc_)
    me = np.median(acc_)
    print("%d subjects:" % num_class)
    # Reported numbers are error rates (1 - accuracy), as percentages.
    print("Mean: %.4f%%" % ((1-m)*100))
    print("Median: %.4f%%" % ((1-me)*100))
    print(acc_)
    return (1-m), (1-me)
if __name__ == '__main__':
    # load face images and labels
    data = sio.loadmat('/home/pan/workspace-eclipse/deep-subspace-clustering/face_datasets/YaleBCrop025.mat')
    img = data['Y']
    I = []
    Label = []
    # Flatten the per-subject image stacks into one image list with a
    # parallel subject-index label list.
    for i in range(img.shape[2]):
        for j in range(img.shape[1]):
            temp = np.reshape(img[:,j,i],[42,48])
            Label.append(i)
            I.append(temp)
    I = np.array(I)
    Label = np.array(Label[:])
    # Transpose each image to 48x42 and add a trailing channel dimension.
    Img = np.transpose(I,[0,2,1])
    Img = np.expand_dims(Img[:],3)

    # face image clustering
    n_input = [48,42]
    kernel_size = [5,3,3]
    n_hidden = [10,20,30]

    all_subjects = [10, 15, 20, 25, 30, 35, 38]
    avg = []
    med = []
    iter_loop = 0
    while iter_loop < len(all_subjects):
        # One experiment per subject count: rebuild the graph, restore the
        # pre-trained weights, fine-tune and cluster.
        num_class = all_subjects[iter_loop]
        batch_size = num_class * 64
        reg1 = 1.0
        # Self-expression weight grows with the number of subjects.
        reg2 = 1.0 * 10 ** (num_class / 10.0 - 3.0)
        model_path = '/home/pan/workspace-eclipse/deep-subspace-clustering/models_face/model-102030-48x42-yaleb.ckpt'
        restore_path = '/home/pan/workspace-eclipse/deep-subspace-clustering/models_face/model-102030-48x42-yaleb.ckpt'
        logs_path = '/home/pan/workspace-eclipse/deep-subspace-clustering/conv_3_l1_yaleb/ft/logs'
        tf.reset_default_graph()
        CAE = ConvAE(n_input=n_input, n_hidden=n_hidden, reg_constant1=reg1, re_constant2=reg2, \
            kernel_size=kernel_size, batch_size=batch_size, model_path=model_path, restore_path=restore_path, logs_path=logs_path)

        avg_i, med_i = test_face(Img, Label, CAE, num_class)
        avg.append(avg_i)
        med.append(med_i)
        iter_loop = iter_loop + 1

    # Summary: print mean/median error rate per subject count.
    iter_loop = 0
    while iter_loop < len(all_subjects):
        num_class = all_subjects[iter_loop]
        print '%d subjects:' % num_class
        print 'Mean: %.4f%%' % (avg[iter_loop]*100), 'Median: %.4f%%' % (med[iter_loop]*100)
        iter_loop = iter_loop + 1
| |
"""Supervises services."""
import abc
import errno
import logging
import os
import tempfile
import enum
import yaml
from treadmill import fs
from treadmill import dirwatch
from treadmill import subproc
from treadmill import supervisor
_LOGGER = logging.getLogger(__name__)
class Monitor(object):
    """Treadmill s6-based supervisor monitoring.

    Enforces restart policy and execute failure actions.
    """

    __slots__ = (
        '_dirwatcher',
        '_down_action',
        '_down_reason',
        '_policy_impl',
        '_services',
        '_services_dir',
        '_service_policies',
    )

    def __init__(self, services_dir, service_dirs, policy_impl, down_action):
        # Directory watcher; created lazily in `run`.
        self._dirwatcher = None
        # MonitorDownAction executed when a policy reports failure.
        self._down_action = down_action
        # Failure data of the policy that tripped, or None while healthy.
        self._down_reason = None
        # Factory producing a MonitorPolicy instance per service.
        self._policy_impl = policy_impl
        # Explicit list of service directories to monitor.
        self._services = list(service_dirs)
        # Optional directory whose entries are services (may be None).
        self._services_dir = services_dir
        # Maps watched (exit) directory path -> policy instance.
        self._service_policies = {}

    def _on_created(self, new_entry):
        # Dot-prefixed (hidden/temporary) entries are ignored.
        if os.path.basename(new_entry)[0] == '.':
            return

        watched = os.path.dirname(new_entry)

        # Check if the created entry is a new service or a service exit entry
        if watched == self._services_dir:
            self._add_service(new_entry)
        else:
            # A service exited
            policy = self._service_policies.get(watched, None)
            if policy is None:
                return

            if not policy.process():
                self._down_reason = policy.fail_reason

    def _on_deleted(self, removed_entry):
        # Dot-prefixed (hidden/temporary) entries are ignored.
        if os.path.basename(removed_entry)[0] == '.':
            return

        _LOGGER.debug('Policies %r', self._service_policies)
        watched = os.path.dirname(removed_entry)
        if watched == self._services_dir:
            _LOGGER.debug('Removed service dir')
        else:
            # If a policy directory is being removed, remove the associated
            # policy as well.
            # NOTE(review): the map is keyed by os.path.realpath() values
            # returned from policy.register(); a non-canonical removed_entry
            # path would silently fail to match here — confirm.
            removed_svc_policy = self._service_policies.pop(
                removed_entry, None
            )
            if removed_svc_policy is not None:
                _LOGGER.debug('Removed %r. Remaining policies %r',
                              removed_svc_policy, self._service_policies)
        return

    def _add_service(self, new_service_dir):
        # Add the new service
        try:
            service = supervisor.open_service(new_service_dir)
        except (ValueError, IOError):
            _LOGGER.exception('Unable to read service directory %r',
                              new_service_dir)
            return

        policy = self._policy_impl()
        new_watch = policy.register(service)
        self._service_policies[new_watch] = policy
        # Add the new service directory to the policy watcher
        self._dirwatcher.add_dir(new_watch)
        # Immediately ensure we start within policy.
        if not policy.process():
            self._down_reason = policy.fail_reason

    def run(self):
        """Run the monitor.

        Start the event loop and continue until a service fails and the
        configure down action considers it fatal.
        """
        self._dirwatcher = dirwatch.DirWatcher()
        self._dirwatcher.on_deleted = self._on_deleted
        self._dirwatcher.on_created = self._on_created

        service_dirs = self._services[:]
        if self._services_dir is not None:
            # If we have a svscan directory to watch add it.
            self._dirwatcher.add_dir(self._services_dir)
            service_dirs += [
                os.path.join(self._services_dir, dentry)
                for dentry in os.listdir(self._services_dir)
                if dentry[0] != '.'
            ]

        for service_dir in service_dirs:
            self._add_service(service_dir)

        keep_running = True
        while keep_running:
            # Block on filesystem events until some policy reports a failure,
            # then run the down action; loop again only if the action says
            # the failure is survivable.
            while self._down_reason is None:
                if self._dirwatcher.wait_for_events():
                    self._dirwatcher.process_events()

            keep_running = self._down_action.execute(self._down_reason)
            self._down_reason = None
        return
class MonitorDownAction(object):
    """Abstract base class for all monitor down actions.

    Behavior when a service fails its policy.
    """
    # NOTE(review): `__metaclass__` only takes effect on Python 2; under
    # Python 3 this class would not actually be abstract — confirm the
    # targeted interpreter.
    __metaclass__ = abc.ABCMeta

    __slots__ = ()

    @abc.abstractmethod
    def execute(self, data):
        """Execute the down action.

        :params ``dict`` data:
            Output of the `class:MonitorPolicy.fail_reason()` method.

        :returns ``bool``:
            ``True`` - Monitor should keep running.
            ``False`` - Monitor should stop.
        """
        pass
class MonitorNodeDown(MonitorDownAction):
    """Down action that takes the node out of service via the watchdog.

    Writing a watchdog file blacklists the node.
    """
    __slots__ = (
        '_watchdog_dir'
    )

    def __init__(self, tm_env):
        self._watchdog_dir = tm_env.watchdog_dir

    def execute(self, data):
        """Shut down the node by writing a watchdog with the down reason data.
        """
        _LOGGER.critical('Node down: %r', data)
        message = (
            'Node service {service!r} crashed.'
            ' Last exit {return_code} (sig:{signal}).'.format(
                service=data['service'],
                return_code=data['return_code'],
                signal=data['signal']
            )
        )
        # Write to a hidden temp file in the target directory, then rename
        # into place so the watchdog appears atomically.
        tmp_file = tempfile.NamedTemporaryFile(prefix='.tmp',
                                               dir=self._watchdog_dir,
                                               delete=False,
                                               mode='w')
        with tmp_file as f:
            f.write(message)
            os.fchmod(f.fileno(), 0o644)
        os.rename(
            tmp_file.name,
            os.path.join(self._watchdog_dir, 'Monitor-%s' % data['service'])
        )
        return True
class MonitorPolicy(object):
    """Abstract base class of all monitor policies implementations.

    Behaviors for policing services executions.
    """
    # NOTE(review): `__metaclass__` only takes effect on Python 2; under
    # Python 3 this class would not actually be abstract — confirm the
    # targeted interpreter.
    __metaclass__ = abc.ABCMeta

    __slots__ = ()

    @abc.abstractmethod
    def register(self, service):
        """Register a service directory with the Monitor.

        :returns ``str``:
            Absolute (real) path to the watch that needs to be added to the
            monitor.
        """
        pass

    @abc.abstractmethod
    def process(self):
        """Process an service event.

        Ensure the service is down, check the policy and decide to restart the
        service or not.

        :returns ``bool``:
            True - Service still in compliance.
            False - Server failed policy.
        """
        pass

    @abc.abstractproperty
    def fail_reason(self):
        """Policy failure data

        :returns ``dict``:
            Dictionary of failure data
        """
        return
class MonitorRestartPolicyResult(enum.Enum):
    """Possible outcomes of a MonitorRestartPolicy check."""

    NOOP = 'noop'        # Nothing to do; the service never exited.
    RESTART = 'restart'  # Within policy; bring the service back up.
    FAIL = 'fail'        # Restart rate exceeded; trigger the down action.
class MonitorRestartPolicy(MonitorPolicy):
    """Restart services based on limit and interval.

    A service is allowed up to ``limit`` restarts within ``interval``
    seconds; beyond that the policy fails and the monitor's down action
    is triggered.
    """

    __slots__ = (
        '_last_rc',
        '_last_signal',
        '_last_timestamp',
        '_policy_interval',
        '_policy_limit',
        '_service',
        '_service_exits_log',
    )

    # Subdirectory of the service data dir where exit records are kept.
    EXITS_DIR = 'exits'
    # Per-service policy configuration file.
    POLICY_FILE = 'policy.yml'
    # TODO(boysson): configurable timeout for really down
    REALLY_DOWN_TIMEOUT = '50'

    def __init__(self):
        self._last_rc = None
        self._last_signal = None
        self._last_timestamp = None
        self._policy_interval = None
        self._policy_limit = None
        self._service = None
        self._service_exits_log = None

    @property
    def fail_reason(self):
        """Policy failure data of the most recent exit.

        :returns ``dict``:
            ``return_code``, ``service``, ``signal`` and ``timestamp``.
        """
        return {
            'return_code': self._last_rc,
            'service': self._service.name,
            'signal': self._last_signal,
            'timestamp': self._last_timestamp,
        }

    def register(self, service):
        """Register a service directory with the Monitor.

        Reads the optional policy file; a missing file means "no restart
        allowed" (limit 0 within a 60 second interval).

        :returns ``str``:
            Absolute (real) path of the exits log to watch.
        """
        self._service = service
        try:
            with open(os.path.join(service.data_dir, self.POLICY_FILE)) as f:
                # safe_load: the policy file is plain data; yaml.load
                # without an explicit Loader is unsafe and deprecated.
                policy_conf = yaml.safe_load(f)
                self._policy_limit = policy_conf['limit']
                self._policy_interval = policy_conf['interval']
        except IOError as err:
            if err.errno == errno.ENOENT:
                self._policy_limit = 0
                self._policy_interval = 60
            else:
                raise
        service_exits_log = os.path.join(
            service.data_dir, self.EXITS_DIR
        )
        fs.mkdir_safe(service_exits_log)
        self._service_exits_log = service_exits_log
        _LOGGER.info('monitoring %r with limit:%d interval:%d',
                     self._service, self._policy_limit,
                     self._policy_interval)
        return os.path.realpath(service_exits_log)

    def process(self):
        """Process an event on the service directory.

        :returns ``bool``:
            ``False`` when the policy failed, ``True`` otherwise.
        """
        result = self._check_policy()
        if result is MonitorRestartPolicyResult.NOOP:
            return True
        elif result is MonitorRestartPolicyResult.FAIL:
            return False
        else:
            # Bring the service back up.
            _LOGGER.info('Bringing up %r', self._service)
            subproc.check_call(
                [
                    's6_svc', '-u',
                    self._service.directory
                ]
            )
            return True

    def _check_policy(self):
        """Check the status of the service against the policy.

        Exit records are ``<timestamp>,<rc>,<signal>`` filenames, so a
        lexicographic sort orders them chronologically.

        :returns ``MonitorRestartPolicyResult``:
            ``NOOP`` if nothing needs to be done, ``RESTART`` to bring the
            service back up and ``FAIL`` to fail.
        """
        exits = sorted(
            direntry
            for direntry in os.listdir(self._service_exits_log)
            if direntry[0] != '.'
        )
        total_restarts = len(exits)
        if total_restarts == 0:
            # If it never exited, nothing to do.
            return MonitorRestartPolicyResult.NOOP

        last_timestamp, last_rc, last_sig = exits[-1].split(',')
        self._last_timestamp = float(last_timestamp)
        self._last_rc = int(last_rc)
        self._last_signal = int(last_sig)

        success = True
        if total_restarts > self._policy_limit:
            if self._policy_limit == 0:
                # Do not allow any restart.
                success = False
            else:
                # Fail when the (limit+1)-th most recent exit still falls
                # inside the policy interval.
                cutoff_exit = exits[-(self._policy_limit + 1)]
                timestamp, _rc, _sig = cutoff_exit.split(',')
                if (float(timestamp) + self._policy_interval >
                        self._last_timestamp):
                    success = False

        if not success:
            _LOGGER.critical(
                '%r restart rate exceeded. Last exit @%r code %r (sig:%r)',
                self._service,
                self._last_timestamp, self._last_rc, self._last_signal
            )
            return MonitorRestartPolicyResult.FAIL

        # Otherwise, restart the service.
        _LOGGER.info(
            '%r should be up. Last exit @%r code %r (sig:%r)',
            self._service,
            self._last_timestamp, self._last_rc, self._last_signal
        )
        # Cleanup old exits (up to 2x the policy).
        for old_exit in exits[:-(self._policy_limit * 2)]:
            os.unlink(os.path.join(self._service_exits_log, old_exit))
        return MonitorRestartPolicyResult.RESTART
| |
from .core import *
from .execute import *
from .nbt import *
from .scoreboard import *
from .selector import *
from .text import *
class Cmd(Command):
    """A raw command string with late-bound $func:$ / $entity_local:$ markers."""

    def __init__(self, cmd):
        self.command = cmd

    def resolve(self, scope):
        # This should be refactored eventually
        # Support set_scoreboard_tracking() and text_set_click_run()
        # in the C compiler
        import re
        marker = re.compile(r'\$(func|entity_local):(.+?)\$')
        resolved = self.command
        match = marker.search(resolved)
        while match is not None:
            kind, arg = match.group(1), match.group(2)
            if kind == 'func':
                replacement = scope.function_name(NSName('sub_' + arg))
            else:
                replacement = scope.objective(arg)
            # Splice in the replacement, then re-scan from the start.
            resolved = (resolved[:match.start()] + replacement
                        + resolved[match.end():])
            match = marker.search(resolved)
        return resolved
class Function(Command):
    """Invoke another generated function by its namespaced name."""

    def __init__(self, func_name):
        assert isinstance(func_name, NSName)
        self.name = func_name

    def resolve(self, scope):
        resolved_name = scope.function_name(self.name)
        return 'function %s' % resolved_name
class FunctionTag(Command):
    """Invoke a function tag (the '#namespace:tag' form)."""

    def __init__(self, tag_name):
        assert isinstance(tag_name, NSName)
        self._name = tag_name

    def resolve(self, scope):
        resolved_tag = scope.func_tag_name(self._name)
        return 'function #' + resolved_tag
class Teleport(Command):
    """Teleport a target entity, forwarding any extra positional arguments."""

    def __init__(self, target, *more):
        assert isinstance(target, EntityRef)
        self.args = [target] + list(more)

    def resolve(self, scope):
        parts = [arg.resolve(scope) for arg in self.args]
        return 'tp %s' % ' '.join(parts)
class Clone(Command):
    """Clone the block volume (src0, src1) to dest."""

    def __init__(self, src0, src1, dest):
        self.src0 = src0
        self.src1 = src1
        self.dest = dest

    def resolve(self, scope):
        corners_and_dest = (self.src0.resolve(scope),
                            self.src1.resolve(scope),
                            self.dest.resolve(scope))
        return 'clone %s %s %s' % corners_and_dest
class Setblock(Command):
    """Place a block at a block-aligned world position."""

    def __init__(self, pos, block):
        assert isinstance(pos, WorldPos) and pos.block_pos
        self.pos = pos
        self.block = block

    def resolve(self, scope):
        resolved_pos = self.pos.resolve(scope)
        resolved_block = self.block.resolve(scope)
        return 'setblock %s %s' % (resolved_pos, resolved_block)
class TeamModify(Command):
    """Set a single attribute of a scoreboard team."""

    # Attributes accepted by the vanilla 'team modify' command.
    _ATTRIBUTES = ('color', 'friendlyFire', 'seeFriendlyInvisibles',
                   'nametagVisibility', 'deathMessageVisibility',
                   'collisionRule', 'displayName', 'prefix', 'suffix')

    def __init__(self, team, attr, value):
        assert isinstance(team, TeamName)
        self.team = team
        assert attr in self._ATTRIBUTES
        self.attr = attr
        self.value = value

    def resolve(self, scope):
        return 'team modify %s %s %s' % (self.team.resolve(scope),
                                         self.attr, self.value)
class JoinTeam(Command):
    """Add members (or the executor when members is None) to a team."""

    def __init__(self, team, members):
        assert isinstance(team, TeamName)
        assert members is None or isinstance(members, EntityRef)
        self.team = team
        self.members = members

    def resolve(self, scope):
        if self.members:
            suffix = ' ' + self.members.resolve(scope)
        else:
            suffix = ''
        return 'team join %s%s' % (self.team.resolve(scope), suffix)
class BossbarSet(Command):
    """Set a bossbar property, with an optional value argument."""

    def __init__(self, bar, prop, value):
        assert isinstance(bar, Bossbar)
        self.bar = bar
        self.prop = prop
        self.value = value

    def resolve(self, scope):
        if self.value:
            value = ' ' + self.value.resolve(scope)
        else:
            value = ''
        return 'bossbar set %s %s%s' % (self.bar.resolve(scope), self.prop,
                                        value)
class Kill(Command):
    """Kill the target entity."""

    def __init__(self, target):
        assert isinstance(target, EntityRef)
        self.target = target

    def resolve(self, scope):
        resolved_target = self.target.resolve(scope)
        return 'kill %s' % resolved_target
class ReplaceItem(Command):
    """Replace the item in a container/entity slot, optionally with a count."""

    def __init__(self, ref, slot, item, amount=None):
        assert isinstance(ref, NBTStorable)
        self.ref = ref
        self.slot = slot
        self.item = item
        self.amount = amount

    def resolve(self, scope):
        if self.amount is not None:
            amount = ' %d' % self.amount
        else:
            amount = ''
        return 'replaceitem %s %s %s%s' % (self.ref.resolve(scope), self.slot,
                                           self.item.resolve(scope), amount)
class GiveItem(Command):
    """Give a number of items to the target entities."""

    def __init__(self, targets, item, count=1):
        assert isinstance(targets, EntityRef)
        self.targets = targets
        self.item = item
        self.count = count

    def resolve(self, scope):
        resolved_targets = self.targets.resolve(scope)
        resolved_item = self.item.resolve(scope)
        return 'give %s %s %d' % (resolved_targets, resolved_item, self.count)
class ClearItem(Command):
    """Clear up to max_count of an item from the targets' inventories."""

    def __init__(self, targets, item, max_count=-1):
        assert isinstance(targets, EntityRef)
        self.targets = targets
        self.item = item
        self.max_count = max_count

    def resolve(self, scope):
        resolved_targets = self.targets.resolve(scope)
        resolved_item = self.item.resolve(scope)
        return 'clear %s %s %d' % (resolved_targets, resolved_item,
                                   self.max_count)
class EffectGive(Command):
    """Give a status effect to a target entity."""

    def __init__(self, target, effect, seconds=None, amp=None, hide=None):
        assert isinstance(target, EntityRef)
        self.target = target
        self.effect = effect
        # Vanilla defaults: 30 seconds, amplifier 0, particles visible.
        self.seconds = 30 if seconds is None else seconds
        self.amp = 0 if amp is None else amp
        self.hide = False if hide is None else hide

    def resolve(self, scope):
        hide_flag = 'true' if self.hide else 'false'
        return 'effect give %s %s %d %d %s' % (self.target.resolve(scope),
                                               self.effect, self.seconds,
                                               self.amp, hide_flag)
class Particle(Command):
    """Spawn particles at a position, optionally only for given players."""

    def __init__(self, name, pos, delta, speed, count, mode, players):
        self.name = name
        self.pos = pos
        self.delta = delta
        self.speed = speed
        self.count = count
        self.mode = mode
        self.players = players

    def resolve(self, scope):
        if self.players:
            players = ' ' + self.players.resolve(scope)
        else:
            players = ''
        return 'particle %s %s %s %f %d %s%s' % (
            self.name, self.pos.resolve(scope), self.delta.resolve(scope),
            self.speed, self.count, self.mode, players)
class Title(Command):
    """Run a 'title' subcommand against a target entity."""

    def __init__(self, target, action, *args):
        assert isinstance(target, EntityRef)
        self.target = target
        self.action = action
        self.args = args

    def resolve(self, scope):
        if self.args:
            extra = ' ' + SimpleResolve(*self.args).resolve(scope)
        else:
            extra = ''
        return 'title %s %s%s' % (self.target.resolve(scope), self.action,
                                  extra)
class Summon(Command):
    """Summon an entity, optionally at a position and with NBT data."""

    def __init__(self, entity_name, pos, data=None):
        assert pos is None or isinstance(pos, WorldPos)
        self.name = entity_name
        self.pos = pos
        self.data = data

    def resolve(self, scope):
        if self.pos:
            pos = ' ' + self.pos.resolve(scope)
        elif self.data:
            # NBT data requires an explicit position; default to "here".
            pos = ' ~ ~ ~'
        else:
            pos = ''
        data = (' ' + self.data.resolve(scope)) if self.data else ''
        return 'summon %s%s%s' % (self.name, pos, data)
class Advancement(Command):
    """Grant or revoke advancements on a target entity."""

    def __init__(self, action, target, range, *args):
        # NOTE: the 'range' parameter name (kept for interface
        # compatibility) shadows the builtin within this method.
        assert action in ('grant', 'revoke')
        assert isinstance(target, EntityRef)
        self.action = action
        self.target = target
        self.range = range
        self.args = args

    def resolve(self, scope):
        if self.args:
            extra = ' ' + SimpleResolve(*self.args).resolve(scope)
        else:
            extra = ''
        return 'advancement %s %s %s%s' % (self.action,
                                           self.target.resolve(scope),
                                           self.range, extra)
| |
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import ast
from webob import Response
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import dpset
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.app.wsgi import ControllerBase, WSGIApplication
# Module logger for the REST stats app.
LOG = logging.getLogger('ryu.app.ofctl_rest')

# supported ofctl versions in this restful app
# Maps the OpenFlow wire-protocol version number to the ofctl helper
# module that builds/parses messages for that version.
supported_ofctl = {
    ofproto_v1_0.OFP_VERSION: ofctl_v1_0,
    ofproto_v1_2.OFP_VERSION: ofctl_v1_2,
    ofproto_v1_3.OFP_VERSION: ofctl_v1_3,
}
# REST API
#
# Retrieve the switch stats
#
# get the list of all switches
# GET /stats/switches
#
# get the desc stats of the switch
# GET /stats/desc/<dpid>
#
# get flows stats of the switch
# GET /stats/flow/<dpid>
#
# get flows stats of the switch filtered by the fields
# POST /stats/flow/<dpid>
#
# get aggregate flows stats of the switch
# GET /stats/aggregateflow/<dpid>
#
# get aggregate flows stats of the switch filtered by the fields
# POST /stats/aggregateflow/<dpid>
#
# get table stats of the switch
# GET /stats/table/<dpid>
#
# get table features stats of the switch
# GET /stats/tablefeatures/<dpid>
#
# get ports stats of the switch
# GET /stats/port/<dpid>
#
# get queues stats of the switch
# GET /stats/queue/<dpid>
#
# get queues config stats of the switch
# GET /stats/queueconfig/<dpid>/<port>
#
# get meter features stats of the switch
# GET /stats/meterfeatures/<dpid>
#
# get meter config stats of the switch
# GET /stats/meterconfig/<dpid>
#
# get meters stats of the switch
# GET /stats/meter/<dpid>
#
# get group features stats of the switch
# GET /stats/groupfeatures/<dpid>
#
# get groups desc stats of the switch
# GET /stats/groupdesc/<dpid>
#
# get groups stats of the switch
# GET /stats/group/<dpid>
#
# get ports description of the switch
# GET /stats/portdesc/<dpid>
# Update the switch stats
#
# add a flow entry
# POST /stats/flowentry/add
#
# modify all matching flow entries
# POST /stats/flowentry/modify
#
# modify flow entry strictly matching wildcards and priority
# POST /stats/flowentry/modify_strict
#
# delete all matching flow entries
# POST /stats/flowentry/delete
#
# delete flow entry strictly matching wildcards and priority
# POST /stats/flowentry/delete_strict
#
# delete all flow entries of the switch
# DELETE /stats/flowentry/clear/<dpid>
#
# add a meter entry
# POST /stats/meterentry/add
#
# modify a meter entry
# POST /stats/meterentry/modify
#
# delete a meter entry
# POST /stats/meterentry/delete
#
# add a group entry
# POST /stats/groupentry/add
#
# modify a group entry
# POST /stats/groupentry/modify
#
# delete a group entry
# POST /stats/groupentry/delete
#
# modify behavior of the physical port
# POST /stats/portdesc/modify
#
#
# send an experimenter message
# POST /stats/experimenter/<dpid>
class StatsController(ControllerBase):
    """REST controller exposing OpenFlow switch statistics and mutations.

    Every handler resolves the datapath from ``dpid``, picks the ofctl
    helper module matching the switch's OpenFlow version and returns a
    JSON response, or an error Response: 400 for malformed input, 404 for
    an unknown switch or command, 501 for an unsupported protocol/request.

    The per-handler validation boilerplate of the original implementation
    is factored into the ``_get_dp`` / ``_get_ofctl`` / ``_literal_body``
    helpers; status-code behavior is unchanged.
    """

    def __init__(self, req, link, data, **config):
        super(StatsController, self).__init__(req, link, data, **config)
        self.dpset = data['dpset']
        self.waiters = data['waiters']

    # -- internal helpers (shared by all handlers) -------------------------

    def _get_dp(self, dpid):
        """Validate *dpid* and resolve it to a datapath.

        :returns: ``(dp, None)`` on success, else ``(None, Response)``
            (400 for a malformed dpid, 404 for an unknown switch).
        """
        # Original check kept verbatim (string dpids come from the URL).
        if type(dpid) == str and not dpid.isdigit():
            LOG.debug('invalid dpid %s', dpid)
            return None, Response(status=400)
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return None, Response(status=404)
        return dp, None

    @staticmethod
    def _get_ofctl(dp, method=None):
        """Find the ofctl module for *dp*'s OpenFlow version.

        When *method* is given, the module must also provide that function
        (some requests do not exist in every OF version).

        :returns: ``(ofctl_module, None)`` or ``(None, 501 Response)``.
        """
        _ofctl = supported_ofctl.get(dp.ofproto.OFP_VERSION, None)
        if method is not None:
            if _ofctl is None or not hasattr(_ofctl, method):
                LOG.debug('Unsupported OF protocol or request not '
                          'supported in this OF protocol version')
                return None, Response(status=501)
        elif _ofctl is None:
            LOG.debug('Unsupported OF protocol')
            return None, Response(status=501)
        return _ofctl, None

    @staticmethod
    def _literal_body(req):
        """Parse the request body as a Python literal.

        :returns: ``(parsed, None)`` or ``(None, 400 Response)``.
        """
        try:
            return ast.literal_eval(req.body), None
        except SyntaxError:
            LOG.debug('invalid syntax %s', req.body)
            return None, Response(status=400)

    def _optional_filter(self, req):
        """Parse an optional flow-filter body; an empty body means no filter."""
        if req.body == '':
            return {}, None
        return self._literal_body(req)

    @staticmethod
    def _json_response(payload):
        """Wrap *payload* in a JSON 200 response."""
        return Response(content_type='application/json',
                        body=json.dumps(payload))

    def _stats_request(self, dpid, method, require=False, extra_args=None):
        """Common path for the read-only stats handlers.

        :param method: name of the ofctl getter to invoke.
        :param require: verify the getter exists in the ofctl module
            (version-dependent requests).
        :param extra_args: positional args appended after the waiters.
        """
        dp, err = self._get_dp(dpid)
        if err is not None:
            return err
        _ofctl, err = self._get_ofctl(dp, method if require else None)
        if err is not None:
            return err
        args = [dp, self.waiters] + list(extra_args or [])
        return self._json_response(getattr(_ofctl, method)(*args))

    # -- read-only stats endpoints ----------------------------------------

    def get_dpids(self, req, **_kwargs):
        """GET /stats/switches - list of connected datapath ids."""
        return self._json_response(list(self.dpset.dps.keys()))

    def get_desc_stats(self, req, dpid, **_kwargs):
        """GET /stats/desc/<dpid>."""
        return self._stats_request(dpid, 'get_desc_stats')

    def get_flow_stats(self, req, dpid, **_kwargs):
        """GET/POST /stats/flow/<dpid> - optionally filtered flow stats."""
        flow, err = self._optional_filter(req)
        if err is not None:
            return err
        return self._stats_request(dpid, 'get_flow_stats', extra_args=[flow])

    def get_aggregate_flow_stats(self, req, dpid, **_kwargs):
        """GET/POST /stats/aggregateflow/<dpid>."""
        flow, err = self._optional_filter(req)
        if err is not None:
            return err
        return self._stats_request(dpid, 'get_aggregate_flow_stats',
                                   extra_args=[flow])

    def get_table_stats(self, req, dpid, **_kwargs):
        """GET /stats/table/<dpid>."""
        return self._stats_request(dpid, 'get_table_stats')

    def get_table_features(self, req, dpid, **_kwargs):
        """GET /stats/tablefeatures/<dpid>."""
        return self._stats_request(dpid, 'get_table_features')

    def get_port_stats(self, req, dpid, **_kwargs):
        """GET /stats/port/<dpid>."""
        return self._stats_request(dpid, 'get_port_stats')

    def get_queue_stats(self, req, dpid, **_kwargs):
        """GET /stats/queue/<dpid>."""
        return self._stats_request(dpid, 'get_queue_stats')

    def get_queue_config(self, req, dpid, port, **_kwargs):
        """GET /stats/queueconfig/<dpid>/<port> - per-port queue config."""
        if type(port) == str and not port.isdigit():
            LOG.debug('invalid port %s', port)
            return Response(status=400)
        dp, err = self._get_dp(dpid)
        if err is not None:
            return err
        _ofctl, err = self._get_ofctl(dp)
        if err is not None:
            return err
        # Note the argument order: port comes before the waiters.
        queues = _ofctl.get_queue_config(dp, int(port), self.waiters)
        return self._json_response(queues)

    def get_meter_features(self, req, dpid, **_kwargs):
        """GET /stats/meterfeatures/<dpid> (OF version dependent)."""
        return self._stats_request(dpid, 'get_meter_features', require=True)

    def get_meter_config(self, req, dpid, **_kwargs):
        """GET /stats/meterconfig/<dpid> (OF version dependent)."""
        return self._stats_request(dpid, 'get_meter_config', require=True)

    def get_meter_stats(self, req, dpid, **_kwargs):
        """GET /stats/meter/<dpid> (OF version dependent)."""
        return self._stats_request(dpid, 'get_meter_stats', require=True)

    def get_group_features(self, req, dpid, **_kwargs):
        """GET /stats/groupfeatures/<dpid> (OF version dependent)."""
        return self._stats_request(dpid, 'get_group_features', require=True)

    def get_group_desc(self, req, dpid, **_kwargs):
        """GET /stats/groupdesc/<dpid> (OF version dependent)."""
        return self._stats_request(dpid, 'get_group_desc', require=True)

    def get_group_stats(self, req, dpid, **_kwargs):
        """GET /stats/group/<dpid> (OF version dependent)."""
        return self._stats_request(dpid, 'get_group_stats', require=True)

    def get_port_desc(self, req, dpid, **_kwargs):
        """GET /stats/portdesc/<dpid>."""
        return self._stats_request(dpid, 'get_port_desc')

    # -- mutating endpoints -----------------------------------------------

    # Map the REST {cmd} path element to the ofproto constant name; the
    # constant itself is looked up on the datapath's own ofproto module.
    _FLOW_CMD_ATTRS = {
        'add': 'OFPFC_ADD',
        'modify': 'OFPFC_MODIFY',
        'modify_strict': 'OFPFC_MODIFY_STRICT',
        'delete': 'OFPFC_DELETE',
        'delete_strict': 'OFPFC_DELETE_STRICT',
    }
    _METER_CMD_ATTRS = {
        'add': 'OFPMC_ADD',
        'modify': 'OFPMC_MODIFY',
        'delete': 'OFPMC_DELETE',
    }
    _GROUP_CMD_ATTRS = {
        'add': 'OFPGC_ADD',
        'modify': 'OFPGC_MODIFY',
        'delete': 'OFPGC_DELETE',
    }

    def _mod_entry(self, req, cmd, cmd_attrs, method, require=False):
        """Common path for the flow/meter/group entry POST handlers."""
        entry, err = self._literal_body(req)
        if err is not None:
            return err
        dp, err = self._get_dp(entry.get('dpid'))
        if err is not None:
            return err
        attr = cmd_attrs.get(cmd)
        if attr is None:
            # Unknown {cmd} path element.
            return Response(status=404)
        _ofctl, err = self._get_ofctl(dp, method if require else None)
        if err is not None:
            return err
        getattr(_ofctl, method)(dp, entry, getattr(dp.ofproto, attr))
        return Response(status=200)

    def mod_flow_entry(self, req, cmd, **_kwargs):
        """POST /stats/flowentry/{add|modify|modify_strict|delete|delete_strict}."""
        return self._mod_entry(req, cmd, self._FLOW_CMD_ATTRS,
                               'mod_flow_entry')

    def delete_flow_entry(self, req, dpid, **_kwargs):
        """DELETE /stats/flowentry/clear/<dpid> - delete all flow entries."""
        dp, err = self._get_dp(dpid)
        if err is not None:
            return err
        if ofproto_v1_0.OFP_VERSION == dp.ofproto.OFP_VERSION:
            flow = {}
        else:
            # OF1.2+ needs an explicit "all tables" selector.
            flow = {'table_id': dp.ofproto.OFPTT_ALL}
        _ofctl, err = self._get_ofctl(dp)
        if err is not None:
            return err
        _ofctl.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE)
        return Response(status=200)

    def mod_meter_entry(self, req, cmd, **_kwargs):
        """POST /stats/meterentry/{add|modify|delete} (OF version dependent)."""
        return self._mod_entry(req, cmd, self._METER_CMD_ATTRS,
                               'mod_meter_entry', require=True)

    def mod_group_entry(self, req, cmd, **_kwargs):
        """POST /stats/groupentry/{add|modify|delete} (OF version dependent)."""
        return self._mod_entry(req, cmd, self._GROUP_CMD_ATTRS,
                               'mod_group_entry', require=True)

    def mod_port_behavior(self, req, cmd, **_kwargs):
        """POST /stats/portdesc/modify - change physical port behavior."""
        port_config, err = self._literal_body(req)
        if err is not None:
            return err
        dpid = port_config.get('dpid')
        if type(dpid) == str and not dpid.isdigit():
            LOG.debug('invalid dpid %s', dpid)
            return Response(status=400)
        port_no = port_config.get('port_no', 0)
        if type(port_no) == str and not port_no.isdigit():
            LOG.debug('invalid port_no %s', port_no)
            return Response(status=400)
        # NOTE(review): raises KeyError when dpid is numeric but unknown to
        # port_state; preserved from the original behavior.
        port_info = self.dpset.port_state[int(dpid)].get(port_no)
        if port_info:
            # Fill in hardware address / advertised speeds when the caller
            # did not supply them.
            port_config.setdefault('hw_addr', port_info.hw_addr)
            port_config.setdefault('advertise', port_info.advertised)
        else:
            return Response(status=404)
        dp = self.dpset.get(int(dpid))
        if dp is None:
            return Response(status=404)
        if cmd != 'modify':
            return Response(status=404)
        _ofctl, err = self._get_ofctl(dp)
        if err is not None:
            return err
        _ofctl.mod_port_behavior(dp, port_config)
        return Response(status=200)

    def send_experimenter(self, req, dpid, **_kwargs):
        """POST /stats/experimenter/<dpid> - send an experimenter message."""
        dp, err = self._get_dp(dpid)
        if err is not None:
            return err
        exp, err = self._literal_body(req)
        if err is not None:
            return err
        _ofctl, err = self._get_ofctl(dp, 'send_experimenter')
        if err is not None:
            return err
        _ofctl.send_experimenter(dp, exp)
        return Response(status=200)
class RestStatsApi(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
ofproto_v1_2.OFP_VERSION,
ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'dpset': dpset.DPSet,
'wsgi': WSGIApplication
}
def __init__(self, *args, **kwargs):
super(RestStatsApi, self).__init__(*args, **kwargs)
self.dpset = kwargs['dpset']
wsgi = kwargs['wsgi']
self.waiters = {}
self.data = {}
self.data['dpset'] = self.dpset
self.data['waiters'] = self.waiters
mapper = wsgi.mapper
wsgi.registory['StatsController'] = self.data
path = '/stats'
uri = path + '/switches'
mapper.connect('stats', uri,
controller=StatsController, action='get_dpids',
conditions=dict(method=['GET']))
uri = path + '/desc/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_desc_stats',
conditions=dict(method=['GET']))
uri = path + '/flow/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_flow_stats',
conditions=dict(method=['GET', 'POST']))
uri = path + '/aggregateflow/{dpid}'
mapper.connect('stats', uri,
controller=StatsController,
action='get_aggregate_flow_stats',
conditions=dict(method=['GET', 'POST']))
uri = path + '/table/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_table_stats',
conditions=dict(method=['GET']))
uri = path + '/tablefeatures/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_table_features',
conditions=dict(method=['GET']))
uri = path + '/port/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_port_stats',
conditions=dict(method=['GET']))
uri = path + '/queue/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_queue_stats',
conditions=dict(method=['GET']))
uri = path + '/queueconfig/{dpid}/{port}'
mapper.connect('stats', uri,
controller=StatsController, action='get_queue_config',
conditions=dict(method=['GET']))
uri = path + '/meterfeatures/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_meter_features',
conditions=dict(method=['GET']))
uri = path + '/meterconfig/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_meter_config',
conditions=dict(method=['GET']))
uri = path + '/meter/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_meter_stats',
conditions=dict(method=['GET']))
uri = path + '/groupfeatures/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_group_features',
conditions=dict(method=['GET']))
uri = path + '/groupdesc/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_group_desc',
conditions=dict(method=['GET']))
uri = path + '/group/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_group_stats',
conditions=dict(method=['GET']))
uri = path + '/portdesc/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_port_desc',
conditions=dict(method=['GET']))
uri = path + '/flowentry/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_flow_entry',
conditions=dict(method=['POST']))
uri = path + '/flowentry/clear/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='delete_flow_entry',
conditions=dict(method=['DELETE']))
uri = path + '/meterentry/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_meter_entry',
conditions=dict(method=['POST']))
uri = path + '/groupentry/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_group_entry',
conditions=dict(method=['POST']))
uri = path + '/portdesc/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_port_behavior',
conditions=dict(method=['POST']))
uri = path + '/experimenter/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='send_experimenter',
conditions=dict(method=['POST']))
@set_ev_cls([ofp_event.EventOFPStatsReply,
             ofp_event.EventOFPDescStatsReply,
             ofp_event.EventOFPFlowStatsReply,
             ofp_event.EventOFPAggregateStatsReply,
             ofp_event.EventOFPTableStatsReply,
             ofp_event.EventOFPTableFeaturesStatsReply,
             ofp_event.EventOFPPortStatsReply,
             ofp_event.EventOFPQueueStatsReply,
             ofp_event.EventOFPMeterStatsReply,
             ofp_event.EventOFPMeterFeaturesStatsReply,
             ofp_event.EventOFPMeterConfigStatsReply,
             ofp_event.EventOFPGroupStatsReply,
             ofp_event.EventOFPGroupFeaturesStatsReply,
             ofp_event.EventOFPGroupDescStatsReply,
             ofp_event.EventOFPPortDescStatsReply
             ], MAIN_DISPATCHER)
def stats_reply_handler(self, ev):
    """Collect stats/multipart replies and release the waiting requester.

    Replies are appended to the message list registered in self.waiters
    under (datapath id, transaction id); the waiter's event is set once
    the final reply of the series arrives.
    """
    reply = ev.msg
    datapath = reply.datapath
    # Drop replies nobody is waiting for.
    if datapath.id not in self.waiters:
        return
    pending = self.waiters[datapath.id]
    if reply.xid not in pending:
        return
    event, collected = pending[reply.xid]
    collected.append(reply)
    # Pick the per-version "more replies follow" flag.
    version = datapath.ofproto.OFP_VERSION
    if version in (ofproto_v1_0.OFP_VERSION, ofproto_v1_2.OFP_VERSION):
        more_flag = datapath.ofproto.OFPSF_REPLY_MORE
    elif version == ofproto_v1_3.OFP_VERSION:
        more_flag = datapath.ofproto.OFPMPF_REPLY_MORE
    else:
        more_flag = 0
    if reply.flags & more_flag:
        # More parts of this reply are still in flight.
        return
    del pending[reply.xid]
    event.set()
@set_ev_cls([ofp_event.EventOFPSwitchFeatures,
             ofp_event.EventOFPQueueGetConfigReply], MAIN_DISPATCHER)
def features_reply_handler(self, ev):
    """Deliver a single-message reply to its waiting requester."""
    reply = ev.msg
    datapath = reply.datapath
    # Drop replies nobody is waiting for.
    if datapath.id not in self.waiters:
        return
    pending = self.waiters[datapath.id]
    if reply.xid not in pending:
        return
    event, collected = pending[reply.xid]
    collected.append(reply)
    # These replies are never multipart: finish immediately.
    del pending[reply.xid]
    event.set()
| |
"""Support for Synology NAS Sensors."""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
CONF_HOST,
CONF_USERNAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
ATTR_ATTRIBUTION,
TEMP_CELSIUS,
CONF_MONITORED_CONDITIONS,
EVENT_HOMEASSISTANT_START,
CONF_DISKS,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Synology"
# Configuration key for the optional list of volumes to monitor.
CONF_VOLUMES = "volumes"
DEFAULT_NAME = "Synology DSM"
# Default DSM HTTPS management port.
DEFAULT_PORT = 5001
# Minimum interval between DSM API polls (enforced via @Throttle on
# SynoApi.update).
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
# Utilisation sensor types: key -> [friendly name, unit, icon].
_UTILISATION_MON_COND = {
    "cpu_other_load": ["CPU Load (Other)", "%", "mdi:chip"],
    "cpu_user_load": ["CPU Load (User)", "%", "mdi:chip"],
    "cpu_system_load": ["CPU Load (System)", "%", "mdi:chip"],
    "cpu_total_load": ["CPU Load (Total)", "%", "mdi:chip"],
    "cpu_1min_load": ["CPU Load (1 min)", "%", "mdi:chip"],
    "cpu_5min_load": ["CPU Load (5 min)", "%", "mdi:chip"],
    "cpu_15min_load": ["CPU Load (15 min)", "%", "mdi:chip"],
    "memory_real_usage": ["Memory Usage (Real)", "%", "mdi:memory"],
    "memory_size": ["Memory Size", "Mb", "mdi:memory"],
    "memory_cached": ["Memory Cached", "Mb", "mdi:memory"],
    "memory_available_swap": ["Memory Available (Swap)", "Mb", "mdi:memory"],
    "memory_available_real": ["Memory Available (Real)", "Mb", "mdi:memory"],
    "memory_total_swap": ["Memory Total (Swap)", "Mb", "mdi:memory"],
    "memory_total_real": ["Memory Total (Real)", "Mb", "mdi:memory"],
    "network_up": ["Network Up", "Kbps", "mdi:upload"],
    "network_down": ["Network Down", "Kbps", "mdi:download"],
}
# Per-volume storage sensor types: key -> [friendly name, unit, icon].
_STORAGE_VOL_MON_COND = {
    "volume_status": ["Status", None, "mdi:checkbox-marked-circle-outline"],
    "volume_device_type": ["Type", None, "mdi:harddisk"],
    "volume_size_total": ["Total Size", None, "mdi:chart-pie"],
    "volume_size_used": ["Used Space", None, "mdi:chart-pie"],
    "volume_percentage_used": ["Volume Used", "%", "mdi:chart-pie"],
    "volume_disk_temp_avg": ["Average Disk Temp", None, "mdi:thermometer"],
    "volume_disk_temp_max": ["Maximum Disk Temp", None, "mdi:thermometer"],
}
# Per-disk storage sensor types: key -> [friendly name, unit, icon].
_STORAGE_DSK_MON_COND = {
    "disk_name": ["Name", None, "mdi:harddisk"],
    "disk_device": ["Device", None, "mdi:dots-horizontal"],
    "disk_smart_status": ["Status (Smart)", None, "mdi:checkbox-marked-circle-outline"],
    "disk_status": ["Status", None, "mdi:checkbox-marked-circle-outline"],
    "disk_exceed_bad_sector_thr": ["Exceeded Max Bad Sectors", None, "mdi:test-tube"],
    "disk_below_remain_life_thr": ["Below Min Remaining Life", None, "mdi:test-tube"],
    "disk_temp": ["Temperature", None, "mdi:thermometer"],
}
# All keys a user may list under monitored_conditions.
_MONITORED_CONDITIONS = (
    list(_UTILISATION_MON_COND.keys())
    + list(_STORAGE_VOL_MON_COND.keys())
    + list(_STORAGE_DSK_MON_COND.keys())
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_SSL, default=True): cv.boolean,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(
            cv.ensure_list, [vol.In(_MONITORED_CONDITIONS)]
        ),
        vol.Optional(CONF_DISKS): cv.ensure_list,
        vol.Optional(CONF_VOLUMES): cv.ensure_list,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Synology NAS Sensor."""

    def run_setup(event):
        """Create the Synology sensors once Home Assistant has started.

        Setup is deferred until the start event so that all other
        entities can already be created.
        """
        name = config.get(CONF_NAME)
        unit = hass.config.units.temperature_unit
        api = SynoApi(
            config.get(CONF_HOST),
            config.get(CONF_PORT),
            config.get(CONF_USERNAME),
            config.get(CONF_PASSWORD),
            unit,
            config.get(CONF_SSL),
        )
        monitored = config.get(CONF_MONITORED_CONDITIONS)

        # Utilisation (CPU / memory / network) sensors.
        entities = [
            SynoNasUtilSensor(api, name, cond, _UTILISATION_MON_COND[cond])
            for cond in monitored
            if cond in _UTILISATION_MON_COND
        ]

        # One sensor per monitored condition per volume.
        if api.storage.volumes is not None:
            for volume in config.get(CONF_VOLUMES, api.storage.volumes):
                entities.extend(
                    SynoNasStorageSensor(
                        api, name, cond, _STORAGE_VOL_MON_COND[cond], volume
                    )
                    for cond in monitored
                    if cond in _STORAGE_VOL_MON_COND
                )

        # One sensor per monitored condition per disk.
        if api.storage.disks is not None:
            for disk in config.get(CONF_DISKS, api.storage.disks):
                entities.extend(
                    SynoNasStorageSensor(
                        api, name, cond, _STORAGE_DSK_MON_COND[cond], disk
                    )
                    for cond in monitored
                    if cond in _STORAGE_DSK_MON_COND
                )

        add_entities(entities, True)

    # Wait until start event is sent to load this component.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class SynoApi:
    """Thin wrapper around the SynologyDSM client with throttled updates."""

    def __init__(self, host, port, username, password, temp_unit, use_ssl):
        """Initialize the API wrapper class.

        Arguments mirror the platform configuration; temp_unit is Home
        Assistant's configured temperature unit, consumed by the
        temperature sensors.
        """
        from SynologyDSM import SynologyDSM

        self.temp_unit = temp_unit
        try:
            self._api = SynologyDSM(host, port, username, password, use_https=use_ssl)
        except Exception:
            # Bug fix: the previous bare ``except`` only logged here and
            # fell through, so ``self._api`` was never set and the
            # attribute accesses below crashed with a confusing
            # AttributeError. Log and re-raise so setup fails loudly.
            _LOGGER.error("Error setting up Synology DSM")
            raise
        # Will be updated when update() gets called.
        self.utilisation = self._api.utilisation
        self.storage = self._api.storage

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Refresh the cached DSM API information (throttled)."""
        self._api.update()
class SynoNasSensor(Entity):
    """Base representation of a Synology NAS sensor."""

    def __init__(self, api, name, variable, variable_info, monitor_device=None):
        """Initialize the sensor."""
        self._api = api
        self.monitor_device = monitor_device
        self.var_id = variable
        friendly_name, unit, icon = variable_info
        self.var_name = f"{name} {friendly_name}"
        self.var_units = unit
        self.var_icon = icon

    @property
    def name(self):
        """Return the name of the sensor, if any."""
        if self.monitor_device is None:
            return self.var_name
        return f"{self.var_name} ({self.monitor_device})"

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self.var_icon

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        temperature_ids = ("volume_disk_temp_avg", "volume_disk_temp_max", "disk_temp")
        if self.var_id in temperature_ids:
            # Temperature sensors report in the configured HA unit.
            return self._api.temp_unit
        return self.var_units

    def update(self):
        """Get the latest data for the states."""
        if self._api is not None:
            self._api.update()

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}
class SynoNasUtilSensor(SynoNasSensor):
    """Representation of a Synology utilisation sensor."""

    @property
    def state(self):
        """Return the state of the sensor."""
        network_ids = ("network_up", "network_down")
        memory_ids = (
            "memory_size",
            "memory_cached",
            "memory_available_swap",
            "memory_available_real",
            "memory_total_swap",
            "memory_total_real",
        )
        if self.var_id in network_ids:
            # Scale the raw reading down by 1024 for network sensors.
            raw = getattr(self._api.utilisation, self.var_id)(False)
            return round(raw / 1024.0, 1)
        if self.var_id in memory_ids:
            # Scale the raw reading down by 1024^2 for memory sensors.
            raw = getattr(self._api.utilisation, self.var_id)(False)
            return round(raw / 1024.0 / 1024.0, 1)
        # Everything else (CPU loads etc.) is reported as-is.
        return getattr(self._api.utilisation, self.var_id)
class SynoNasStorageSensor(SynoNasSensor):
    """Representation of a Synology storage (volume or disk) sensor."""

    @property
    def state(self):
        """Return the state of the sensor."""
        temp_sensors = ["volume_disk_temp_avg", "volume_disk_temp_max", "disk_temp"]
        if self.monitor_device is not None:
            if self.var_id in temp_sensors:
                attr = getattr(self._api.storage, self.var_id)(self.monitor_device)
                if attr is None:
                    return None
                # The API reports Celsius; convert to Fahrenheit if that is
                # the configured unit.
                if self._api.temp_unit == TEMP_CELSIUS:
                    return attr
                return round(attr * 1.8 + 32.0, 1)
            return getattr(self._api.storage, self.var_id)(self.monitor_device)
        # NOTE(review): returns None when monitor_device is unset — but the
        # platform always constructs this class with a volume/disk.
| |
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.python.slim.nets import inception
from tensorflow.contrib.slim.python.slim.nets import resnet_v2 as resnet_v2
from tensorflow.contrib.slim.python.slim.nets import resnet_utils as resnet_utils
def get_embedder(
    embedder_strategy, config, images, is_training, reuse=False,
    l2_normalize_embedding=True):
  """Returns an embedder based on config.

  Args:
    embedder_strategy: String, name of embedder version to return.
    config: LuaTable object, training config.
    images: 4-D float `Tensor` containing batch images.
    is_training: Boolean or placeholder for boolean,
      indicator for whether or not we're training.
    reuse: Boolean: Reuse embedder variable scope.
    l2_normalize_embedding: Boolean, whether or not to l2 normalize the
      embedding.
  Returns:
    embedder: An `Embedder` object.
  Raises:
    ValueError: if unknown embedder_strategy specified.
  """
  if embedder_strategy == 'inception_baseline':
    pretrained_ckpt = config.inception_conv_ss_fc.pretrained_checkpoint
    # Bug fix: pass the projection options by keyword. The old positional
    # call bound config.random_projection to the `reuse` parameter and
    # config.random_projection_dim to `random_projection`.
    return InceptionBaselineEmbedder(
        images,
        pretrained_ckpt,
        random_projection=config.random_projection,
        random_projection_dim=config.random_projection_dim)
  strategy_to_embedder = {
      'inception_conv_ss_fc': InceptionConvSSFCEmbedder,
      'resnet': ResnetEmbedder,
  }
  if embedder_strategy not in strategy_to_embedder:
    raise ValueError('unknown embedder_strategy', embedder_strategy)
  embedding_size = config.embedding_size
  l2_reg_weight = config.learning.l2_reg_weight
  # All non-baseline embedders share this constructor signature; each takes
  # its own sub-config keyed by strategy name.
  embedder = strategy_to_embedder[embedder_strategy](
      config[embedder_strategy], images, embedding_size,
      is_training, embedding_l2=l2_normalize_embedding,
      l2_reg_weight=l2_reg_weight, reuse=reuse)
  return embedder
def build_inceptionv3_graph(images, endpoint, is_training, checkpoint,
                            reuse=False):
  """Builds an InceptionV3 model graph.

  Args:
    images: A 4-D float32 `Tensor` of batch images.
    endpoint: String, name of the InceptionV3 endpoint.
    is_training: Boolean, whether or not to build a training or inference graph.
    checkpoint: String, path to the pretrained model checkpoint.
    reuse: Boolean, whether or not we are reusing the embedder.
  Returns:
    inception_output: `Tensor` holding the InceptionV3 output.
    inception_variables: List of inception variables.
    init_fn: Function to initialize the weights (if not reusing, then None).
  """
  with slim.arg_scope(inception.inception_v3_arg_scope()):
    _, endpoints = inception.inception_v3(
        images, num_classes=1001, is_training=is_training)
    # Take the activations at the requested named endpoint.
    inception_output = endpoints[endpoint]
    inception_variables = slim.get_variables_to_restore()
    # The global step must not be restored from the pretrained checkpoint.
    inception_variables = [
        i for i in inception_variables if 'global_step' not in i.name]
    if is_training and not reuse:
      # Only the first (non-reused) training construction restores weights;
      # init_fn matches the tf.train.Scaffold init_fn signature.
      init_saver = tf.train.Saver(inception_variables)
      def init_fn(scaffold, sess):
        del scaffold
        init_saver.restore(sess, checkpoint)
    else:
      init_fn = None
    return inception_output, inception_variables, init_fn
class InceptionBaselineEmbedder(object):
  """Produces pre-trained InceptionV3 embeddings (no trainable adaptation)."""

  def __init__(self, images, pretrained_ckpt, reuse=False,
               random_projection=False, random_projection_dim=32):
    # NOTE(review): `reuse` precedes the projection args, so callers must
    # pass random_projection/random_projection_dim by keyword — a purely
    # positional call silently binds them to the wrong parameters.
    # Build InceptionV3 graph (inference mode, Mixed_7c endpoint).
    (inception_output,
     self.inception_variables,
     self.init_fn) = build_inceptionv3_graph(
         images, 'Mixed_7c', False, pretrained_ckpt, reuse)
    # Pool 8x8x2048 -> 1x1x2048.
    embedding = slim.avg_pool2d(inception_output, [8, 8], stride=1)
    embedding = tf.squeeze(embedding, [1, 2])
    if random_projection:
      # Fixed-seed random projection from 2048 to random_projection_dim.
      embedding = tf.matmul(
          embedding, tf.random_normal(
              shape=[2048, random_projection_dim], seed=123))
    self.embedding = embedding
class PretrainedEmbedder(object):
  """Base class for embedders that take pre-trained networks as input."""
  # NOTE(review): __metaclass__ only takes effect on Python 2; on Python 3
  # the abstractmethods below are not enforced.
  __metaclass__ = ABCMeta

  def __init__(self, config, images, embedding_size, is_training,
               embedding_l2=True, l2_reg_weight=1e-6, reuse=False):
    """Constructor.

    Args:
      config: A T object holding training config.
      images: A 4-D float32 `Tensor` holding images to embed.
      embedding_size: Int, the size of the embedding.
      is_training: Boolean, whether or not this is a training or inference-time
        graph.
      embedding_l2: Boolean, whether or not to l2 normalize the embedding.
      l2_reg_weight: Float, weight applied to l2 weight regularization.
      reuse: Boolean, whether or not we're reusing this graph.
    """
    # Pull out all the embedder hyperparameters.
    self._config = config
    self._embedding_size = embedding_size
    self._l2_reg_weight = l2_reg_weight
    self._embedding_l2 = embedding_l2
    self._is_training = is_training
    self._reuse = reuse
    # Pull out pretrained hparams.
    pretrained_checkpoint = config.pretrained_checkpoint
    pretrained_layer = config.pretrained_layer
    pretrained_keep_prob = config.dropout.keep_pretrained
    # Build pretrained graph; the subclass decides which network to use.
    (pretrained_output,
     self._pretrained_variables,
     self.init_fn) = self.build_pretrained_graph(
         images, pretrained_layer, pretrained_checkpoint, is_training, reuse)
    # Optionally drop out the activations.
    pretrained_output = slim.dropout(
        pretrained_output, keep_prob=pretrained_keep_prob,
        is_training=is_training)
    self._pretrained_output = pretrained_output

  @abstractmethod
  def build_pretrained_graph(self, images, layer, pretrained_checkpoint,
                             is_training, reuse):
    """Builds the graph for the pre-trained network.

    Method to be overridden by implementations.

    Args:
      images: A 4-D tf.float32 `Tensor` holding images to embed.
      layer: String, defining which pretrained layer to take as input
        to adaptation layers.
      pretrained_checkpoint: String, path to a checkpoint used to load
        pretrained weights.
      is_training: Boolean, whether or not we're in training mode.
      reuse: Boolean, whether or not to reuse embedder weights.
    Returns:
      pretrained_output: A 2 or 3-d tf.float32 `Tensor` holding pretrained
        activations.
    """
    pass

  @abstractmethod
  def construct_embedding(self):
    """Builds an embedding function on top of images.

    Method to be overridden by implementations.

    Returns:
      embeddings: A 2-d float32 `Tensor` of shape [batch_size, embedding_size]
        holding the embedded images.
    """
    pass

  def get_trainable_variables(self):
    """Gets a list of variables to optimize."""
    if self._config.finetune:
      return tf.trainable_variables()
    else:
      # NOTE(review): _adaptation_scope is set by subclasses inside
      # construct_embedding; calling this first raises AttributeError.
      adaptation_only_vars = tf.get_collection(
          tf.GraphKeys.TRAINABLE_VARIABLES, scope=self._adaptation_scope)
      return adaptation_only_vars
class ResnetEmbedder(PretrainedEmbedder):
  """Resnet TCN.

  ResnetV2 -> resnet adaptation layers -> optional l2 normalize -> embedding.
  """

  def __init__(self, config, images, embedding_size, is_training,
               embedding_l2=True, l2_reg_weight=1e-6, reuse=False):
    super(ResnetEmbedder, self).__init__(
        config, images, embedding_size, is_training, embedding_l2,
        l2_reg_weight, reuse)

  def build_pretrained_graph(
      self, images, resnet_layer, checkpoint, is_training, reuse=False):
    """See baseclass."""
    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
      # ResnetV2-50 backbone; activations taken at the configured block.
      _, endpoints = resnet_v2.resnet_v2_50(
          images, is_training=is_training, reuse=reuse)
      resnet_layer = 'resnet_v2_50/block%d' % resnet_layer
      resnet_output = endpoints[resnet_layer]
      resnet_variables = slim.get_variables_to_restore()
      # The global step must not be restored from the checkpoint.
      resnet_variables = [
          i for i in resnet_variables if 'global_step' not in i.name]
      if is_training and not reuse:
        # Restore pretrained weights only on first (non-reused) build.
        init_saver = tf.train.Saver(resnet_variables)
        def init_fn(scaffold, sess):
          del scaffold
          init_saver.restore(sess, checkpoint)
      else:
        init_fn = None
      return resnet_output, resnet_variables, init_fn

  def construct_embedding(self):
    """Builds an embedding function on top of images.

    Method to be overridden by implementations.

    Returns:
      embeddings: A 2-d float32 `Tensor` of shape [batch_size, embedding_size]
        holding the embedded images.
    """
    with tf.variable_scope('tcn_net', reuse=self._reuse) as vs:
      self._adaptation_scope = vs.name
      net = self._pretrained_output
      # Define some adaptation blocks on top of the pre-trained resnet output.
      adaptation_blocks = []
      # Config string like "256_2-512_3" -> (depth, num_units) pairs.
      # NOTE(review): map() is a one-shot iterator on Python 3; each entry
      # is unpacked exactly once below, so this py2-styled code still works.
      adaptation_block_params = [map(
          int, i.split('_')) for i in self._config.adaptation_blocks.split('-')]
      for i, (depth, num_units) in enumerate(adaptation_block_params):
        block = resnet_v2.resnet_v2_block(
            'adaptation_block_%d' % i, base_depth=depth, num_units=num_units,
            stride=1)
        adaptation_blocks.append(block)
      # Stack them on top of the resnet output.
      net = resnet_utils.stack_blocks_dense(
          net, adaptation_blocks, output_stride=None)
      # Average pool the output.
      net = tf.reduce_mean(net, [1, 2], name='adaptation_pool', keep_dims=True)
      if self._config.emb_connection == 'fc':
        # Use fully connected layer to project to embedding layer.
        fc_hidden_sizes = self._config.fc_hidden_sizes
        if fc_hidden_sizes == 'None':
          fc_hidden_sizes = []
        else:
          fc_hidden_sizes = map(int, fc_hidden_sizes.split('_'))
        fc_hidden_keep_prob = self._config.dropout.keep_fc
        # NOTE(review): tf.squeeze with no axis also drops a batch dimension
        # of size 1 — confirm batch_size > 1 at callers.
        net = tf.squeeze(net)
        for fc_hidden_size in fc_hidden_sizes:
          net = slim.layers.fully_connected(net, fc_hidden_size)
          if fc_hidden_keep_prob < 1.0:
            net = slim.dropout(net, keep_prob=fc_hidden_keep_prob,
                               is_training=self._is_training)
        # Connect last FC layer to embedding.
        embedding = slim.layers.fully_connected(net, self._embedding_size,
                                                activation_fn=None)
      else:
        # Use 1x1 conv layer to project to embedding layer.
        embedding = slim.conv2d(
            net, self._embedding_size, [1, 1], activation_fn=None,
            normalizer_fn=None, scope='embedding')
        embedding = tf.squeeze(embedding)
      # Optionally L2 normalize the embedding.
      if self._embedding_l2:
        embedding = tf.nn.l2_normalize(embedding, dim=1)
      return embedding

  def get_trainable_variables(self):
    """Gets a list of variables to optimize."""
    if self._config.finetune:
      return tf.trainable_variables()
    else:
      # Only the adaptation layers (scope set in construct_embedding).
      adaptation_only_vars = tf.get_collection(
          tf.GraphKeys.TRAINABLE_VARIABLES, scope=self._adaptation_scope)
      return adaptation_only_vars
class InceptionEmbedderBase(PretrainedEmbedder):
  """Base class for embedders built on pre-trained InceptionV3 activations."""

  def __init__(self, config, images, embedding_size, is_training,
               embedding_l2=True, l2_reg_weight=1e-6, reuse=False):
    super(InceptionEmbedderBase, self).__init__(
        config, images, embedding_size, is_training, embedding_l2,
        l2_reg_weight, reuse)

  def build_pretrained_graph(
      self, images, inception_layer, checkpoint, is_training, reuse=False):
    """See baseclass."""
    # Delegate directly to the shared InceptionV3 graph builder, which
    # returns (output, variables, init_fn).
    return build_inceptionv3_graph(
        images, inception_layer, is_training, checkpoint, reuse)
class InceptionConvSSFCEmbedder(InceptionEmbedderBase):
  """TCN Embedder V1.

  InceptionV3 (mixed_5d) -> conv layers -> spatial softmax ->
  fully connected -> optional l2 normalize -> embedding.
  """

  def __init__(self, config, images, embedding_size, is_training,
               embedding_l2=True, l2_reg_weight=1e-6, reuse=False):
    super(InceptionConvSSFCEmbedder, self).__init__(
        config, images, embedding_size, is_training, embedding_l2,
        l2_reg_weight, reuse)
    # Pull out all the hyperparameters specific to this embedder.
    self._additional_conv_sizes = config.additional_conv_sizes
    self._conv_hidden_keep_prob = config.dropout.keep_conv
    self._fc_hidden_sizes = config.fc_hidden_sizes
    self._fc_hidden_keep_prob = config.dropout.keep_fc

  def construct_embedding(self):
    """Builds a conv -> spatial softmax -> FC adaptation network."""
    is_training = self._is_training
    normalizer_params = {'is_training': is_training}
    with tf.variable_scope('tcn_net', reuse=self._reuse) as vs:
      self._adaptation_scope = vs.name
      # Conv and FC layers both get batch norm plus l2 weight/bias
      # regularization.
      with slim.arg_scope(
          [slim.layers.conv2d],
          activation_fn=tf.nn.relu,
          normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params,
          weights_regularizer=slim.regularizers.l2_regularizer(
              self._l2_reg_weight),
          biases_regularizer=slim.regularizers.l2_regularizer(
              self._l2_reg_weight)):
        with slim.arg_scope(
            [slim.layers.fully_connected],
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params,
            weights_regularizer=slim.regularizers.l2_regularizer(
                self._l2_reg_weight),
            biases_regularizer=slim.regularizers.l2_regularizer(
                self._l2_reg_weight)):
          # Input to embedder is pre-trained inception output.
          net = self._pretrained_output
          # Optionally add more conv layers.
          for num_filters in self._additional_conv_sizes:
            net = slim.layers.conv2d(
                net, num_filters, kernel_size=[3, 3], stride=[1, 1])
            net = slim.dropout(net, keep_prob=self._conv_hidden_keep_prob,
                               is_training=is_training)
          # Take the spatial soft arg-max of the last convolutional layer.
          # This is a form of spatial attention over the activations.
          # See more here: http://arxiv.org/abs/1509.06113.
          net = tf.contrib.layers.spatial_softmax(net)
          # Expose the attention features for downstream inspection.
          self.spatial_features = net
          # Add fully connected layers.
          net = slim.layers.flatten(net)
          for fc_hidden_size in self._fc_hidden_sizes:
            net = slim.layers.fully_connected(net, fc_hidden_size)
            if self._fc_hidden_keep_prob < 1.0:
              net = slim.dropout(net, keep_prob=self._fc_hidden_keep_prob,
                                 is_training=is_training)
          # Connect last FC layer to embedding.
          net = slim.layers.fully_connected(net, self._embedding_size,
                                            activation_fn=None)
          # Optionally L2 normalize the embedding.
          if self._embedding_l2:
            net = tf.nn.l2_normalize(net, dim=1)
          return net
| |
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import SET_NULL
from django.db.models.signals import post_init, post_save, pre_save
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from multi_email_field.fields import MultiEmailField
from symposion.conference.models import Conference
from pycon.sponsorship import SPONSOR_COORDINATORS
from pycon.sponsorship.managers import SponsorManager
from symposion.utils.mail import send_email
# The benefits we track as individual fields on sponsors
# using separate SponsorBenefit records.
# Names are the names in the database as defined by PyCon organizers.
# Field names are the benefit names, lowercased, with
# spaces changed to _, and with "_benefit" appended.
# Column titles are arbitrary.
# Sponsor.save() refreshes the corresponding *_benefit completeness
# fields from these entries on every save.
BENEFITS = [
    # Print logo not being used for 2017 but keep it in anyway
    {
        'name': 'Print logo',
        'field_name': 'print_logo_benefit',
        'column_title': _(u'Print Logo'),
    },
    {
        'name': 'Advertisement',
        'field_name': 'advertisement_benefit',
        'column_title': _(u'Ad'),
    }
]
class SponsorLevel(models.Model):
    """A named sponsorship tier offered for one conference."""

    conference = models.ForeignKey(Conference, verbose_name=_("conference"))
    name = models.CharField(_("name"), max_length=100)
    # presumably gates whether new applications may choose this level —
    # verify against the application views.
    available = models.BooleanField(default=True)
    # Sort order within a conference (lower sorts first, see Meta.ordering).
    order = models.IntegerField(_("order"), default=0)
    cost = models.PositiveIntegerField(_("cost"))
    description = models.TextField(_("description"), blank=True, help_text=_("This is private."))

    class Meta:
        ordering = ["conference", "order"]
        verbose_name = _("sponsor level")
        verbose_name_plural = _("sponsor levels")

    def __unicode__(self):
        # Python 2 display string, e.g. "<conference> <level name>".
        return u"%s %s" % (self.conference, self.name)

    def sponsors(self):
        """Return this level's active sponsors, oldest application first."""
        return self.sponsor_set.filter(active=True).order_by("added")
class Sponsor(models.Model):
    """A sponsorship application, which becomes a sponsor once active."""

    applicant = models.ForeignKey(User, related_name="sponsorships", verbose_name=_("applicant"), null=True, on_delete=SET_NULL)
    name = models.CharField(_("Sponsor Name"), max_length=100)
    display_url = models.CharField(
        _("Link text - text to display on link to sponsor page, if different from the actual link"),
        max_length=200,
        default='',
        blank=True
    )
    external_url = models.URLField(
        _("Link to sponsor web page"),
        help_text=_("(Must include https:// or http://.)")
    )
    twitter_username = models.CharField(
        _("Twitter username"), blank=True, max_length=15,
    )
    annotation = models.TextField(_("annotation"), blank=True)
    contact_name = models.CharField(_("Contact Name"), max_length=100)
    contact_emails = MultiEmailField(
        _(u"Contact Emails"), default='',
        help_text=_(u"Please enter one email address per line.")
    )
    contact_phone = models.CharField(_(u"Contact Phone"), max_length=32)
    contact_address = models.TextField(_(u"Contact Address"))
    level = models.ForeignKey(SponsorLevel, verbose_name=_("level"))
    added = models.DateTimeField(_("added"), default=datetime.datetime.now)
    active = models.BooleanField(_("active"), default=False)
    # Maintained by the pre_save signal (_check_active_change): stamped when
    # the sponsor becomes active, cleared when it is deactivated.
    approval_time = models.DateTimeField(null=True, blank=True, editable=False)
    wants_table = models.BooleanField(
        _(
            'Does your organization want a table at the job fair? '
            '(See <a href="/2017/sponsors/fees/">Estimated Sponsor Fees</a> '
            'for costs that might be involved.)'
        ), default=False)
    wants_booth = models.BooleanField(
        _(
            'Does your organization want a booth on the expo floor? '
            '(See <a href="/2017/sponsors/fees/">Estimated Sponsor Fees</a> '
            'for costs that might be involved.)'
        ), default=False)
    small_entity_discount = models.BooleanField(
        _(
            'Does your organization have fewer than 25 employees,'
            ' which qualifies you for our Small Entity Discount?'
        ), default=False)
    # Whether benefits are complete.
    # True = complete, False = incomplete, Null = n/a for this sponsor level.
    # Recomputed from SponsorBenefit records inside save().
    print_logo_benefit = models.NullBooleanField(help_text=_(u"Print logo benefit is complete"))
    advertisement_benefit = models.NullBooleanField(help_text=_(u"Advertisement benefit is complete"))
    registration_promo_codes = models.CharField(max_length=200, blank=True, default='')
    expo_promo_codes = models.CharField(max_length=200, blank=True, default='')
    booth_number = models.IntegerField(blank=True, null=True, default=None)
    job_fair_table_number = models.IntegerField(blank=True, null=True, default=None)
    web_description = models.TextField(
        _(u"Company description (to show on the web site)"),
    )
    web_logo = models.ImageField(
        _(u"Company logo (to show on the web site)"),
        upload_to="sponsor_files",
        null=True,  # This is nullable in case old data doesn't have a web logo
        # We enforce it on all new or edited sponsors though.
    )
    objects = SponsorManager()

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name = _("sponsor")
        verbose_name_plural = _("sponsors")
        ordering = ['name']

    def save(self, *args, **kwargs):
        # Set fields related to benefits being complete before persisting.
        for benefit in BENEFITS:
            field_name = benefit['field_name']
            benefit_name = benefit['name']
            setattr(self, field_name, self.benefit_is_complete(benefit_name))
        super(Sponsor, self).save(*args, **kwargs)

    def get_absolute_url(self):
        # Inactive sponsors fall back to the public listing page.
        if self.active:
            return reverse("sponsor_detail", kwargs={"pk": self.pk})
        return reverse("sponsor_list")

    def get_display_url(self):
        """
        Return the text to display on the sponsor's link
        """
        if self.display_url:
            return self.display_url
        else:
            return self.external_url

    def render_email(self, text):
        """Replace special strings in text with values from the sponsor.

        %%NAME%% --> Sponsor name
        %%REGISTRATION_PROMO_CODES%% --> Registration promo codes, or empty string
        %%EXPO_PROMO_CODES%% --> Expo Hall only promo codes, or empty string
        %%BOOTH_NUMBER%% --> Booth number, or empty string if not set
        %%JOB_FAIR_TABLE_NUMBER%% --> Job fair table number, or empty string if not set
        """
        text = text.replace("%%NAME%%", self.name)
        text = text.replace("%%REGISTRATION_PROMO_CODES%%",
                            self.registration_promo_codes or 'N/A')
        text = text.replace("%%EXPO_PROMO_CODES%%",
                            self.expo_promo_codes or 'N/A')
        # The next two are numbers, or if not set, None. We don't want to
        # display "None" :-), but we might want to display "0".
        booth = str(self.booth_number) if self.booth_number is not None else ""
        text = text.replace("%%BOOTH_NUMBER%%", booth)
        table = str(self.job_fair_table_number) if self.job_fair_table_number is not None else ""
        text = text.replace("%%JOB_FAIR_TABLE_NUMBER%%", table)
        return text

    @cached_property
    def website_logo_url(self):
        # None when no logo has been uploaded.
        if self.web_logo:
            return self.web_logo.url

    @property
    def joblisting_text(self):
        # Lazily cached: text of the first "Job Listing" benefit, or None
        # when the sponsor has no such benefit.
        if not hasattr(self, "_joblisting_text"):
            self._joblisting_text = None
            benefits = self.sponsor_benefits.filter(
                benefit__name__startswith='Job Listing',
            )
            if benefits.count():
                self._joblisting_text = benefits[0].text
        return self._joblisting_text

    @property
    def website_logo(self):
        # Alias kept for callers that use the older attribute name.
        return self.web_logo

    def reset_benefits(self):
        """
        Reset all benefits for this sponsor to the defaults for their
        sponsorship level.
        """
        level = None
        try:
            level = self.level
        except SponsorLevel.DoesNotExist:
            pass
        allowed_benefits = []
        if level:
            for benefit_level in level.benefit_levels.all():
                # Create all needed benefits if they don't exist already
                sponsor_benefit, created = SponsorBenefit.objects.get_or_create(
                    sponsor=self, benefit=benefit_level.benefit)
                # and set to default limits for this level.
                sponsor_benefit.max_words = benefit_level.max_words
                sponsor_benefit.other_limits = benefit_level.other_limits
                # and set to active
                sponsor_benefit.active = True
                # @@@ We don't call sponsor_benefit.clean here. This means
                # that if the sponsorship level for a sponsor is adjusted
                # downwards, an existing too-long text entry can remain,
                # and won't raise a validation error until it's next
                # edited.
                sponsor_benefit.save()
                allowed_benefits.append(sponsor_benefit.pk)
        # Any remaining sponsor benefits that don't normally belong to
        # this level are set to inactive
        self.sponsor_benefits.exclude(pk__in=allowed_benefits).update(active=False, max_words=None, other_limits="")

    # @@@ should this just be done centrally?
    def send_coordinator_emails(self):
        # Notify every member of the sponsor-coordinators group.
        for user in User.objects.filter(groups__name=SPONSOR_COORDINATORS):
            send_email(
                [user.email], "sponsor_signup",
                context={"sponsor": self}
            )

    def benefit_is_complete(self, name):
        """Return True - benefit is complete, False - benefit is not complete,
        or None - benefit not applicable for this sponsor's level """
        if BenefitLevel.objects.filter(level=self.level, benefit__name=name).exists():
            try:
                benefit = self.sponsor_benefits.get(benefit__name=name)
            except SponsorBenefit.DoesNotExist:
                return False
            else:
                return benefit.is_complete
        else:
            return None  # Not an applicable benefit for this sponsor's level
def _store_initial_level(sender, instance, **kwargs):
if instance:
instance._initial_level_id = instance.level_id
post_init.connect(_store_initial_level, sender=Sponsor)
def _check_level_change(sender, instance, created, **kwargs):
    # Re-seed benefits when a sponsor is first created, or when its level
    # differs from the one captured at post_init.
    if instance and (created or instance.level_id != instance._initial_level_id):
        instance.reset_benefits()
post_save.connect(_check_level_change, sender=Sponsor)
def _send_admin_email(sender, instance, created, **kwargs):
    """
    Send an email to the sponsors mailing list when a new application is
    submitted.
    """
    # Fires only on initial creation, not on subsequent saves.
    if created:
        send_email(
            to=[settings.SPONSORSHIP_EMAIL],
            kind='new_sponsor',
            context={
                'sponsor': instance,
            },
        )
post_save.connect(_send_admin_email, sender=Sponsor)
def _store_initial_active(sender, instance, **kwargs):
    # Snapshot the active flag so _check_active_change can detect
    # activation/deactivation transitions on the next save.
    if instance:
        instance._initial_active = instance.active
# Connected to post_save as well so the snapshot stays current across
# repeated saves of the same in-memory instance.
post_init.connect(_store_initial_active, sender=Sponsor)
post_save.connect(_store_initial_active, sender=Sponsor)
def _check_active_change(sender, instance, **kwargs):
    # Maintain approval_time: stamp it when a sponsor becomes active,
    # clear it when the sponsor is deactivated.
    if instance:
        if instance.active:
            if not instance._initial_active or not instance.approval_time:
                # Instance is newly active.
                instance.approval_time = datetime.datetime.now()
        else:
            instance.approval_time = None
pre_save.connect(_check_active_change, sender=Sponsor)
def _send_sponsor_notification_emails(sender, instance, created, **kwargs):
    # New sponsor applications also notify the coordinator group.
    if instance and created:
        instance.send_coordinator_emails()
post_save.connect(_send_sponsor_notification_emails, sender=Sponsor)
class Benefit(models.Model):
    """A kind of benefit a sponsor may receive (e.g. logo, text blurb)."""

    name = models.CharField(_("name"), max_length=100, unique=True)
    description = models.TextField(_("description"), blank=True)
    # The type determines which SponsorBenefit data field is editable;
    # see SponsorBenefit.data_fields().
    type = models.CharField(
        _("type"),
        choices=[
            ("text", "Text"),
            ("richtext", "Rich Text"),
            ("file", "File"),
            ("weblogo", "Web Logo"),
            ("simple", "Simple")
        ],
        max_length=10,
        default="simple"
    )

    def __unicode__(self):
        return self.name
class BenefitLevel(models.Model):
    """Joins a Benefit to a SponsorLevel together with the default limits."""

    benefit = models.ForeignKey(
        Benefit,
        related_name="benefit_levels",
        verbose_name=_("benefit")
    )
    level = models.ForeignKey(
        SponsorLevel,
        related_name="benefit_levels",
        verbose_name=_("level")
    )
    # Default limits copied onto SponsorBenefit by Sponsor.reset_benefits().
    max_words = models.PositiveIntegerField(_("max words"), blank=True, null=True)
    other_limits = models.CharField(_("other limits"), max_length=200, blank=True)

    class Meta:
        ordering = ["level"]

    def __unicode__(self):
        return u"%s - %s" % (self.level, self.benefit)
class SponsorBenefit(models.Model):
    """One benefit as granted to one sponsor, with its content and limits."""

    sponsor = models.ForeignKey(
        Sponsor,
        related_name="sponsor_benefits",
        verbose_name=_("sponsor")
    )
    benefit = models.ForeignKey(
        Benefit,
        related_name="sponsor_benefits",
        verbose_name=_("benefit")
    )
    active = models.BooleanField(default=True)

    # Limits: will initially be set to defaults from corresponding BenefitLevel
    max_words = models.PositiveIntegerField(_("max words"), blank=True, null=True)
    other_limits = models.CharField(_("other limits"), max_length=200, blank=True)

    # Data: zero or one of these fields will be used, depending on the
    # type of the Benefit (text, file, or simple)
    text = models.TextField(_("text"), blank=True)
    upload = models.FileField(_("file"), blank=True, upload_to="sponsor_files")

    # Whether any assets required from the sponsor have been provided
    # (e.g. a logo file for a Web logo benefit).
    is_complete = models.NullBooleanField(help_text=_(u"True - benefit complete; False - benefit incomplete; Null - n/a"))

    class Meta:
        ordering = ['-active']

    def __unicode__(self):
        return u"%s - %s (%s)" % (self.sponsor, self.benefit,
                                  self.benefit.type)

    def save(self, *args, **kwargs):
        # Validate - save() doesn't clean your model by default, so call
        # it explicitly before saving
        self.full_clean()
        # Cache completeness so it can be queried without re-deriving it.
        self.is_complete = self._is_complete()
        super(SponsorBenefit, self).save(*args, **kwargs)

    def clean(self):
        """Enforce the per-level word limit and the benefit-type data rules."""
        if self.max_words and len(self.text.split()) > self.max_words:
            raise ValidationError("Sponsorship level only allows for %s "
                                  "words." % self.max_words)
        editable_fields = self.data_fields()
        if bool(self.text) and 'text' not in editable_fields:
            raise ValidationError("Benefit type %s may not have text"
                                  % self.benefit.type)
        if bool(self.upload) and 'upload' not in editable_fields:
            raise ValidationError("Benefit type %s may not have an uploaded "
                                  "file (%s)" % (self.benefit.type,
                                                 self.upload))

    def data_fields(self):
        """
        Return list of data field names which should be editable for
        this ``SponsorBenefit``, depending on its ``Benefit`` type.
        """
        if self.benefit.type == "file" or self.benefit.type == "weblogo":
            return ["upload"]
        elif self.benefit.type in ("text", "richtext", "simple"):
            return ["text"]
        return []

    def _is_complete(self):
        # Note: "simple" benefits carry no data, so this expression yields
        # False for them even when they are active.
        return self.active and \
            ((self.benefit.type in ('text', 'richtext') and bool(self.text))
             or (self.benefit.type in ('file', 'weblogo') and bool(self.upload)))
| |
"""
tests the pysat averaging code
"""
from nose.tools import raises
import numpy as np
import pandas as pds
import warnings
import pysat
from pysat.ssnl import avg
class TestBasics():
    """Basic median/mean averaging over the pysat test instrument."""

    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean')
        # bounds1 spans three days; bounds2 spans two days.
        self.bounds1 = (pysat.datetime(2008, 1, 1), pysat.datetime(2008, 1, 3))
        self.bounds2 = (pysat.datetime(2009, 1, 1), pysat.datetime(2009, 1, 2))

    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testInst, self.bounds1, self.bounds2

    def test_basic_seasonal_median2D(self):
        """ Test the basic seasonal 2D median"""
        self.testInst.bounds = self.bounds1
        results = avg.median2D(self.testInst, [0., 360., 24.], 'longitude',
                               [0., 24, 24], 'mlt',
                               ['dummy1', 'dummy2', 'dummy3'])
        dummy_val = results['dummy1']['median']
        dummy_dev = results['dummy1']['avg_abs_dev']
        dummy2_val = results['dummy2']['median']
        dummy2_dev = results['dummy2']['avg_abs_dev']
        dummy3_val = results['dummy3']['median']
        dummy3_dev = results['dummy3']['avg_abs_dev']
        dummy_x = results['dummy1']['bin_x']
        dummy_y = results['dummy1']['bin_y']
        # iterate over all y rows
        # value should be equal to integer value of mlt
        # no variation in the median, all values should be the same
        check = []
        for i, y in enumerate(dummy_y[:-1]):
            assert np.all(dummy_val[i, :] == y.astype(int))
            assert np.all(dummy_dev[i, :] == 0)
        for i, x in enumerate(dummy_x[:-1]):
            assert np.all(dummy2_val[:, i] == x/15.0)
            assert np.all(dummy2_dev[:, i] == 0)
        for i, x in enumerate(dummy_x[:-1]):
            check.append(np.all(dummy3_val[:, i] == x/15.0 * 1000.0
                                + dummy_y[:-1]))
            check.append(np.all(dummy3_dev[:, i] == 0))
        # holds here because the 3-day season is fully loaded, no data is
        # discarded, and each day holds the same amount of data
        assert(self.testInst.data['dummy1'].size*3 ==
               sum([sum(i) for i in results['dummy1']['count']]))
        assert np.all(check)

    def test_basic_daily_mean(self):
        """ Test basic daily mean"""
        self.testInst.bounds = self.bounds1
        ans = avg.mean_by_day(self.testInst, 'dummy4')
        # dummy4 presumably ramps over the seconds of a day, giving a
        # daily mean of 86399/2 -- TODO confirm against the test instrument
        assert np.all(ans == 86399/2.0)

    def test_basic_orbit_mean(self):
        """Test basic orbital mean"""
        orbit_info = {'kind': 'local time', 'index': 'mlt'}
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         orbit_info=orbit_info)
        self.testInst.bounds = self.bounds2
        ans = avg.mean_by_orbit(self.testInst, 'mlt')
        # note last orbit is incomplete thus not expected to satisfy relation
        assert np.allclose(ans[:-1], np.ones(len(ans)-1)*12.0, 1.0E-2)

    def test_basic_file_mean(self):
        """Test basic file mean"""
        # Express the season bounds as file names instead of dates.
        index = pds.date_range(*self.bounds1)
        names = [date.strftime('%Y-%m-%d')+'.nofile' for date in index]
        self.testInst.bounds = (names[0], names[-1])
        ans = avg.mean_by_file(self.testInst, 'dummy4')
        assert np.all(ans == 86399/2.0)
class TestDeprecation():
    """Checks that deprecated ssnl.avg entry points emit DeprecationWarning."""

    def setup(self):
        """Runs before every method to create a clean testing setup"""
        warnings.simplefilter("always")

    def teardown(self):
        """Runs after every method to clean up previous testing"""

    def test_median1D_deprecation_warning(self):
        """Test generation of deprecation warning for median1D"""
        with warnings.catch_warnings(record=True) as war:
            try:
                avg.median1D(None, [0., 360., 24.],
                             'longitude', ['dummy1'])
            except ValueError:
                # Setting inst to None should produce a ValueError after
                # warning is generated
                pass
        assert len(war) >= 1
        assert war[0].category == DeprecationWarning

    def test_median2D_deprecation_warning(self):
        """Test generation of deprecation warning for median2D"""
        with warnings.catch_warnings(record=True) as war:
            try:
                avg.median2D(None, [0., 360., 24.], 'longitude',
                             [0., 24., 24.], 'mlt', ['dummy1'])
            except ValueError:
                # Setting inst to None should produce a ValueError after
                # warning is generated
                pass
        assert len(war) >= 1
        assert war[0].category == DeprecationWarning

    def test_mean_by_day_deprecation_warning(self):
        """Test generation of deprecation warning for mean_by_day"""
        with warnings.catch_warnings(record=True) as war:
            try:
                avg.mean_by_day(None, 'dummy1')
            except TypeError:
                # Setting inst to None should produce a TypeError after
                # warning is generated
                pass
        assert len(war) >= 1
        assert war[0].category == DeprecationWarning

    def test_mean_by_orbit_deprecation_warning(self):
        """Test generation of deprecation warning for mean_by_orbit"""
        with warnings.catch_warnings(record=True) as war:
            try:
                avg.mean_by_orbit(None, 'dummy1')
            except AttributeError:
                # Setting inst to None should produce a AttributeError after
                # warning is generated
                pass
        assert len(war) >= 1
        assert war[0].category == DeprecationWarning

    def test_mean_by_file_deprecation_warning(self):
        """Test generation of deprecation warning for mean_by_file"""
        with warnings.catch_warnings(record=True) as war:
            try:
                avg.mean_by_file(None, 'dummy1')
            except TypeError:
                # Setting inst to None should produce a TypeError after
                # warning is generated
                pass
        assert len(war) >= 1
        assert war[0].category == DeprecationWarning
class TestFrameProfileAverages():
    """Median averaging over DataFrame-valued (2D profile) variables."""

    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument('pysat', 'testing2D',
                                         clean_level='clean')
        self.testInst.bounds = (pysat.datetime(2008, 1, 1),
                                pysat.datetime(2008, 1, 3))
        self.dname = 'alt_profiles'
        # Expected per-bin profile values and fractions for the test data.
        self.test_vals = np.arange(50) * 1.2
        self.test_fracs = np.arange(50) / 50.0

    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testInst, self.dname, self.test_vals, self.test_fracs

    def test_basic_seasonal_2Dmedian(self):
        """ Test the basic seasonal 2D median"""
        results = avg.median2D(self.testInst, [0., 360., 24.], 'longitude',
                               [0., 24, 24], 'mlt', [self.dname])
        # iterate over all
        # no variation in the median, all values should be the same
        for i, row in enumerate(results[self.dname]['median']):
            for j, item in enumerate(row):
                assert np.all(item['density'] == self.test_vals)
                assert np.all(item['fraction'] == self.test_fracs)
        for i, row in enumerate(results[self.dname]['avg_abs_dev']):
            for j, item in enumerate(row):
                assert np.all(item['density'] == 0)
                assert np.all(item['fraction'] == 0)

    def test_basic_seasonal_1Dmedian(self):
        """ Test the basic seasonal 1D median"""
        results = avg.median1D(self.testInst, [0., 24, 24], 'mlt',
                               [self.dname])
        # iterate over all
        # no variation in the median, all values should be the same
        for i, row in enumerate(results[self.dname]['median']):
            assert np.all(row['density'] == self.test_vals)
            assert np.all(row['fraction'] == self.test_fracs)
        for i, row in enumerate(results[self.dname]['avg_abs_dev']):
            assert np.all(row['density'] == 0)
            assert np.all(row['fraction'] == 0)
class TestSeriesProfileAverages():
    """Median averaging over Series-valued profile variables."""

    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument('pysat', 'testing2D',
                                         clean_level='clean')
        self.testInst.bounds = (pysat.datetime(2008, 1, 1),
                                pysat.datetime(2008, 2, 1))
        self.dname = 'series_profiles'

    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testInst, self.dname

    def test_basic_seasonal_median2D(self):
        """ Test basic seasonal 2D median"""
        results = avg.median2D(self.testInst, [0., 360., 24.], 'longitude',
                               [0., 24, 24], 'mlt', [self.dname])
        # iterate over all
        # no variation in the median, all values should be the same
        test_vals = np.arange(50) * 1.2
        for i, row in enumerate(results[self.dname]['median']):
            for j, item in enumerate(row):
                assert np.all(item == test_vals)
        for i, row in enumerate(results[self.dname]['avg_abs_dev']):
            for j, item in enumerate(row):
                assert np.all(item == 0)

    def test_basic_seasonal_median1D(self):
        """ Test basic seasonal 1D median"""
        results = avg.median1D(self.testInst, [0., 24, 24], 'mlt',
                               [self.dname])
        # iterate over all
        # no variation in the median, all values should be the same
        test_vals = np.arange(50) * 1.2
        for i, row in enumerate(results[self.dname]['median']):
            assert np.all(row == test_vals)
        for i, row in enumerate(results[self.dname]['avg_abs_dev']):
            assert np.all(row == 0)
class TestConstellation:
    """A constellation of identical instruments should match one instrument."""

    def setup(self):
        insts = []
        for i in range(5):
            insts.append(pysat.Instrument('pysat', 'testing',
                                          clean_level='clean'))
        self.testC = pysat.Constellation(instruments=insts)
        self.testI = pysat.Instrument('pysat', 'testing', clean_level='clean')
        self.bounds = (pysat.datetime(2008, 1, 1), pysat.datetime(2008, 1, 3))

    def teardown(self):
        del self.testC, self.testI, self.bounds

    def test_constellation_median2D(self):
        """ Test constellation implementation of 2D median"""
        for i in self.testC.instruments:
            i.bounds = self.bounds
        self.testI.bounds = self.bounds
        resultsC = avg.median2D(self.testC, [0., 360., 24.], 'longitude',
                                [0., 24, 24], 'mlt',
                                ['dummy1', 'dummy2', 'dummy3'])
        resultsI = avg.median2D(self.testI, [0., 360., 24.], 'longitude',
                                [0., 24, 24], 'mlt',
                                ['dummy1', 'dummy2', 'dummy3'])
        # Constellation and single-instrument medians must agree exactly.
        medC1 = resultsC['dummy1']['median']
        medI1 = resultsI['dummy1']['median']
        medC2 = resultsC['dummy2']['median']
        medI2 = resultsI['dummy2']['median']
        medC3 = resultsC['dummy3']['median']
        medI3 = resultsI['dummy3']['median']
        assert np.array_equal(medC1, medI1)
        assert np.array_equal(medC2, medI2)
        assert np.array_equal(medC3, medI3)

    def test_constellation_median1D(self):
        """ Test constellation implementation of 1D median"""
        for i in self.testC.instruments:
            i.bounds = self.bounds
        self.testI.bounds = self.bounds
        resultsC = avg.median1D(self.testC, [0., 24, 24], 'mlt',
                                ['dummy1', 'dummy2', 'dummy3'])
        resultsI = avg.median1D(self.testI, [0., 24, 24], 'mlt',
                                ['dummy1', 'dummy2', 'dummy3'])
        medC1 = resultsC['dummy1']['median']
        medI1 = resultsI['dummy1']['median']
        medC2 = resultsC['dummy2']['median']
        medI2 = resultsI['dummy2']['median']
        medC3 = resultsC['dummy3']['median']
        medI3 = resultsI['dummy3']['median']
        assert np.array_equal(medC1, medI1)
        assert np.array_equal(medC2, medI2)
        assert np.array_equal(medC3, medI3)
class TestHeterogenousConstellation:
    """Median averaging over instruments with different root dates."""

    def setup(self):
        insts = []
        for i in range(2):
            # Different root dates make the constellation members distinct.
            r_date = pysat.datetime(2009, 1, i+1)
            insts.append(pysat.Instrument('pysat', 'testing',
                                          clean_level='clean',
                                          root_date=r_date))
        self.testC = pysat.Constellation(instruments=insts)
        self.bounds = (pysat.datetime(2008, 1, 1), pysat.datetime(2008, 1, 3))

    def teardown(self):
        del self.testC, self.bounds

    def test_heterogenous_constellation_median2D(self):
        """ Test the seasonal 2D median of a heterogeneous constellation """
        for inst in self.testC:
            inst.bounds = self.bounds
        results = avg.median2D(self.testC, [0., 360., 24.], 'longitude',
                               [0., 24, 24], 'mlt',
                               ['dummy1', 'dummy2', 'dummy3'])
        dummy_val = results['dummy1']['median']
        dummy_dev = results['dummy1']['avg_abs_dev']
        dummy2_val = results['dummy2']['median']
        dummy2_dev = results['dummy2']['avg_abs_dev']
        dummy3_val = results['dummy3']['median']
        dummy3_dev = results['dummy3']['avg_abs_dev']
        dummy_x = results['dummy1']['bin_x']
        dummy_y = results['dummy1']['bin_y']
        # iterate over all y rows
        # value should be equal to integer value of mlt
        # no variation in the median, all values should be the same
        check = []
        for i, y in enumerate(dummy_y[:-1]):
            check.append(np.all(dummy_val[i, :] == y.astype(int)))
            check.append(np.all(dummy_dev[i, :] == 0))
        for i, x in enumerate(dummy_x[:-1]):
            check.append(np.all(dummy2_val[:, i] == x/15.0))
            check.append(np.all(dummy2_dev[:, i] == 0))
        for i, x in enumerate(dummy_x[:-1]):
            check.append(np.all(dummy3_val[:, i] == x/15.0 * 1000.0
                                + dummy_y[:-1]))
            check.append(np.all(dummy3_dev[:, i] == 0))
        assert np.all(check)

    def test_heterogenous_constellation_median1D(self):
        """ Test the seasonal 1D median of a heterogeneous constellation """
        for inst in self.testC:
            inst.bounds = self.bounds
        results = avg.median1D(self.testC, [0., 24, 24], 'mlt', ['dummy1'])
        # Extract the results
        dummy_val = results['dummy1']['median']
        dummy_dev = results['dummy1']['avg_abs_dev']
        # iterate over all x rows
        # value should be equal to integer value of mlt
        # no variation in the median, all values should be the same
        check = []
        for i, x in enumerate(results['dummy1']['bin_x'][:-1]):
            check.append(np.all(dummy_val[i] == x.astype(int)))
            check.append(np.all(dummy_dev[i] == 0))
        assert np.all(check)
class Test2DConstellation:
    """Median calculations for a constellation of 2D test instruments."""

    def setup(self):
        """Runs before every method to create a clean testing setup."""
        insts = []
        insts.append(pysat.Instrument('pysat', 'testing2d',
                                      clean_level='clean'))
        self.testC = pysat.Constellation(insts)
        self.bounds = (pysat.datetime(2008, 1, 1), pysat.datetime(2008, 1, 3))

    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testC, self.bounds

    def test_2D_median(self):
        """ Test a 2D median calculation with a constellation"""
        for i in self.testC.instruments:
            i.bounds = self.bounds
        results = avg.median2D(self.testC, [0., 360., 24], 'longitude',
                               [0., 24, 24], 'slt', ['uts'])
        dummy_val = results['uts']['median']
        dummy_dev = results['uts']['avg_abs_dev']
        dummy_y = results['uts']['bin_y']
        # iterate over all y rows
        # value should be equal to integer value of mlt
        # no variation in the median, all values should be the same
        check = []
        for i, y in enumerate(dummy_y[:-1]):
            check.append(np.all(dummy_val[i, :] == y.astype(int)))
            check.append(np.all(dummy_dev[i, :] == 0))
        # BUG FIX: the accumulated checks were built but never asserted,
        # so this test previously passed vacuously.
        assert np.all(check)

    def test_1D_median(self):
        """ Test a 1D median calculation with a constellation"""
        for i in self.testC.instruments:
            i.bounds = self.bounds
        results = avg.median1D(self.testC, [0., 24, 24], 'slt', ['uts'])
        dummy_val = results['uts']['median']
        dummy_dev = results['uts']['avg_abs_dev']
        dummy_x = results['uts']['bin_x']
        # iterate over all x rows
        # value should be equal to integer value of slt
        # no variation in the median, all values should be the same
        check = []
        for i, x in enumerate(dummy_x[:-1]):
            check.append(np.all(dummy_val[i] == x.astype(int)))
            check.append(np.all(dummy_dev[i] == 0))
        # BUG FIX: assert the accumulated checks (previously missing).
        assert np.all(check)
class TestSeasonalAverageUnevenBins:
    """median2D with caller-supplied bin edges (auto_bin=False)."""

    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean')
        self.testInst.bounds = (pysat.datetime(2008, 1, 1),
                                pysat.datetime(2008, 1, 3))

    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testInst

    def test_seasonal_average_uneven_bins(self):
        """ Test seasonal 2D median with uneven bins"""
        results = avg.median2D(self.testInst, np.linspace(0., 360., 25),
                               'longitude', np.linspace(0., 24., 25), 'mlt',
                               ['dummy1', 'dummy2', 'dummy3'], auto_bin=False)
        dummy_val = results['dummy1']['median']
        dummy_dev = results['dummy1']['avg_abs_dev']
        dummy2_val = results['dummy2']['median']
        dummy2_dev = results['dummy2']['avg_abs_dev']
        dummy3_val = results['dummy3']['median']
        dummy3_dev = results['dummy3']['avg_abs_dev']
        dummy_x = results['dummy1']['bin_x']
        dummy_y = results['dummy1']['bin_y']
        # iterate over all y rows
        # value should be equal to integer value of mlt
        # no variation in the median, all values should be the same
        check = []
        for i, y in enumerate(dummy_y[:-1]):
            assert np.all(dummy_val[i, :] == y.astype(int))
            assert np.all(dummy_dev[i, :] == 0)
        for i, x in enumerate(dummy_x[:-1]):
            assert np.all(dummy2_val[:, i] == x/15.0)
            assert np.all(dummy2_dev[:, i] == 0)
        for i, x in enumerate(dummy_x[:-1]):
            check.append(np.all(dummy3_val[:, i] == x/15.0 * 1000.0
                                + dummy_y[:-1]))
            check.append(np.all(dummy3_dev[:, i] == 0))
        # holds here because the 3-day season is fully loaded, no data is
        # discarded, and each day holds the same amount of data
        assert(self.testInst.data['dummy1'].size*3 ==
               sum([sum(i) for i in results['dummy1']['count']]))
        assert np.all(check)

    @raises(ValueError)
    def test_nonmonotonic_bins(self):
        """Test 2D median failure when provided with a non-monotonic bins
        """
        avg.median2D(self.testInst, np.array([0., 300., 100.]), 'longitude',
                     np.array([0., 24., 13.]), 'mlt',
                     ['dummy1', 'dummy2', 'dummy3'], auto_bin=False)

    @raises(TypeError)
    def test_bin_data_depth(self):
        """Test failure when an array-like of length 1 is given to median2D
        """
        avg.median2D(self.testInst, 1, 'longitude', 24, 'mlt',
                     ['dummy1', 'dummy2', 'dummy3'], auto_bin=False)

    @raises(TypeError)
    def test_bin_data_type(self):
        """Test failure when a non array-like is given to median2D
        """
        avg.median2D(self.testInst, ['1', 'a', '23', '10'], 'longitude',
                     ['0', 'd', '24', 'c'], 'mlt',
                     ['dummy1', 'dummy2', 'dummy3'], auto_bin=False)
class TestInstMed1D():
    """Checks median1D output structure and values for the test instrument."""

    def setup(self):
        """Runs before every method to create a clean testing setup"""
        self.testInst = pysat.Instrument('pysat', 'testing',
                                         clean_level='clean',
                                         update_files=True)
        self.testInst.bounds = (pysat.datetime(2008, 1, 1),
                                pysat.datetime(2008, 1, 31))
        self.test_bins = [0, 24, 24]
        self.test_label = 'slt'
        self.test_data = ['dummy1', 'dummy2']
        self.out_keys = ['count', 'avg_abs_dev', 'median', 'bin_x']
        # Expected statistics per data variable.
        # BUG FIX: np.linspace requires an integer `num`; passing 24.0
        # raises TypeError on NumPy >= 1.18.
        self.out_data = {'dummy1':
                         {'count': [111780., 111320., 111780., 111320.,
                                    111780., 111320., 111780., 111320.,
                                    111780., 111320., 111780., 111320.,
                                    111780., 111320., 111918., 111562.,
                                    112023., 111562., 112023., 111412.,
                                    111780., 111320., 111780., 111320.],
                          'avg_abs_dev': np.zeros(shape=24),
                          'median': np.linspace(0.0, 23.0, 24)},
                         'dummy2':
                         {'count': [111780., 111320., 111780., 111320.,
                                    111780., 111320., 111780., 111320.,
                                    111780., 111320., 111780., 111320.,
                                    111780., 111320., 111918., 111562.,
                                    112023., 111562., 112023., 111412.,
                                    111780., 111320., 111780., 111320.],
                          'avg_abs_dev': np.zeros(shape=24) + 6.0,
                          'median': [11., 12., 11., 11., 12., 11., 12., 11.,
                                     12., 12., 11., 12., 11., 12., 11., 11.,
                                     12., 11., 12., 11., 11., 11., 11., 12.]}}

    def teardown(self):
        """Runs after every method to clean up previous testing"""
        del self.testInst, self.test_bins, self.test_label, self.test_data
        del self.out_keys, self.out_data

    def test_median1D_default(self):
        """Test success of median1D with default options"""
        med_dict = avg.median1D(self.testInst, self.test_bins, self.test_label,
                                self.test_data)
        # Test output type
        assert isinstance(med_dict, dict)
        assert len(med_dict.keys()) == len(self.test_data)
        # Test output keys
        for kk in med_dict.keys():
            assert kk in self.test_data
            assert np.all([jj in self.out_keys
                           for jj in med_dict[kk].keys()])
            # Test output values
            for jj in self.out_keys[:-1]:
                assert len(med_dict[kk][jj]) == self.test_bins[-1]
                assert np.all(med_dict[kk][jj] == self.out_data[kk][jj])
            jj = self.out_keys[-1]
            assert len(med_dict[kk][jj]) == self.test_bins[-1]+1
            assert np.all(med_dict[kk][jj] == np.linspace(self.test_bins[0],
                                                          self.test_bins[1],
                                                          self.test_bins[2]+1))
        del med_dict, kk, jj

    @raises(KeyError)
    def test_median1D_bad_data(self):
        """Test failure of median1D with string data instead of list"""
        avg.median1D(self.testInst, self.test_bins, self.test_label,
                     self.test_data[0])

    @raises(KeyError)
    def test_median1D_bad_label(self):
        """Test failure of median1D with unknown label"""
        avg.median1D(self.testInst, self.test_bins, "bad_label",
                     self.test_data)

    @raises(ValueError)
    def test_nonmonotonic_bins(self):
        """Test median1D failure when provided with a non-monotonic bins
        """
        avg.median1D(self.testInst, [0, 13, 5], self.test_label,
                     self.test_data, auto_bin=False)

    @raises(TypeError)
    def test_bin_data_depth(self):
        """Test failure when array-like of length 1 is given to median1D
        """
        avg.median1D(self.testInst, 24, self.test_label, self.test_data,
                     auto_bin=False)

    @raises(TypeError)
    def test_bin_data_type(self):
        """Test failure when median 1D is given non array-like bins
        """
        # BUG FIX: this test documents median1D but previously invoked
        # pysat.ssnl.avg.median2D with median1D-style arguments.
        avg.median1D(self.testInst, ['0', 'd', '24', 'c'],
                     self.test_label, self.test_data,
                     auto_bin=False)
| |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robot.utils import py2to3
from .comments import CommentCache, Comments
from .settings import Documentation, MetadataList
class Populator(object):
    """Explicit interface for all populators."""

    def add(self, row):
        """Feed one parsed table row to the populator."""
        raise NotImplementedError

    def populate(self):
        """Flush the accumulated rows into the target model object."""
        raise NotImplementedError
@py2to3
class NullPopulator(Populator):
    """No-op populator used when there is nothing (valid) to populate."""

    def add(self, row):
        pass

    def populate(self):
        pass

    def __nonzero__(self):
        # Falsy, so callers can test whether a real populator is active.
        # The py2to3 decorator maps this to __bool__ on Python 3.
        return False
class _TablePopulator(Populator):
    """Base class for populating one table of a parsed data file."""

    def __init__(self, table):
        self._table = table
        self._populator = NullPopulator()
        self._comment_cache = CommentCache()

    def add(self, row):
        # Comment rows are cached until we know whether they belong to the
        # current item or to the next one.
        if self._is_cacheable_comment_row(row):
            self._comment_cache.add(row)
        else:
            self._add(row)

    def _is_cacheable_comment_row(self, row):
        return row.is_commented()

    def _add(self, row):
        if self._is_continuing(row):
            self._consume_comments()
        else:
            # New item starts: finish the previous populator first.
            self._populator.populate()
            self._populator = self._get_populator(row)
            self._consume_standalone_comments()
        self._populator.add(row)

    def _is_continuing(self, row):
        return row.is_continuing() and self._populator

    def _get_populator(self, row):
        """Return the populator for the item starting on ``row``."""
        raise NotImplementedError

    def _consume_comments(self):
        self._comment_cache.consume_with(self._populator.add)

    def _consume_standalone_comments(self):
        self._consume_comments()

    def populate(self):
        self._consume_comments()
        self._populator.populate()
class SettingTablePopulator(_TablePopulator):
    """Populates the setting table, dispatching on the setting's setter."""

    def _get_populator(self, row):
        # Settings without a setter are silently dropped via NullPopulator.
        setter = self._table.get_setter(row.head)
        if not setter:
            return NullPopulator()
        if isinstance(setter.__self__, Documentation):
            return DocumentationPopulator(setter)
        if isinstance(setter.__self__, MetadataList):
            return MetadataPopulator(setter)
        return SettingPopulator(setter)
class VariableTablePopulator(_TablePopulator):
    """Populates the variable table."""

    def _get_populator(self, row):
        return VariablePopulator(self._table.add, row.head)

    def _consume_standalone_comments(self):
        # Standalone comments become their own variable "rows" so they are
        # preserved in their original position.
        self._comment_cache.consume_with(self._populate_standalone_comment)

    def _populate_standalone_comment(self, comment):
        populator = self._get_populator(comment)
        populator.add(comment)
        populator.populate()

    def populate(self):
        # Populate the current variable first so trailing comments end
        # up after it.
        self._populator.populate()
        self._consume_standalone_comments()
class _StepContainingTablePopulator(_TablePopulator):
    """Common logic for tables whose items contain steps (tests, keywords)."""

    def _is_continuing(self, row):
        # Indented rows continue the current item; comment rows never
        # start a new one.
        return row.is_indented() and self._populator or row.is_commented()

    def _is_cacheable_comment_row(self, row):
        # Cache comments only between items; inside an item the item
        # populator handles them itself.
        return row.is_commented() and not self._populator
class TestTablePopulator(_StepContainingTablePopulator):
    """Populates the test case table."""

    def _get_populator(self, row):
        return TestCasePopulator(self._table.add)
class KeywordTablePopulator(_StepContainingTablePopulator):
    """Populates the keyword table."""

    def _get_populator(self, row):
        return UserKeywordPopulator(self._table.add)
class ForLoopPopulator(Populator):
    """Populates one FOR loop: its (possibly multi-row) header and steps."""

    def __init__(self, for_loop_creator):
        self._for_loop_creator = for_loop_creator
        self._loop = None
        self._populator = NullPopulator()
        self._declaration = []
        self._declaration_comments = []

    def add(self, row):
        dedented_row = row.dedent()
        if not self._loop:
            # Still reading the loop declaration rows.
            declaration_ready = self._populate_declaration(row)
            if not declaration_ready:
                return
            self._create_for_loop()
        if not row.is_continuing():
            # New step inside the loop: flush the previous one.
            self._populator.populate()
            self._populator = StepPopulator(self._loop.add_step)
        self._populator.add(dedented_row)

    def _populate_declaration(self, row):
        # Returns False while rows still belong to the declaration and
        # True once ``row`` is the first loop body row.
        if row.starts_for_loop() or row.is_continuing():
            self._declaration.extend(row.dedent().data)
            self._declaration_comments.extend(row.comments)
            return False
        return True

    def _create_for_loop(self):
        self._loop = self._for_loop_creator(self._declaration,
                                            self._declaration_comments)

    def populate(self):
        # The loop may end without any body rows; create it from the
        # declaration alone in that case.
        if not self._loop:
            self._create_for_loop()
        self._populator.populate()
class _TestCaseUserKeywordPopulator(Populator):
    """Populates one test case or user keyword, including all its steps."""

    def __init__(self, test_or_uk_creator):
        self._test_or_uk_creator = test_or_uk_creator
        self._test_or_uk = None
        self._populator = NullPopulator()
        self._comment_cache = CommentCache()

    def add(self, row):
        if row.is_commented():
            self._comment_cache.add(row)
            return
        if not self._test_or_uk:
            # First data row names the test/keyword being created.
            self._test_or_uk = self._test_or_uk_creator(row.head)
        dedented_row = row.dedent()
        if dedented_row:
            self._handle_data_row(dedented_row)

    def _handle_data_row(self, row):
        if not self._continues(row):
            # New step/setting: flush the previous one, then emit cached
            # comments as standalone comment steps before it.
            self._populator.populate()
            self._populator = self._get_populator(row)
            self._comment_cache.consume_with(self._populate_comment_row)
        else:
            # Continuation: cached comments belong to the current item.
            self._comment_cache.consume_with(self._populator.add)
        self._populator.add(row)

    def _populate_comment_row(self, crow):
        populator = StepPopulator(self._test_or_uk.add_step)
        populator.add(crow)
        populator.populate()

    def populate(self):
        self._populator.populate()
        self._comment_cache.consume_with(self._populate_comment_row)

    def _get_populator(self, row):
        if row.starts_test_or_user_keyword_setting():
            setter = self._setting_setter(row)
            if not setter:
                return NullPopulator()
            if isinstance(setter.__self__, Documentation):
                return DocumentationPopulator(setter)
            return SettingPopulator(setter)
        if row.starts_for_loop():
            return ForLoopPopulator(self._test_or_uk.add_for_loop)
        return StepPopulator(self._test_or_uk.add_step)

    def _continues(self, row):
        # A row continues the current populator when it is an explicit
        # continuation, or an indented row inside an open FOR loop.
        return row.is_continuing() and self._populator or \
            (isinstance(self._populator, ForLoopPopulator) and row.is_indented())

    def _setting_setter(self, row):
        setting_name = row.test_or_user_keyword_setting_name()
        return self._test_or_uk.get_setter(setting_name)
class TestCasePopulator(_TestCaseUserKeywordPopulator):
    """Populates a single test case."""
    _item_type = 'test case'
class UserKeywordPopulator(_TestCaseUserKeywordPopulator):
    """Populates a single user keyword."""
    _item_type = 'keyword'
class _PropertyPopulator(Populator):
    """Base for populators that accumulate one value plus its comments."""

    def __init__(self, setter):
        self._setter = setter
        self._value = []
        self._comments = Comments()
        self._data_added = False

    def add(self, row):
        if not row.is_commented():
            self._add(row)
        self._comments.add(row)

    def _add(self, row):
        # First row: skip the name cell (use the tail); continuation rows
        # contribute all their data cells.
        self._value.extend(row.tail if not self._data_added else row.data)
        self._data_added = True
class VariablePopulator(_PropertyPopulator):
    """Accumulates one variable's value and hands it to the table."""

    def __init__(self, setter, name):
        _PropertyPopulator.__init__(self, setter)
        self._name = name

    def populate(self):
        self._setter(self._name, self._value, self._comments.value)
class SettingPopulator(_PropertyPopulator):
    """Populates a plain (non-documentation, non-metadata) setting."""

    def populate(self):
        self._setter(self._value, self._comments.value)
class DocumentationPopulator(_PropertyPopulator):
    """Populates documentation, joining continuation rows per escape rules."""

    # Matches trailing backslashes optionally followed by 'n'; used to
    # decide how continuation rows are joined.
    _end_of_line_escapes = re.compile(r'(\\+)n?$')

    def populate(self):
        self._setter(self._value, self._comments.value)

    def _add(self, row):
        self._add_to_value(row.dedent().data)

    def _add_to_value(self, data):
        joiner = self._row_joiner()
        if joiner:
            self._value.append(joiner)
        self._value.append(' '.join(data))

    def _row_joiner(self):
        if self._is_empty():
            return None
        return self._joiner_based_on_eol_escapes()

    def _is_empty(self):
        return not self._value or \
            (len(self._value) == 1 and self._value[0] == '')

    def _joiner_based_on_eol_escapes(self):
        # An even number of trailing backslashes means no active escape,
        # so rows are joined with a literal '\n'. An odd count without a
        # trailing 'n' (a bare line-continuation '\') joins with a space;
        # an explicit '\n' escape needs no extra joiner at all.
        match = self._end_of_line_escapes.search(self._value[-1])
        if not match or len(match.group(1)) % 2 == 0:
            return '\\n'
        if not match.group(0).endswith('n'):
            return ' '
        return None
class MetadataPopulator(DocumentationPopulator):
    """Populates one metadata entry (name plus documentation-style value)."""

    def __init__(self, setter):
        _PropertyPopulator.__init__(self, setter)
        self._name = None

    def populate(self):
        self._setter(self._name, self._value, self._comments.value)

    def _add(self, row):
        # The first data cell is the metadata name; the rest is its value.
        data = row.dedent().data
        if self._name is None:
            self._name = data[0] if data else ''
            data = data[1:]
        self._add_to_value(data)
class StepPopulator(_PropertyPopulator):
    """Populates a single step of a test case or user keyword."""

    def _add(self, row):
        self._value.extend(row.data)

    def populate(self):
        # Skip rows with neither data nor comments so blank rows don't
        # produce empty steps.
        if self._value or self._comments:
            self._setter(self._value, self._comments.value)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ________________________________________________________________________
#
# Copyright (C) 2014 Andrew Fullford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ________________________________________________________________________
#
import os, sys, logging, time, fcntl
import taskforce.poll
from taskforce.utils import deltafmt
import support
from support import get_caller as my
# Test-support environment rooted at the current directory.
env = support.env(base='.')
# Find possible polling modes
# Every PL_* constant exported by taskforce.poll names a polling
# implementation this build could potentially offer.
known_polling_modes = set(taskforce.poll.__dict__[mode] for mode in taskforce.poll.__dict__ if mode.startswith('PL_'))
class Test(object):
	"""
	Exercises taskforce.poll: polling-mode selection and reporting,
	fd registration rules, and actual event delivery driven through a
	non-blocking self-pipe.
	"""

	@classmethod
	def setUpAll(self, mode=None):
		self.log = support.logger()
		self.log.info("%s started", self.__module__)
		self.poll_fd = None
		self.poll_send = None

	@classmethod
	def tearDownAll(self):
		self.log.info("%s ended", self.__module__)

	def self_pipe(self):
		"""
		A self-pipe is a convenient way of exercising some polling
		"""
		self.poll_fd, self.poll_send = os.pipe()
		#  Make both ends non-blocking so no test can hang on pipe I/O.
		for fd in [self.poll_fd, self.poll_send]:
			fl = fcntl.fcntl(fd, fcntl.F_GETFL)
			fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
		self.log.info("%s poll_fd = %d, poll_send = %d", my(self), self.poll_fd, self.poll_send)
		return (self.poll_fd, self.poll_send)

	def close_pipe(self):
		"""Close both pipe ends, tolerating fds that are already closed."""
		if self.poll_fd is not None:
			try: os.close(self.poll_fd)
			except OSError: pass
			self.poll_fd = None
		if self.poll_send is not None:
			try: os.close(self.poll_send)
			except OSError: pass
			self.poll_send = None

	def dump_evlist(self, poll, tag, evlist):
		"""Log each (fd, event) pair in 'evlist', or note that it is empty."""
		if evlist:
			self.log.info("%s Event list from %s ...", my(self), tag)
			for fd, ev in evlist:
				self.log.info("    %s on fd %s", poll.get_event(ev), str(fd))
		else:
			self.log.info("%s Event list from %s is empty", my(self), tag)

	def Test_A_mode(self):
		"""
		Verify the mode reporting/formatting helpers and that invalid or
		unavailable modes are rejected.
		"""
		log_level = self.log.getEffectiveLevel()
		poll = taskforce.poll.poll()
		mode = poll.get_mode()
		allowed_modes = poll.get_available_modes()
		self.log.info("%s Default polling mode is '%s' of %s",
				my(self), poll.get_mode_name(mode=mode), str(poll.get_available_mode_names()))

		#  get_mode_name() should always return a string
		#
		name = poll.get_mode_name(mode='junk')
		self.log.info("%s get_mode_name() response to invalid mode %s", my(self), name)
		assert type(name) is str

		#  Format multiple events
		#
		evtext = poll.get_event(taskforce.poll.POLLIN|taskforce.poll.POLLOUT)
		self.log.info("%s get_event() response to multiple events %s", my(self), evtext)
		#  Bug fix: assert the value just produced ('evtext'), not the
		#  earlier 'name' result copied from the block above.
		assert type(evtext) is str

		#  Format bad events
		#
		evtext = poll.get_event(taskforce.poll.POLLIN|taskforce.poll.POLLOUT|0x800)
		self.log.info("%s get_event() response to multiple events %s", my(self), evtext)
		assert type(evtext) is str

		#  Invalid event input
		#
		try:
			#  Mask the log message as we expect a failure
			self.log.setLevel(logging.CRITICAL)
			poll.get_event(None)
			self.log.setLevel(log_level)
			expected_error_occurred = False
		except Exception as e:
			self.log.setLevel(log_level)
			self.log.info("%s Received expected invalid event error -- %s", my(self), str(e))
			expected_error_occurred = True
		assert expected_error_occurred

		#  Should always be able to force PL_SELECT
		poll.set_mode(taskforce.poll.PL_SELECT)
		assert poll.get_mode() == taskforce.poll.PL_SELECT

		#  Find a mode that is not available (stays None when every known
		#  mode is supported; set_mode(None) is then still a failure case).
		bad_mode = None
		for mode in known_polling_modes:
			if mode not in allowed_modes:
				bad_mode = mode
				break
		self.log.info("%s Determined unavailable mode as %s", my(self), poll.get_mode_name(mode=bad_mode))

		#  Check that we can't set mode to None
		#
		try:
			#  Mask the log message as we expect a failure
			self.log.setLevel(logging.CRITICAL)
			poll.set_mode(None)
			self.log.setLevel(log_level)
			expected_error_occurred = False
		except Exception as e:
			self.log.setLevel(log_level)
			self.log.info("%s Received expected error -- %s", my(self), str(e))
			expected_error_occurred = True
		assert expected_error_occurred

		#  Check that we can't set mode to an impossible value
		#
		try:
			#  Mask the log message as we expect a failure
			self.log.setLevel(logging.CRITICAL)
			poll.set_mode(-1)
			self.log.setLevel(log_level)
			expected_error_occurred = False
		except Exception as e:
			self.log.setLevel(log_level)
			self.log.info("%s Received expected error -- %s", my(self), str(e))
			expected_error_occurred = True
		assert expected_error_occurred

		#  Check that we can't set an unavailable mode
		#
		try:
			#  Mask the log message as we expect a failure
			self.log.setLevel(logging.CRITICAL)
			poll.set_mode(bad_mode)
			self.log.setLevel(log_level)
			expected_error_occurred = False
		except Exception as e:
			self.log.setLevel(log_level)
			self.log.info("%s Received expected error -- %s", my(self), str(e))
			expected_error_occurred = True
		assert expected_error_occurred

	def Test_B_register(self):
		"""
		Check registration handling: mode changes are locked out while fds
		are registered, and bad fds or bad unregister args are rejected.
		"""
		log_level = self.log.getEffectiveLevel()
		poll = taskforce.poll.poll()
		currmode = poll.get_mode()
		self.log.info("%s Default polling mode is '%s' of %s",
				my(self), poll.get_mode_name(mode=currmode), str(poll.get_available_mode_names()))

		#  Find a valid mode that is not the current mode
		#
		nextmode = None
		for mode in poll.get_available_modes():
			if mode != currmode:
				nextmode = mode
		self.log.info("%s Determined valid non-active mode as %s", my(self), poll.get_mode_name(mode=nextmode))
		poll_fd, poll_send = self.self_pipe()
		poll.register(poll_fd)

		#  Test that an attempt to change mode is rejected
		#
		try:
			#  Mask the log message as we expect a failure
			self.log.setLevel(logging.CRITICAL)
			poll.set_mode(nextmode)
			self.log.setLevel(log_level)
			expected_error_occurred = False
		except Exception as e:
			self.log.setLevel(log_level)
			self.log.info("%s Received expected error -- %s", my(self), str(e))
			expected_error_occurred = True
		assert expected_error_occurred

		#  Test that an attempt to register an invalid fd fails
		#
		inv_fd = 999
		#  Make sure it is invalid
		try: os.close(inv_fd)
		except OSError: pass
		try:
			#  Mask the log message as we expect a failure
			self.log.setLevel(logging.CRITICAL)
			poll.register(inv_fd)
			self.log.setLevel(log_level)
			expected_error_occurred = False
		except Exception as e:
			self.log.setLevel(log_level)
			self.log.info("%s Received expected invalid fd error -- %s", my(self), str(e))
			expected_error_occurred = True
		assert expected_error_occurred

		#  Confirm new mode is same as previous
		poll = taskforce.poll.poll()
		mode = poll.get_mode()
		self.log.info("%s Default polling mode is '%s' and should be same as previous '%s'",
				my(self), poll.get_mode_name(mode=mode), poll.get_mode_name(mode=currmode))

		#  Change to PL_SELECT and register
		poll.set_mode(taskforce.poll.PL_SELECT)
		assert poll.get_mode() == taskforce.poll.PL_SELECT
		poll.register(poll_fd)

		#  Check invalid unregister
		#
		try:
			#  Mask the log message as we expect a failure
			self.log.setLevel(logging.CRITICAL)
			poll.unregister(self)
			self.log.setLevel(log_level)
			expected_error_occurred = False
		except Exception as e:
			self.log.setLevel(log_level)
			self.log.info("%s Received expected error -- %s", my(self), str(e))
			expected_error_occurred = True
		assert expected_error_occurred

		#  Check valid unregister
		#
		poll.unregister(poll_fd)
		self.close_pipe()

	def Test_C_poll(self):
		"""
		Drive event delivery through the self-pipe in the default mode and
		again under PL_SELECT, checking events, timeouts and timing accuracy.
		"""
		poll_fd, poll_send = self.self_pipe()
		poll = taskforce.poll.poll()
		poll.register(poll_fd, taskforce.poll.POLLIN)

		#  Check active poll
		os.write(poll_send, '\0'.encode('utf-8'))
		evlist = poll.poll(timeout=30)
		self.dump_evlist(poll, 'active poll', evlist)
		assert evlist
		assert len(os.read(poll_fd, 10)) == 1

		#  Check timeout
		evlist = poll.poll(timeout=30)
		self.dump_evlist(poll, 'timeout poll', evlist)
		assert evlist == []

		#  Check timeout accuracy (timeouts are in milliseconds)
		start = time.time()
		delay = 500
		evlist = poll.poll(timeout=delay)
		self.dump_evlist(poll, 'timeout accuracy', evlist)
		assert evlist == []
		delta = abs(time.time() - start - delay/1000.0)
		self.log.info("%s poll timeout delta from wall clock %s", my(self), deltafmt(delta, decimals=6))
		assert delta < 0.1

		if poll.get_mode() == taskforce.poll.PL_SELECT:
			self.log.warning("%s Default mode is PL_SELECT so retest skipped", my(self))
		else:
			#  Repeat the same sequence with the PL_SELECT fallback mode.
			poll = taskforce.poll.poll()
			poll.set_mode(taskforce.poll.PL_SELECT)
			poll.register(poll_fd, taskforce.poll.POLLIN)

			#  Check active poll
			os.write(poll_send, '\0'.encode('utf-8'))
			evlist = poll.poll(timeout=30)
			self.dump_evlist(poll, 'select active poll', evlist)
			assert evlist
			assert len(os.read(poll_fd, 10)) == 1

			#  Check timeout
			evlist = poll.poll(timeout=30)
			self.dump_evlist(poll, 'select timeout poll', evlist)
			assert evlist == []

			#  Check timeout accuracy
			start = time.time()
			delay = 500
			evlist = poll.poll(timeout=delay)
			self.dump_evlist(poll, 'select timeout accuracy', evlist)
			assert evlist == []
			delta = abs(time.time() - start - delay/1000.0)
			self.log.info("%s select poll timeout delta from wall clock %s", my(self), deltafmt(delta, decimals=6))
			assert delta < 0.1
		self.close_pipe()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
# On Python 3 the separate basestring/unicode types are gone; alias both to
# str so the Python-2-oriented isinstance() checks below keep working.
# Compare sys.version_info rather than the sys.version string: the string's
# lexicographic comparison would misclassify a future two-digit major
# (e.g. "10.0" < "3").
if sys.version_info >= (3,):
    basestring = unicode = str
from py4j.java_gateway import JavaClass
from pyspark import RDD, since, keyword_only
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.column import _to_seq
from pyspark.sql.types import *
from pyspark.sql import utils
__all__ = ["DataFrameReader", "DataFrameWriter"]
def to_str(value):
    """
    A wrapper over str(), but converts bool values to lower case strings.
    If None is given, just returns None, instead of converting it to string "None".
    """
    # None passes through untouched so "unset" options stay unset.
    if value is None:
        return None
    # Booleans become JVM-friendly 'true'/'false'.
    if isinstance(value, bool):
        return str(value).lower()
    return str(value)
class OptionUtils(object):
    """Mixin providing a helper to apply a schema and many options at once."""

    def _set_opts(self, schema=None, **options):
        """
        Set named options (filter out those the value is None)
        """
        if schema is not None:
            self.schema(schema)
        for name, value in options.items():
            if value is None:
                continue
            self.option(name, value)
class DataFrameReader(OptionUtils):
    """
    Interface used to load a :class:`DataFrame` from external storage systems
    (e.g. file systems, key-value stores, etc). Use :func:`spark.read`
    to access this.
    .. versionadded:: 1.4
    """
    def __init__(self, spark):
        # Underlying JVM DataFrameReader; every builder method below mutates
        # and re-stores this handle, then returns self for chaining.
        self._jreader = spark._ssql_ctx.read()
        self._spark = spark
    def _df(self, jdf):
        # Wrap a py4j DataFrame handle in the Python-side DataFrame class.
        from pyspark.sql.dataframe import DataFrame
        return DataFrame(jdf, self._spark)
    @since(1.4)
    def format(self, source):
        """Specifies the input data source format.
        :param source: string, name of the data source, e.g. 'json', 'parquet'.
        >>> df = spark.read.format('json').load('python/test_support/sql/people.json')
        >>> df.dtypes
        [('age', 'bigint'), ('name', 'string')]
        """
        self._jreader = self._jreader.format(source)
        return self
    @since(1.4)
    def schema(self, schema):
        """Specifies the input schema.
        Some data sources (e.g. JSON) can infer the input schema automatically from data.
        By specifying the schema here, the underlying data source can skip the schema
        inference step, and thus speed up data loading.
        :param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
                       (For example ``col0 INT, col1 DOUBLE``).
        """
        from pyspark.sql import SparkSession
        spark = SparkSession.builder.getOrCreate()
        if isinstance(schema, StructType):
            # StructType is serialised to JSON and re-parsed on the JVM side.
            jschema = spark._jsparkSession.parseDataType(schema.json())
            self._jreader = self._jreader.schema(jschema)
        elif isinstance(schema, basestring):
            # DDL-formatted schema strings are forwarded verbatim.
            self._jreader = self._jreader.schema(schema)
        else:
            raise TypeError("schema should be StructType or string")
        return self
    @since(1.5)
    def option(self, key, value):
        """Adds an input option for the underlying data source.
        You can set the following option(s) for reading files:
            * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
                in the JSON/CSV datasources or partition values.
                If it isn't set, it uses the default value, session local timezone.
        """
        # to_str() lower-cases booleans and leaves None alone for the JVM.
        self._jreader = self._jreader.option(key, to_str(value))
        return self
    @since(1.4)
    def options(self, **options):
        """Adds input options for the underlying data source.
        You can set the following option(s) for reading files:
            * ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
                in the JSON/CSV datasources or partition values.
                If it isn't set, it uses the default value, session local timezone.
        """
        for k in options:
            self._jreader = self._jreader.option(k, to_str(options[k]))
        return self
    @since(1.4)
    def load(self, path=None, format=None, schema=None, **options):
        """Loads data from a data source and returns it as a :class`DataFrame`.
        :param path: optional string or a list of string for file-system backed data sources.
        :param format: optional string for format of the data source. Default to 'parquet'.
        :param schema: optional :class:`pyspark.sql.types.StructType` for the input schema
                       or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
        :param options: all other string options
        >>> df = spark.read.load('python/test_support/sql/parquet_partitioned', opt1=True,
        ...     opt2=1, opt3='str')
        >>> df.dtypes
        [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
        >>> df = spark.read.format('json').load(['python/test_support/sql/people.json',
        ...     'python/test_support/sql/people1.json'])
        >>> df.dtypes
        [('age', 'bigint'), ('aka', 'string'), ('name', 'string')]
        """
        if format is not None:
            self.format(format)
        if schema is not None:
            self.schema(schema)
        self.options(**options)
        # A single path, a list of paths, or no path at all map to the three
        # JVM-side load() overloads.
        if isinstance(path, basestring):
            return self._df(self._jreader.load(path))
        elif path is not None:
            if type(path) != list:
                path = [path]
            return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path)))
        else:
            return self._df(self._jreader.load())
    @since(1.4)
    def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
             allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
             allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
             mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
             multiLine=None):
        """
        Loads JSON files and returns the results as a :class:`DataFrame`.
        `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
        For JSON (one record per file), set the ``multiLine`` parameter to ``true``.
        If the ``schema`` parameter is not specified, this function goes
        through the input once to determine the input schema.
        :param path: string represents path to the JSON dataset, or a list of paths,
                     or RDD of Strings storing JSON objects.
        :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or
                       a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
        :param primitivesAsString: infers all primitive values as a string type. If None is set,
                                   it uses the default value, ``false``.
        :param prefersDecimal: infers all floating-point values as a decimal type. If the values
                               do not fit in decimal, then it infers them as doubles. If None is
                               set, it uses the default value, ``false``.
        :param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
                              it uses the default value, ``false``.
        :param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
                                        it uses the default value, ``false``.
        :param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
                                  set, it uses the default value, ``true``.
        :param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
                                        set, it uses the default value, ``false``.
        :param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
                                                   using backslash quoting mechanism. If None is
                                                   set, it uses the default value, ``false``.
        :param mode: allows a mode for dealing with corrupt records during parsing. If None is
                     set, it uses the default value, ``PERMISSIVE``.
                * ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
                  record, and puts the malformed string into a field configured by \
                  ``columnNameOfCorruptRecord``. To keep corrupt records, an user can set \
                  a string type field named ``columnNameOfCorruptRecord`` in an user-defined \
                  schema. If a schema does not have the field, it drops corrupt records during \
                  parsing. When inferring a schema, it implicitly adds a \
                  ``columnNameOfCorruptRecord`` field in an output schema.
                * ``DROPMALFORMED`` : ignores the whole corrupted records.
                * ``FAILFAST`` : throws an exception when it meets corrupted records.
        :param columnNameOfCorruptRecord: allows renaming the new field having malformed string
                                          created by ``PERMISSIVE`` mode. This overrides
                                          ``spark.sql.columnNameOfCorruptRecord``. If None is set,
                                          it uses the value specified in
                                          ``spark.sql.columnNameOfCorruptRecord``.
        :param dateFormat: sets the string that indicates a date format. Custom date formats
                           follow the formats at ``java.text.SimpleDateFormat``. This
                           applies to date type. If None is set, it uses the
                           default value, ``yyyy-MM-dd``.
        :param timestampFormat: sets the string that indicates a timestamp format. Custom date
                                formats follow the formats at ``java.text.SimpleDateFormat``.
                                This applies to timestamp type. If None is set, it uses the
                                default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
        :param multiLine: parse one record, which may span multiple lines, per file. If None is
                          set, it uses the default value, ``false``.
        >>> df1 = spark.read.json('python/test_support/sql/people.json')
        >>> df1.dtypes
        [('age', 'bigint'), ('name', 'string')]
        >>> rdd = sc.textFile('python/test_support/sql/people.json')
        >>> df2 = spark.read.json(rdd)
        >>> df2.dtypes
        [('age', 'bigint'), ('name', 'string')]
        """
        self._set_opts(
            schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
            allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
            allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
            allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
            mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
            timestampFormat=timestampFormat, multiLine=multiLine)
        # Normalise a single path string to a list, then dispatch on input kind.
        if isinstance(path, basestring):
            path = [path]
        if type(path) == list:
            return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path)))
        elif isinstance(path, RDD):
            def func(iterator):
                # Force every record to utf-8 encoded bytes so the JVM-side
                # BytesToString conversion below sees a uniform representation.
                for x in iterator:
                    if not isinstance(x, basestring):
                        x = unicode(x)
                    if isinstance(x, unicode):
                        x = x.encode("utf-8")
                    yield x
            keyed = path.mapPartitions(func)
            keyed._bypass_serializer = True
            jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
            return self._df(self._jreader.json(jrdd))
        else:
            raise TypeError("path can be only string, list or RDD")
    @since(1.4)
    def table(self, tableName):
        """Returns the specified table as a :class:`DataFrame`.
        :param tableName: string, name of the table.
        >>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
        >>> df.createOrReplaceTempView('tmpTable')
        >>> spark.read.table('tmpTable').dtypes
        [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
        """
        return self._df(self._jreader.table(tableName))
    @since(1.4)
    def parquet(self, *paths):
        """Loads Parquet files, returning the result as a :class:`DataFrame`.
        You can set the following Parquet-specific option(s) for reading Parquet files:
            * ``mergeSchema``: sets whether we should merge schemas collected from all \
                Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \
                The default value is specified in ``spark.sql.parquet.mergeSchema``.
        >>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
        >>> df.dtypes
        [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
        """
        return self._df(self._jreader.parquet(_to_seq(self._spark._sc, paths)))
    @ignore_unicode_prefix
    @since(1.6)
    def text(self, paths):
        """
        Loads text files and returns a :class:`DataFrame` whose schema starts with a
        string column named "value", and followed by partitioned columns if there
        are any.
        Each line in the text file is a new row in the resulting DataFrame.
        :param paths: string, or list of strings, for input path(s).
        >>> df = spark.read.text('python/test_support/sql/text-test.txt')
        >>> df.collect()
        [Row(value=u'hello'), Row(value=u'this')]
        """
        if isinstance(paths, basestring):
            paths = [paths]
        return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
    @since(2.0)
    def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
            comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
            ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
            negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None,
            maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None,
            columnNameOfCorruptRecord=None, multiLine=None):
        """Loads a CSV file and returns the result as a  :class:`DataFrame`.
        This function will go through the input once to determine the input schema if
        ``inferSchema`` is enabled. To avoid going through the entire data once, disable
        ``inferSchema`` option or specify the schema explicitly using ``schema``.
        :param path: string, or list of strings, for input path(s).
        :param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema
                       or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
        :param sep: sets the single character as a separator for each field and value.
                    If None is set, it uses the default value, ``,``.
        :param encoding: decodes the CSV files by the given encoding type. If None is set,
                         it uses the default value, ``UTF-8``.
        :param quote: sets the single character used for escaping quoted values where the
                      separator can be part of the value. If None is set, it uses the default
                      value, ``"``. If you would like to turn off quotations, you need to set an
                      empty string.
        :param escape: sets the single character used for escaping quotes inside an already
                       quoted value. If None is set, it uses the default value, ``\``.
        :param comment: sets the single character used for skipping lines beginning with this
                        character. By default (None), it is disabled.
        :param header: uses the first line as names of columns. If None is set, it uses the
                       default value, ``false``.
        :param inferSchema: infers the input schema automatically from data. It requires one extra
                            pass over the data. If None is set, it uses the default value, ``false``.
        :param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from
                                        values being read should be skipped. If None is set, it
                                        uses the default value, ``false``.
        :param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces from
                                         values being read should be skipped. If None is set, it
                                         uses the default value, ``false``.
        :param nullValue: sets the string representation of a null value. If None is set, it uses
                          the default value, empty string. Since 2.0.1, this ``nullValue`` param
                          applies to all supported types including the string type.
        :param nanValue: sets the string representation of a non-number value. If None is set, it
                         uses the default value, ``NaN``.
        :param positiveInf: sets the string representation of a positive infinity value. If None
                            is set, it uses the default value, ``Inf``.
        :param negativeInf: sets the string representation of a negative infinity value. If None
                            is set, it uses the default value, ``Inf``.
        :param dateFormat: sets the string that indicates a date format. Custom date formats
                           follow the formats at ``java.text.SimpleDateFormat``. This
                           applies to date type. If None is set, it uses the
                           default value, ``yyyy-MM-dd``.
        :param timestampFormat: sets the string that indicates a timestamp format. Custom date
                                formats follow the formats at ``java.text.SimpleDateFormat``.
                                This applies to timestamp type. If None is set, it uses the
                                default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
        :param maxColumns: defines a hard limit of how many columns a record can have. If None is
                           set, it uses the default value, ``20480``.
        :param maxCharsPerColumn: defines the maximum number of characters allowed for any given
                                  value being read. If None is set, it uses the default value,
                                  ``-1`` meaning unlimited length.
        :param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0.
                                            If specified, it is ignored.
        :param mode: allows a mode for dealing with corrupt records during parsing. If None is
                     set, it uses the default value, ``PERMISSIVE``.
                * ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
                  record, and puts the malformed string into a field configured by \
                  ``columnNameOfCorruptRecord``. To keep corrupt records, an user can set \
                  a string type field named ``columnNameOfCorruptRecord`` in an \
                  user-defined schema. If a schema does not have the field, it drops corrupt \
                  records during parsing. When a length of parsed CSV tokens is shorter than \
                  an expected length of a schema, it sets `null` for extra fields.
                * ``DROPMALFORMED`` : ignores the whole corrupted records.
                * ``FAILFAST`` : throws an exception when it meets corrupted records.
        :param columnNameOfCorruptRecord: allows renaming the new field having malformed string
                                          created by ``PERMISSIVE`` mode. This overrides
                                          ``spark.sql.columnNameOfCorruptRecord``. If None is set,
                                          it uses the value specified in
                                          ``spark.sql.columnNameOfCorruptRecord``.
        :param multiLine: parse records, which may span multiple lines. If None is
                          set, it uses the default value, ``false``.
        >>> df = spark.read.csv('python/test_support/sql/ages.csv')
        >>> df.dtypes
        [('_c0', 'string'), ('_c1', 'string')]
        """
        self._set_opts(
            schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment,
            header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
            ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue,
            nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf,
            dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns,
            maxCharsPerColumn=maxCharsPerColumn,
            maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode,
            columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine)
        if isinstance(path, basestring):
            path = [path]
        return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path)))
    @since(1.5)
    def orc(self, path):
        """Loads ORC files, returning the result as a :class:`DataFrame`.
        .. note:: Currently ORC support is only available together with Hive support.
        >>> df = spark.read.orc('python/test_support/sql/orc_partitioned')
        >>> df.dtypes
        [('a', 'bigint'), ('b', 'int'), ('c', 'int')]
        """
        if isinstance(path, basestring):
            path = [path]
        return self._df(self._jreader.orc(_to_seq(self._spark._sc, path)))
    @since(1.4)
    def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
             predicates=None, properties=None):
        """
        Construct a :class:`DataFrame` representing the database table named ``table``
        accessible via JDBC URL ``url`` and connection ``properties``.
        Partitions of the table will be retrieved in parallel if either ``column`` or
        ``predicates`` is specified. ``lowerBound`, ``upperBound`` and ``numPartitions``
        is needed when ``column`` is specified.
        If both ``column`` and ``predicates`` are specified, ``column`` will be used.
        .. note:: Don't create too many partitions in parallel on a large cluster; \
            otherwise Spark might crash your external database systems.
        :param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
        :param table: the name of the table
        :param column: the name of an integer column that will be used for partitioning;
                       if this parameter is specified, then ``numPartitions``, ``lowerBound``
                       (inclusive), and ``upperBound`` (exclusive) will form partition strides
                       for generated WHERE clause expressions used to split the column
                       ``column`` evenly
        :param lowerBound: the minimum value of ``column`` used to decide partition stride
        :param upperBound: the maximum value of ``column`` used to decide partition stride
        :param numPartitions: the number of partitions
        :param predicates: a list of expressions suitable for inclusion in WHERE clauses;
                           each one defines one partition of the :class:`DataFrame`
        :param properties: a dictionary of JDBC database connection arguments. Normally at
                           least properties "user" and "password" with their corresponding values.
                           For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
        :return: a DataFrame
        """
        if properties is None:
            properties = dict()
        # Copy the connection properties into a java.util.Properties instance.
        jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
        for k in properties:
            jprop.setProperty(k, properties[k])
        if column is not None:
            # Stride-partitioned read: all three bounds are mandatory.
            assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
            assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
            assert numPartitions is not None, \
                "numPartitions can not be None when ``column`` is specified"
            return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
                                               int(numPartitions), jprop))
        if predicates is not None:
            # One partition per WHERE-clause predicate.
            gateway = self._spark._sc._gateway
            jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
            return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
        return self._df(self._jreader.jdbc(url, table, jprop))
class DataFrameWriter(OptionUtils):
"""
Interface used to write a :class:`DataFrame` to external storage systems
(e.g. file systems, key-value stores, etc). Use :func:`DataFrame.write`
to access this.
.. versionadded:: 1.4
"""
    def __init__(self, df):
        # Keep the source DataFrame and its SQL context, and obtain a fresh
        # JVM-side DataFrameWriter that the builder methods below mutate.
        self._df = df
        self._spark = df.sql_ctx
        self._jwrite = df._jdf.write()
    def _sq(self, jsq):
        # Wrap a JVM StreamingQuery handle in its Python counterpart.
        from pyspark.sql.streaming import StreamingQuery
        return StreamingQuery(jsq)
@since(1.4)
def mode(self, saveMode):
"""Specifies the behavior when data or table already exists.
Options include:
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
# At the JVM side, the default value of mode is already set to "error".
# So, if the given saveMode is None, we will not call JVM-side's mode method.
if saveMode is not None:
self._jwrite = self._jwrite.mode(saveMode)
return self
    @since(1.4)
    def format(self, source):
        """Specifies the underlying output data source.
        :param source: string, name of the data source, e.g. 'json', 'parquet'.
        >>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        # Record the sink format on the JVM writer and chain.
        self._jwrite = self._jwrite.format(source)
        return self
    @since(1.5)
    def option(self, key, value):
        """Adds an output option for the underlying data source.
        You can set the following option(s) for writing files:
            * ``timeZone``: sets the string that indicates a timezone to be used to format
                timestamps in the JSON/CSV datasources or partition values.
                If it isn't set, it uses the default value, session local timezone.
        """
        # to_str() lower-cases booleans and leaves None alone for the JVM.
        self._jwrite = self._jwrite.option(key, to_str(value))
        return self
@since(1.4)
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self
@since(1.4)
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self
@since(2.3)
def bucketBy(self, numBuckets, col, *cols):
"""Buckets the output by the given columns.If specified,
the output is laid out on the file system similar to Hive's bucketing scheme.
:param numBuckets: the number of buckets to save
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
.. note:: Applicable for file-based data sources in combination with
:py:meth:`DataFrameWriter.saveAsTable`.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .mode("overwrite")
... .saveAsTable('bucketed_table'))
"""
if not isinstance(numBuckets, int):
raise TypeError("numBuckets should be an int, got {0}.".format(type(numBuckets)))
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.bucketBy(numBuckets, col, _to_seq(self._spark._sc, cols))
return self
    @since(2.3)
    def sortBy(self, col, *cols):
        """Sorts the output in each bucket by the given columns on the file system.

        :param col: a name of a column, or a list of names.
        :param cols: additional names (optional). If `col` is a list it should be empty.

        >>> (df.write.format('parquet')  # doctest: +SKIP
        ...     .bucketBy(100, 'year', 'month')
        ...     .sortBy('day')
        ...     .mode("overwrite")
        ...     .saveAsTable('sorted_bucketed_table'))
        """
        # A single list/tuple of names may be passed as `col`; unpack it.
        if isinstance(col, (list, tuple)):
            if cols:
                raise ValueError("col is a {0} but cols are not empty".format(type(col)))
            col, cols = col[0], col[1:]
        # Validate that every sort column name is a string.
        if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
            raise TypeError("all names should be `str`")
        self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols))
        return self
    @since(1.4)
    def save(self, path=None, format=None, mode=None, partitionBy=None, **options):
        """Saves the contents of the :class:`DataFrame` to a data source.

        The data source is specified by the ``format`` and a set of ``options``.
        If ``format`` is not specified, the default data source configured by
        ``spark.sql.sources.default`` will be used.

        :param path: the path in a Hadoop supported file system
        :param format: the format used to save
        :param mode: specifies the behavior of the save operation when data already exists.

            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param partitionBy: names of partitioning columns
        :param options: all other string options

        >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        # Configure the writer (mode, options, partitioning, format) before the
        # terminal save() call on the JVM side.
        self.mode(mode).options(**options)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        if format is not None:
            self.format(format)
        # With no path the source decides the destination (e.g. a managed table).
        if path is None:
            self._jwrite.save()
        else:
            self._jwrite.save(path)
@since(1.4)
def insertInto(self, tableName, overwrite=False):
"""Inserts the content of the :class:`DataFrame` to the specified table.
It requires that the schema of the class:`DataFrame` is the same as the
schema of the table.
Optionally overwriting any existing data.
"""
self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName)
    @since(1.4)
    def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
        """Saves the content of the :class:`DataFrame` as the specified table.

        In the case the table already exists, behavior of this function depends on the
        save mode, specified by the `mode` function (default to throwing an exception).
        When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
        the same as that of the existing table.

        * `append`: Append contents of this :class:`DataFrame` to existing data.
        * `overwrite`: Overwrite existing data.
        * `error`: Throw an exception if data already exists.
        * `ignore`: Silently ignore this operation if data already exists.

        :param name: the table name
        :param format: the format used to save
        :param mode: one of `append`, `overwrite`, `error`, `ignore` (default: error)
        :param partitionBy: names of partitioning columns
        :param options: all other string options
        """
        # Apply writer configuration before the terminal saveAsTable() call.
        self.mode(mode).options(**options)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        if format is not None:
            self.format(format)
        self._jwrite.saveAsTable(name)
    @since(1.4)
    def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None):
        """Saves the content of the :class:`DataFrame` in JSON format
        (`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
        specified path.

        :param path: the path in any Hadoop supported file system
        :param mode: specifies the behavior of the save operation when data already exists.

            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shorten names (none, bzip2, gzip, lz4,
                            snappy and deflate).
        :param dateFormat: sets the string that indicates a date format. Custom date formats
                           follow the formats at ``java.text.SimpleDateFormat``. This
                           applies to date type. If None is set, it uses the
                           default value, ``yyyy-MM-dd``.
        :param timestampFormat: sets the string that indicates a timestamp format. Custom date
                                formats follow the formats at ``java.text.SimpleDateFormat``.
                                This applies to timestamp type. If None is set, it uses the
                                default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.

        >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        # Mode and options must be configured before the terminal json() call.
        self.mode(mode)
        self._set_opts(
            compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat)
        self._jwrite.json(path)
    @since(1.4)
    def parquet(self, path, mode=None, partitionBy=None, compression=None):
        """Saves the content of the :class:`DataFrame` in Parquet format at the specified path.

        :param path: the path in any Hadoop supported file system
        :param mode: specifies the behavior of the save operation when data already exists.

            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param partitionBy: names of partitioning columns
        :param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shorten names (none, snappy, gzip, and lzo).
                            This will override ``spark.sql.parquet.compression.codec``. If None
                            is set, it uses the value specified in
                            ``spark.sql.parquet.compression.codec``.

        >>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        # Configure mode, partitioning and compression before writing.
        self.mode(mode)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        self._set_opts(compression=compression)
        self._jwrite.parquet(path)
@since(1.6)
def text(self, path, compression=None):
"""Saves the content of the DataFrame in a text file at the specified path.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
"""
self._set_opts(compression=compression)
self._jwrite.text(path)
    @since(2.0)
    def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
            header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None,
            timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None):
        """Saves the content of the :class:`DataFrame` in CSV format at the specified path.

        :param path: the path in any Hadoop supported file system
        :param mode: specifies the behavior of the save operation when data already exists.

            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shorten names (none, bzip2, gzip, lz4,
                            snappy and deflate).
        :param sep: sets the single character as a separator for each field and value. If None is
                    set, it uses the default value, ``,``.
        :param quote: sets the single character used for escaping quoted values where the
                      separator can be part of the value. If None is set, it uses the default
                      value, ``"``. If you would like to turn off quotations, you need to set an
                      empty string.
        :param escape: sets the single character used for escaping quotes inside an already
                       quoted value. If None is set, it uses the default value, ``\``
        :param escapeQuotes: a flag indicating whether values containing quotes should always
                             be enclosed in quotes. If None is set, it uses the default value
                             ``true``, escaping all values containing a quote character.
        :param quoteAll: a flag indicating whether all values should always be enclosed in
                         quotes. If None is set, it uses the default value ``false``,
                         only escaping values containing a quote character.
        :param header: writes the names of columns as the first line. If None is set, it uses
                       the default value, ``false``.
        :param nullValue: sets the string representation of a null value. If None is set, it uses
                          the default value, empty string.
        :param dateFormat: sets the string that indicates a date format. Custom date formats
                           follow the formats at ``java.text.SimpleDateFormat``. This
                           applies to date type. If None is set, it uses the
                           default value, ``yyyy-MM-dd``.
        :param timestampFormat: sets the string that indicates a timestamp format. Custom date
                                formats follow the formats at ``java.text.SimpleDateFormat``.
                                This applies to timestamp type. If None is set, it uses the
                                default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
        :param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
                                        values being written should be skipped. If None is set, it
                                        uses the default value, ``true``.
        :param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
                                         values being written should be skipped. If None is set, it
                                         uses the default value, ``true``.

        >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        # Configure mode and all CSV-specific options before the terminal call;
        # None-valued options are left at their datasource defaults.
        self.mode(mode)
        self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header,
                       nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll,
                       dateFormat=dateFormat, timestampFormat=timestampFormat,
                       ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
                       ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace)
        self._jwrite.csv(path)
    @since(1.5)
    def orc(self, path, mode=None, partitionBy=None, compression=None):
        """Saves the content of the :class:`DataFrame` in ORC format at the specified path.

        .. note:: Currently ORC support is only available together with Hive support.

        :param path: the path in any Hadoop supported file system
        :param mode: specifies the behavior of the save operation when data already exists.

            * ``append``: Append contents of this :class:`DataFrame` to existing data.
            * ``overwrite``: Overwrite existing data.
            * ``ignore``: Silently ignore this operation if data already exists.
            * ``error`` (default case): Throw an exception if data already exists.
        :param partitionBy: names of partitioning columns
        :param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shorten names (none, snappy, zlib, and lzo).
                            This will override ``orc.compress``. If None is set, it uses the
                            default value, ``snappy``.

        >>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned')
        >>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
        """
        # Configure mode, partitioning and compression before writing.
        self.mode(mode)
        if partitionBy is not None:
            self.partitionBy(partitionBy)
        self._set_opts(compression=compression)
        self._jwrite.orc(path)
@since(1.4)
def jdbc(self, url, table, mode=None, properties=None):
"""Saves the content of the :class:`DataFrame` to an external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster; \
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: Name of the table in the external database.
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
self._jwrite.mode(mode).jdbc(url, table, jprop)
def _test():
    """Run this module's doctests against a live SparkContext/SparkSession."""
    import doctest
    import os
    import tempfile
    import py4j
    from pyspark.context import SparkContext
    from pyspark.sql import SparkSession, Row
    import pyspark.sql.readwriter
    # Doctests reference fixture paths relative to the Spark source tree.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.readwriter.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    try:
        spark = SparkSession.builder.enableHiveSupport().getOrCreate()
    except py4j.protocol.Py4JError:
        # Hive support unavailable in this build; fall back to a plain session.
        spark = SparkSession(sc)
    globs['tempfile'] = tempfile
    globs['os'] = os
    globs['sc'] = sc
    globs['spark'] = spark
    globs['df'] = spark.read.parquet('python/test_support/sql/parquet_partitioned')
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.readwriter, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    sc.stop()
    # Non-zero exit code signals doctest failure to the test harness.
    if failure_count:
        exit(-1)
# Script entry point: run the doctest suite when executed directly.
if __name__ == "__main__":
    _test()
| |
# -*- coding:utf8 -*-
import os
import functools
import threading
import numpy as np
from tartist import image, random
from tartist.app import rl
from tartist.core import get_env, get_logger
from tartist.core.utils.cache import cached_result
from tartist.core.utils.meta import map_exec
from tartist.core.utils.naming import get_dump_directory
from tartist.nn import opr as O, optimizer, summary
logger = get_logger(__file__)
# Experiment configuration, read throughout this file via get_env('...').
__envs__ = {
    'dir': {
        'root': get_dump_directory(__file__),
    },
    'dqn': {
        'env_name': 'BreakoutNoFrameskip-v4',
        'input_shape': (84, 84),
        # Number of stacked grayscale frames fed to the network.
        'nr_history_frames': 4,
        'max_nr_steps': 40000,
        'frame_skip': 4,
        # gamma and TD steps in future_reward
        # nr_td_steps must be 1 (asserted in make_network)
        'gamma': 0.9,
        'nr_td_steps': 1,
        'expreplay': {
            'maxsize': 500000,
        },
        'collector': {
            'mode': 'EPISODE-STEP',
            'target': 50000,
            'nr_workers': 4,
            'nr_predictors': 2,
            'predictor_output_names': ['q_argmax'],
        },
        'inference': {
            'nr_plays': 20,
            'max_antistuck_repeat': 30
        },
        'demo': {
            'nr_plays': 5
        }
    },
    'trainer': {
        'learning_rate': 0.0001,
        'nr_epochs': 800,
        # Parameters for Q-learner.
        'batch_size': 64,
        'epoch_size': 4000,
    }
}
def make_network(env):
    """Build the DQN network: a shared conv trunk applied to both the current
    and next state, a fully-connected Q head, and (in training phase) the
    one-step TD loss."""
    is_train = env.phase is env.Phase.TRAIN
    with env.create_network() as net:
        h, w, c = get_input_shape()
        dpc = env.create_dpcontroller()
        with dpc.activate():
            def inputs():
                # Current and next observation, both (batch, h, w, c).
                state = O.placeholder('state', shape=(None, h, w, c))
                next_state = O.placeholder('next_state', shape=(None, h, w, c))
                return [state, next_state]
            @O.auto_reuse
            def phi(x):
                # Conv feature extractor; auto_reuse shares weights between
                # the state and next_state passes.
                _ = x / 255.0
                # Nature structure
                with O.argscope(O.conv2d, nonlin=O.relu):
                    _ = O.conv2d('conv1', _, 32, 8, stride=4)
                    _ = O.conv2d('conv2', _, 64, 4, stride=2)
                    _ = O.conv2d('conv3', _, 64, 3, stride=1)
                return _
            def forward(state, next_state):
                dpc.add_output(phi(state), name='feature')
                dpc.add_output(phi(next_state), name='next_feature')
            dpc.set_input_maker(inputs).set_forward_func(forward)
        @O.auto_reuse
        def phi_fc(feature):
            # Q head: fc0 (leaky ReLU) followed by one linear unit per action.
            _ = feature
            _ = O.fc('fc0', _, 512, nonlin=functools.partial(O.leaky_relu, alpha=0.01))
            q_pred = O.fc('fcq', _, get_player_nr_actions())
            q_max = q_pred.max(axis=1)
            q_argmax = q_pred.argmax(axis=1)
            return q_pred, q_max, q_argmax
        _ = dpc.outputs['feature']
        q_pred, q_max, q_argmax = phi_fc(_)
        _ = dpc.outputs['next_feature']
        next_q_pred, next_q_max, _ = phi_fc(_)
        net.add_output(q_pred, name='q_pred')
        net.add_output(q_max, name='q_max')
        net.add_output(q_argmax, name='q_argmax')
        if is_train:
            reward = O.placeholder('reward', shape=(None, ), dtype='float32')
            action = O.placeholder('action', shape=(None, ), dtype='int64')
            is_over = O.placeholder('is_over', shape=(None, ), dtype='bool')
            # The TD target below is only correct for single-step TD.
            assert get_env('dqn.nr_td_steps') == 1
            # Q(s, a) for the action actually taken.
            this_q_pred = (q_pred * O.one_hot(action, get_player_nr_actions())).sum(axis=1)
            # One-step target r + gamma * max_a' Q(s', a'), masked at episode
            # end and with the gradient stopped through the target network pass.
            this_q_label = reward + get_env('dqn.gamma') * (1 - is_over.astype('float32')) * O.zero_grad(next_q_max)
            summary.scalar('this_q_pred', this_q_pred.mean())
            summary.scalar('this_q_label', this_q_label.mean())
            summary.scalar('reward', reward.mean())
            summary.scalar('is_over', is_over.astype('float32').mean())
            q_loss = O.raw_smooth_l1_loss('raw_q_loss', this_q_pred, this_q_label).mean(name='q_loss')
            net.set_loss(q_loss)
def make_player(is_train=True, dump_dir=None):
    """Construct the Atari environment used by DQN, wrapped with the same
    proxy chain as the original pipeline: repeat-action, resize+grayscale,
    history-frame stacking, episode-length limit, and (for evaluation only)
    an anti-stuck wrapper."""
    def _to_gray_resized(frame):
        resized = image.resize(frame, get_env('dqn.input_shape'), interpolation='NEAREST')
        return image.grayscale(resized)

    player = rl.GymAtariRLEnviron(get_env('dqn.env_name'), live_lost_as_eoe=is_train, dump_dir=dump_dir)
    player = rl.RepeatActionProxyRLEnviron(player, get_env('dqn.frame_skip'))
    player = rl.MapStateProxyRLEnviron(player, _to_gray_resized)
    player = rl.HistoryFrameProxyRLEnviron(player, get_env('dqn.nr_history_frames'))
    player = rl.LimitLengthProxyRLEnviron(player, get_env('dqn.max_nr_steps'))
    if not is_train:
        player = rl.GymPreventStuckProxyRLEnviron(player, get_env('dqn.inference.max_antistuck_repeat'), 1)
    return player
def make_optimizer(env):
    """Attach an RMSProp optimizer with per-parameter gradient modifiers."""
    lr = optimizer.base.make_optimizer_variable('learning_rate', get_env('trainer.learning_rate'))
    wrapper = optimizer.OptimizerWrapper()
    wrapper.set_base_optimizer(optimizer.base.RMSPropOptimizer(lr, epsilon=1e-3))
    # Biases ('*/b') learn at twice the base rate.
    wrapper.append_grad_modifier(optimizer.grad_modifier.LearningRateMultiplier([
        ('*/b', 2.0),
    ]))
    # Weights ('*/W') get L2 weight decay.
    wrapper.append_grad_modifier(optimizer.grad_modifier.WeightDecay([
        ('*/W', 5e-4),
    ]))
    env.set_optimizer(wrapper)
def make_dataflow_train(env):
    """Build the training dataflow: an epsilon-greedy experience collector
    feeding a Q-learning experience-replay flow.

    :param env: the training environment holding runtime state (exp_epsilon).
    :return: an ``rl.train.QLearningDataFlow`` yielding training batches.
    """
    rng = random.gen_rng()

    def _outputs2action(outputs):
        # Epsilon-greedy: exploit the argmax-Q action with probability
        # 1 - epsilon, otherwise pick a uniformly random action.
        epsilon = env.runtime['exp_epsilon']
        return outputs['q_argmax'] if rng.rand() > epsilon else rng.choice(get_player_nr_actions())

    collector = rl.train.SynchronizedExperienceCollector(
        env, make_player, _outputs2action,
        nr_workers=get_env('dqn.collector.nr_workers'),
        # BUG FIX: this previously read 'dqn.collector.nr_workers' again, so the
        # 'dqn.collector.nr_predictors' setting declared in __envs__ was
        # silently ignored.
        nr_predictors=get_env('dqn.collector.nr_predictors'),
        predictor_output_names=get_env('dqn.collector.predictor_output_names'),
        mode=get_env('dqn.collector.mode')
    )

    # Rewards are clipped to [-1, 1] as in the original DQN setup.
    return rl.train.QLearningDataFlow(
        collector, target=get_env('dqn.collector.target'), maxsize=get_env('dqn.expreplay.maxsize'),
        batch_size=get_env('trainer.batch_size'), epoch_size=get_env('trainer.epoch_size'),
        gamma=get_env('dqn.gamma'), nr_td_steps=get_env('dqn.nr_td_steps'),
        reward_cb=lambda r: np.clip(r, -1, 1))
@cached_result
def get_player_nr_actions():
    """Return the environment's action-space size (computed once, then cached)."""
    # Build a throwaway player just to inspect its action space.
    p = make_player()
    n = p.action_space.nr_actions
    del p
    return n
@cached_result
def get_input_shape():
    """Return the (height, width, channels) of the network input, where the
    channel count is one grayscale channel per stacked history frame."""
    shape = get_env('dqn.input_shape')
    nr_frames = get_env('dqn.nr_history_frames')
    # Grayscale frames: 1 channel each, stacked along the channel axis.
    return shape[0], shape[1], 1 * nr_frames
def main_inference_play_multithread(trainer):
    """Evaluate the current network by playing several episodes, one per thread,
    recording each episode score into the async summary history."""
    def runner():
        # Each thread compiles its own inference function over q_argmax.
        func = trainer.env.make_func()
        func.compile(trainer.env.network.outputs['q_argmax'])
        # NOTE(review): this uses make_player() with is_train=True defaults
        # (live-lost-as-end-of-episode, no anti-stuck wrapper); if strict
        # evaluation semantics are intended, make_player(is_train=False) may
        # be wanted — confirm.
        player = make_player()
        score = player.evaluate_one_episode(lambda state: func(state=state[np.newaxis])[0])
        mgr = trainer.runtime.get('summary_histories', None)
        if mgr is not None:
            mgr.put_async_scalar('inference/score', score)
    nr_players = get_env('dqn.inference.nr_plays')
    pool = [threading.Thread(target=runner) for _ in range(nr_players)]
    map_exec(threading.Thread.start, pool)
    map_exec(threading.Thread.join, pool)
def main_train(trainer):
    """Wire up training plugins (summaries, progress, snapshots), the
    exploration-epsilon schedule and periodic inference, then run training."""
    # Register plugins.
    from tartist.plugins.trainer_enhancer import summary
    summary.enable_summary_history(trainer, extra_summary_types={
        'inference/score': 'async_scalar',
        'train/exp_epsilon': 'async_scalar'
    })
    summary.enable_echo_summary_scalar(trainer, summary_spec={
        'inference/score': ['avg', 'max']
    })
    from tartist.plugins.trainer_enhancer import progress
    progress.enable_epoch_progress(trainer)
    from tartist.plugins.trainer_enhancer import snapshot
    snapshot.enable_snapshot_saver(trainer, save_interval=2)
    def set_exp_epsilon(trainer_or_env, value):
        # Only log/update when the value actually changes.
        r = trainer_or_env.runtime
        if r.get('exp_epsilon', None) != value:
            logger.critical('Setting exploration epsilon to {}'.format(value))
            r['exp_epsilon'] = value
    # Piecewise-constant epsilon schedule as (epoch-threshold, value) pairs.
    schedule = [(0, 0.1), (10, 0.1), (250, 0.01), (1e9, 0.01)]
    def schedule_exp_epsilon(trainer):
        # `trainer.runtime` is synchronous with `trainer.env.runtime`.
        # Pick the value of the last threshold not exceeding the current epoch.
        last_v = None
        for e, v in schedule:
            if trainer.epoch < e:
                set_exp_epsilon(trainer, last_v)
                break
            last_v = v
    def on_epoch_after(trainer):
        # Run multi-threaded evaluation every other epoch.
        if trainer.epoch > 0 and trainer.epoch % 2 == 0:
            main_inference_play_multithread(trainer)
        # Summarize the exp epsilon.
        mgr = trainer.runtime.get('summary_histories', None)
        if mgr is not None:
            mgr.put_async_scalar('train/exp_epsilon', trainer.runtime['exp_epsilon'])
    # This one should run before monitor.
    trainer.register_event('epoch:before', schedule_exp_epsilon, priority=5)
    trainer.register_event('epoch:after', on_epoch_after, priority=5)
    trainer.train()
def main_demo(env, func):
    """Play a few episodes with the trained greedy policy, dumping recordings
    to the demo directory and logging each episode's score."""
    func.compile(env.network.outputs['q_argmax'])
    dump_dir = get_env('dir.demo', os.path.join(get_env('dir.root'), 'demo'))
    logger.info('Demo dump dir: {}'.format(dump_dir))
    player = make_player(dump_dir=dump_dir)
    repeat_time = get_env('dqn.demo.nr_plays', 1)
    for i in range(repeat_time):
        # Greedy action selection: always take argmax-Q.
        player.play_one_episode(func=lambda state: func(state=state[np.newaxis])[0])
        logger.info('#{} play score={}'.format(i, player.stats['score'][-1]))
| |
import warnings
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """PostgreSQL implementation of database introspection."""
    # Maps type codes (pg_type OIDs) to Django Field types.
    data_types_reverse = {
        16: 'BooleanField',
        17: 'BinaryField',
        20: 'BigIntegerField',
        21: 'SmallIntegerField',
        23: 'IntegerField',
        25: 'TextField',
        700: 'FloatField',
        701: 'FloatField',
        869: 'GenericIPAddressField',
        1042: 'CharField', # blank-padded
        1043: 'CharField',
        1082: 'DateField',
        1083: 'TimeField',
        1114: 'DateTimeField',
        1184: 'DateTimeField',
        1266: 'TimeField',
        1700: 'DecimalField',
        2950: 'UUIDField',
    }
    # Tables that introspection should skip entirely.
    ignored_tables = []
    # Retrieves single-column index info from the system catalogs; used by the
    # deprecated get_indexes() below.
    _get_indexes_query = """
        SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
        FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
            pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
        WHERE c.oid = idx.indrelid
            AND idx.indexrelid = c2.oid
            AND attr.attrelid = c.oid
            AND attr.attnum = idx.indkey[0]
            AND c.relname = %s"""
    def get_field_type(self, data_type, description):
        """Map serial-backed integer columns (default uses nextval) to the
        corresponding auto fields; otherwise defer to the base mapping."""
        field_type = super().get_field_type(data_type, description)
        if description.default and 'nextval' in description.default:
            if field_type == 'IntegerField':
                return 'AutoField'
            elif field_type == 'BigIntegerField':
                return 'BigAutoField'
        return field_type
    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        # relkind 'r' = ordinary table, 'v' = view; system schemas excluded.
        cursor.execute("""
            SELECT c.relname, c.relkind
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE c.relkind IN ('r', 'v')
                AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
                AND pg_catalog.pg_table_is_visible(c.oid)""")
        return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
                for row in cursor.fetchall()
                if row[0] not in self.ignored_tables]
    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        # As cursor.description does not return reliably the nullable property,
        # we have to query the information_schema (#7783)
        cursor.execute("""
            SELECT column_name, is_nullable, column_default
            FROM information_schema.columns
            WHERE table_name = %s""", [table_name])
        field_map = {line[0]: line[1:] for line in cursor.fetchall()}
        # A one-row SELECT supplies the DB-API description for each column.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        return [
            FieldInfo(*(
                (force_text(line[0]),) +
                line[1:6] +
                (field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])
            )) for line in cursor.description
        ]
    def get_relations(self, cursor, table_name):
        """
        Return a dictionary of {field_name: (field_name_other_table, other_table)}
        representing all relationships to the given table.
        """
        # contype 'f' restricts the scan to foreign-key constraints; only the
        # first column of each key (conkey[1]/confkey[1]) is considered.
        cursor.execute("""
            SELECT c2.relname, a1.attname, a2.attname
            FROM pg_constraint con
            LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
            LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
            LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
            LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
            WHERE c1.relname = %s
                AND con.contype = 'f'""", [table_name])
        relations = {}
        for row in cursor.fetchall():
            relations[row[1]] = (row[2], row[0])
        return relations
    def get_key_columns(self, cursor, table_name):
        """Return (column, referenced_table, referenced_column) triples for
        every foreign key declared on the given table."""
        key_columns = []
        cursor.execute("""
            SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
            FROM information_schema.constraint_column_usage ccu
            LEFT JOIN information_schema.key_column_usage kcu
                ON ccu.constraint_catalog = kcu.constraint_catalog
                    AND ccu.constraint_schema = kcu.constraint_schema
                    AND ccu.constraint_name = kcu.constraint_name
            LEFT JOIN information_schema.table_constraints tc
                ON ccu.constraint_catalog = tc.constraint_catalog
                    AND ccu.constraint_schema = tc.constraint_schema
                    AND ccu.constraint_name = tc.constraint_name
            WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
        key_columns.extend(cursor.fetchall())
        return key_columns
    def get_indexes(self, cursor, table_name):
        """Deprecated single-column index introspection; kept for backwards
        compatibility until Django 2.1."""
        warnings.warn(
            "get_indexes() is deprecated in favor of get_constraints().",
            RemovedInDjango21Warning, stacklevel=2
        )
        # This query retrieves each index on the given table, including the
        # first associated field name
        cursor.execute(self._get_indexes_query, [table_name])
        indexes = {}
        for row in cursor.fetchall():
            # row[1] (idx.indkey) is stored in the DB as an array. It comes out as
            # a string of space-separated integers. This designates the field
            # indexes (1-based) of the fields that have indexes on the table.
            # Here, we skip any indexes across multiple fields.
            if ' ' in row[1]:
                continue
            if row[0] not in indexes:
                indexes[row[0]] = {'primary_key': False, 'unique': False}
            # It's possible to have the unique and PK constraints in separate indexes.
            if row[3]:
                indexes[row[0]]['primary_key'] = True
            if row[2]:
                indexes[row[0]]['unique'] = True
        return indexes
    def get_constraints(self, cursor, table_name):
        """
        Retrieve any constraints or keys (unique, pk, fk, check, index) across
        one or more columns. Also retrieve the definition of expression-based
        indexes.
        """
        constraints = {}
        # Loop over the key table, collecting things as constraints. The column
        # array must return column names in the same order in which they were
        # created.
        # The subquery containing generate_series can be replaced with
        # "WITH ORDINALITY" when support for PostgreSQL 9.3 is dropped.
        cursor.execute("""
            SELECT
                c.conname,
                array(
                    SELECT attname
                    FROM (
                        SELECT unnest(c.conkey) AS colid,
                            generate_series(1, array_length(c.conkey, 1)) AS arridx
                    ) AS cols
                    JOIN pg_attribute AS ca ON cols.colid = ca.attnum
                    WHERE ca.attrelid = c.conrelid
                    ORDER BY cols.arridx
                ),
                c.contype,
                (SELECT fkc.relname || '.' || fka.attname
                FROM pg_attribute AS fka
                JOIN pg_class AS fkc ON fka.attrelid = fkc.oid
                WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]),
                cl.reloptions
            FROM pg_constraint AS c
            JOIN pg_class AS cl ON c.conrelid = cl.oid
            JOIN pg_namespace AS ns ON cl.relnamespace = ns.oid
            WHERE ns.nspname = %s AND cl.relname = %s
        """, ["public", table_name])
        for constraint, columns, kind, used_cols, options in cursor.fetchall():
            # contype: 'p' = primary key, 'u' = unique, 'f' = foreign key,
            # 'c' = check.
            constraints[constraint] = {
                "columns": columns,
                "primary_key": kind == "p",
                "unique": kind in ["p", "u"],
                "foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None,
                "check": kind == "c",
                "index": False,
                "definition": None,
                "options": options,
            }
        # Now get indexes
        cursor.execute("""
            SELECT
                indexname, array_agg(attname), indisunique, indisprimary,
                array_agg(ordering), amname, exprdef, s2.attoptions
            FROM (
                SELECT
                    c2.relname as indexname, idx.*, attr.attname, am.amname,
                    CASE
                        WHEN idx.indexprs IS NOT NULL THEN
                            pg_get_indexdef(idx.indexrelid)
                    END AS exprdef,
                    CASE am.amname
                        WHEN 'btree' THEN
                            CASE (option & 1)
                                WHEN 1 THEN 'DESC' ELSE 'ASC'
                            END
                    END as ordering,
                    c2.reloptions as attoptions
                FROM (
                    SELECT
                        *, unnest(i.indkey) as key, unnest(i.indoption) as option
                    FROM pg_index i
                ) idx
                LEFT JOIN pg_class c ON idx.indrelid = c.oid
                LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid
                LEFT JOIN pg_am am ON c2.relam = am.oid
                LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key
                WHERE c.relname = %s
            ) s2
            GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions;
        """, [table_name])
        for index, columns, unique, primary, orders, type_, definition, options in cursor.fetchall():
            # Indexes that back a constraint were already collected above.
            if index not in constraints:
                constraints[index] = {
                    "columns": columns if columns != [None] else [],
                    "orders": orders if orders != [None] else [],
                    "primary_key": primary,
                    "unique": unique,
                    "foreign_key": None,
                    "check": False,
                    "index": True,
                    "type": Index.suffix if type_ == 'btree' else type_,
                    "definition": definition,
                    "options": options,
                }
        return constraints
| |
# EntitlementsVerificationTokenExample.py
import blpapi
from optparse import OptionParser
# blpapi element names used when picking apart response messages.
SECURITY_DATA = blpapi.Name("securityData")
SECURITY = blpapi.Name("security")
EID_DATA = blpapi.Name("eidData")
AUTHORIZATION_SUCCESS = blpapi.Name("AuthorizationSuccess")
# NOTE(review): "REFRENCEDATA" is misspelled, but the string value is what
# matters to the API; renaming the constant could break external users of
# this module, so it is left as-is.
REFRENCEDATA_REQUEST = "ReferenceDataRequest"
APIAUTH_SVC = "//blp/apiauth"
REFDATA_SVC = "//blp/refdata"
# Module-level state; presumably populated from the parsed command-line
# options by driver code outside this excerpt — verify against the caller.
g_securities = None
g_tokens = None
g_session = None
g_identities = []
def printEvent(event):
    # Dump every message in the event, prefixing it with its first
    # correlation id when one carries a value. (Python 2 print syntax.)
    for msg in event:
        corrId = msg.correlationIds()[0]
        if corrId.value():
            print "Correlator:", corrId.value()
        print msg
class SessionEventHandler(object):
    # Handles asynchronous events from a blpapi session: status events are
    # printed, response events are entitlement-checked per user and
    # "distributed" (printed). Python 2 syntax (print statements, xrange).
    def printFailedEntitlements(self, listOfFailedEIDs):
        # Print the EIDs for which the user lacks entitlements.
        print listOfFailedEIDs
    def distributeMessage(self, msg):
        # For each security in the message, check which authorized identities
        # (or, absent EID data, which tokens) are entitled to the data.
        service = msg.service()
        securities = msg.getElement(SECURITY_DATA)
        numSecurities = securities.numValues()
        print "Processing %s securities" % numSecurities
        for i in xrange(numSecurities):
            security = securities.getValueAsElement(i)
            ticker = security.getElementAsString(SECURITY)
            entitlements = None
            if security.hasElement(EID_DATA):
                entitlements = security.getElement(EID_DATA)
            if (entitlements is not None and
                    entitlements.isValid() and
                    entitlements.numValues() > 0):
                # EID data present: verify each identity individually.
                for j, identity in enumerate(g_identities):
                    if identity.hasEntitlements(service, entitlements):
                        print "User: %s is entitled to get data for: %s" % \
                            (j + 1, ticker)
                    else:
                        print "User: %s is NOT entitled to get data for: %s " \
                            "- Failed eids:" % (j + 1, ticker)
                        self.printFailedEntitlements(
                            identity.getFailedEntitlements(service,
                                                           entitlements)[1])
            else:
                # No EID restrictions on this security: everyone is entitled.
                for token in g_tokens:
                    print "User: %s is entitled to get data for: %s" % \
                        (token, ticker)
            # Now Distribute message to the user.
    def processResponseEvent(self, event):
        # Skip error responses; distribute the rest.
        for msg in event:
            if msg.hasElement("RESPONSE_ERROR"):
                print msg
                continue
            self.distributeMessage(msg)
    def processEvent(self, event, session):
        # Entry point invoked by the blpapi session for every event.
        if (event.eventType() == blpapi.Event.SESSION_STATUS or
                event.eventType() == blpapi.Event.SERVICE_STATUS or
                event.eventType() == blpapi.Event.REQUEST_STATUS or
                event.eventType() == blpapi.Event.AUTHORIZATION_STATUS):
            printEvent(event)
        elif (event.eventType() == blpapi.Event.RESPONSE or
                event.eventType() == blpapi.Event.PARTIAL_RESPONSE):
            try:
                self.processResponseEvent(event)
            except blpapi.Exception as e:
                print "Library Exception !!! %s" % e.description()
        return True
def parseCmdLine():
    """Build and parse the command-line options for this example."""
    # Parse command-line parameters
    parser = OptionParser(
        description="Entitlements verification token example")
    parser.add_option("-s",
                      dest="securities",
                      help="security (default: IBM US Equity)",
                      metavar="security",
                      action="append",
                      default=[])
    parser.add_option("-t",
                      "--token",
                      dest="tokens",
                      help="token value returned in generateToken response",
                      metavar="token",
                      action="append",
                      default=[])
    parser.add_option("-a",
                      "--ip",
                      dest="host",
                      help="server name or IP (default: %default)",
                      metavar="ipAddress",
                      default="10.8.8.1")
    parser.add_option("-p",
                      dest="port",
                      type="int",
                      help="server port (default: %default)",
                      metavar="tcpPort",
                      default=8194)
    (options, args) = parser.parse_args()
    # NOTE(review): the -s help text claims "IBM US Equity" but the
    # actual fallback below is "MSFT US Equity" — confirm which is
    # intended.
    if not options.securities:
        options.securities = ["MSFT US Equity"]
    return options
def authorizeUsers():
    """Authorize each token via //blp/apiauth, creating one Identity
    per token in g_identities.

    Returns True if at least one user was authorized successfully.
    """
    authService = g_session.getService(APIAUTH_SVC)
    is_any_user_authorized = False
    # Authorize each of the users
    for index, token in enumerate(g_tokens):
        identity = g_session.createIdentity()
        g_identities.append(identity)
        authRequest = authService.createAuthorizationRequest()
        authRequest.set("token", token)
        correlator = blpapi.CorrelationId(token)
        # A dedicated queue keeps the authorization result separate
        # from the main event stream.
        eventQueue = blpapi.EventQueue()
        g_session.sendAuthorizationRequest(authRequest,
                                           identity,
                                           correlator,
                                           eventQueue)
        # Block until the authorization outcome arrives.
        event = eventQueue.nextEvent()
        if (event.eventType() == blpapi.Event.RESPONSE or
            event.eventType() == blpapi.Event.REQUEST_STATUS):
            for msg in event:
                if msg.messageType() == AUTHORIZATION_SUCCESS:
                    print "User %s authorization success" % (index + 1)
                    is_any_user_authorized = True
                else:
                    print "User %s authorization failed" % (index + 1)
                    printEvent(event)
    return is_any_user_authorized
def sendRefDataRequest():
    """Send a ReferenceDataRequest for g_securities (PX_LAST, DS002)
    with returnEids enabled so responses carry entitlement data."""
    refDataService = g_session.getService(REFDATA_SVC)
    request = refDataService.createRequest(REFRENCEDATA_REQUEST)
    # Add securities to the request
    securities = request.getElement("securities")
    for security in g_securities:
        securities.appendValue(security)
    # Add fields to the request
    fields = request.getElement("fields")
    fields.appendValue("PX_LAST")
    fields.appendValue("DS002")
    # Ask the server to include EID data so entitlements can be checked.
    request.set("returnEids", True)
    # Send the request using the server's credentials
    print "Sending RefDataRequest using server credentials..."
    g_session.sendRequest(request)
def main():
    """Entry point: start a session, authorize users from the supplied
    tokens, then request reference data on their behalf."""
    global g_session, g_securities, g_tokens
    options = parseCmdLine()
    # Create SessionOptions object and populate it with data
    sessionOptions = blpapi.SessionOptions()
    sessionOptions.setServerHost(options.host)
    sessionOptions.setServerPort(options.port)
    g_securities = options.securities
    # Tokens are mandatory: without them no identity can be authorized.
    if not options.tokens:
        print "No tokens were specified"
        return
    g_tokens = options.tokens
    print g_tokens
    # Create Session object and connect to Bloomberg services
    print "Connecting to %s:%s" % (options.host, options.port)
    eventHandler = SessionEventHandler()
    g_session = blpapi.Session(sessionOptions, eventHandler.processEvent)
    if not g_session.start():
        print "Failed to start session."
        return
    # Open authorization service
    if not g_session.openService("//blp/apiauth"):
        print "Failed to open //blp/apiauth"
        return
    # Open reference data service
    if not g_session.openService("//blp/refdata"):
        print "Failed to open //blp/refdata"
        return
    # Authorize all the users that are interested in receiving data
    if authorizeUsers():
        # Make the various requests that we need to make
        sendRefDataRequest()
    try:
        # Wait for enter key to exit application
        print "Press ENTER to quit"
        raw_input()
    finally:
        # Stop the session
        g_session.stop()
# Script entry point: run the example until Ctrl+C or ENTER.
if __name__ == "__main__":
    print "EntitlementsVerificationTokenExample"
    try:
        main()
    except KeyboardInterrupt:
        print "Ctrl+C pressed. Stopping..."
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
| |
"""Functions for dealing with HTTP clients in a unified manner."""
import asyncio
import sys
import urllib.request
from functools import partial, singledispatch
from http.client import HTTPResponse
from io import BytesIO
from itertools import starmap
from urllib.error import HTTPError
from urllib.parse import urlencode
from .http import Response
__all__ = ["send", "send_async"]
# User-Agent advertised by the built-in asyncio client below.
_ASYNCIO_USER_AGENT = "Python-asyncio/3.{}".format(sys.version_info.minor)
@singledispatch
def send(client, request):
    """Send a :class:`~snug.http.Request` with *client* and return the
    resulting :class:`~snug.http.Response`.

    This is a :func:`~functools.singledispatch` function: the concrete
    implementation is chosen by the type of *client*.

    Parameters
    ----------
    client: any registered client type
        The client with which to send the request.

        Client types registered by default:

        * :class:`urllib.request.OpenerDirector`
          (e.g. from :func:`~urllib.request.build_opener`)
        * :class:`requests.Session`
          (if `requests <http://docs.python-requests.org/>`_ is installed)
    request: Request
        The request to send

    Returns
    -------
    Response
        the resulting response

    Example of registering a new HTTP client:

    >>> @send.register(MyClientClass)
    ... def _send(client, request: Request) -> Response:
    ...     r = client.send(request)
    ...     return Response(r.status, r.read(), headers=r.get_headers())
    """
    # Fallback: no implementation was registered for type(client).
    message = "client {!r} not registered".format(client)
    raise TypeError(message)
@singledispatch
def send_async(client, request):
    """Send a :class:`~snug.http.Request` with *client*, returning an
    awaitable :class:`~snug.http.Response`.

    This is a :func:`~functools.singledispatch` function: the concrete
    implementation is chosen by the type of *client*.

    Parameters
    ----------
    client: any registered client type
        The client with which to send the request.

        Client types supported by default:

        * :class:`asyncio.AbstractEventLoop`
          (e.g. from :func:`~asyncio.get_event_loop`)
        * :class:`aiohttp.ClientSession`
          (if `aiohttp <http://aiohttp.readthedocs.io/>`_ is installed)
    request: Request
        The request to send

    Returns
    -------
    Response
        the resulting response

    Example of registering a new HTTP client:

    >>> @send_async.register(MyClientClass)
    ... async def _send(client, request: Request) -> Response:
    ...     r = await client.send(request)
    ...     return Response(r.status, r.read(), headers=r.get_headers())
    """
    # Fallback: no implementation was registered for type(client).
    message = "client {!r} not registered".format(client)
    raise TypeError(message)
@send.register(urllib.request.OpenerDirector)
def _urllib_send(opener, req, **kwargs):
    """Send a request with an :mod:`urllib` opener"""
    # Default the content type when a body is present but the caller
    # did not set one.
    if req.content and not any(
        h.lower() == "content-type" for h in req.headers
    ):
        req = req.with_headers({"Content-Type": "application/octet-stream"})
    # NOTE(review): a "?" is appended even when req.params is empty,
    # yielding URLs like "http://host/path?" — confirm this is intended.
    url = req.url + "?" + urlencode(req.params)
    raw_req = urllib.request.Request(url, req.content, headers=req.headers)
    raw_req.method = req.method
    try:
        # **kwargs is forwarded to OpenerDirector.open (e.g. timeout).
        res = opener.open(raw_req, **kwargs)
    except HTTPError as http_err:
        # urllib raises on HTTP error statuses; the error object is
        # itself a response, so use it as the result.
        res = http_err
    return Response(res.getcode(), content=res.read(), headers=res.headers)
class _SocketAdaptor:
def __init__(self, io):
self._file = io
def makefile(self, *args, **kwargs):
return self._file
@send_async.register(asyncio.AbstractEventLoop)
async def _asyncio_send(loop, req, *, timeout=10, max_redirects=10):
    """A rudimentary HTTP client using :mod:`asyncio`"""
    # Identify ourselves unless the caller already set a User-Agent.
    if not any(h.lower() == "user-agent" for h in req.headers):
        req = req.with_headers({"User-Agent": _ASYNCIO_USER_AGENT})
    url = urllib.parse.urlsplit(
        req.url + "?" + urllib.parse.urlencode(req.params)
    )
    # Port/TLS are derived from the scheme; no other ports supported.
    open_ = partial(asyncio.open_connection, url.hostname)
    connect = open_(443, ssl=True) if url.scheme == "https" else open_(80)
    reader, writer = await connect
    try:
        # Hand-build a minimal HTTP/1.1 request; "Connection: close"
        # lets us read the body until EOF below.
        headers = "\r\n".join(
            [
                "{} {} HTTP/1.1".format(
                    req.method, url.path + "?" + url.query
                ),
                "Host: " + url.hostname,
                "Connection: close",
                "Content-Length: {}".format(len(req.content or b"")),
                "\r\n".join(starmap("{}: {}".format, req.headers.items())),
            ]
        )
        writer.write(
            b"\r\n".join([headers.encode("latin-1"), b"", req.content or b""])
        )
        # Read the entire response (connection closes at EOF) with a
        # timeout to avoid hanging on unresponsive servers.
        response_bytes = BytesIO(
            await asyncio.wait_for(reader.read(), timeout=timeout)
        )
    finally:
        writer.close()
    # Re-use the stdlib HTTP parser by pretending the buffer is a socket.
    resp = HTTPResponse(
        _SocketAdaptor(response_bytes), method=req.method, url=req.url
    )
    resp.begin()
    status = resp.getcode()
    # Follow 3xx redirects recursively, bounded by max_redirects.
    if 300 <= status < 400 and "Location" in resp.headers and max_redirects:
        new_url = urllib.parse.urljoin(req.url, resp.headers["Location"])
        return await _asyncio_send(
            loop,
            req.replace(url=new_url),
            timeout=timeout,
            max_redirects=max_redirects - 1,
        )
    return Response(status, content=resp.read(), headers=resp.headers)
# Optional integration: registered only when `requests` is importable.
try:
    import requests
except ImportError:  # pragma: no cover
    pass
else:

    @send.register(requests.Session)
    def _requests_send(session, req):
        """send a request with the `requests` library"""
        res = session.request(
            req.method,
            req.url,
            data=req.content,
            params=req.params,
            headers=req.headers,
        )
        return Response(res.status_code, res.content, headers=res.headers)
# Optional integration: registered only when `aiohttp` is importable.
try:
    import aiohttp
except ImportError:  # pragma: no cover
    pass
else:

    @send_async.register(aiohttp.ClientSession)
    async def _aiohttp_send(session, req):
        """send a request with the `aiohttp` library"""
        async with session.request(
            req.method,
            req.url,
            params=req.params,
            data=req.content,
            headers=req.headers,
        ) as resp:
            # Read the body while the response context is still open.
            return Response(
                resp.status, content=await resp.read(), headers=resp.headers
            )
# Optional integration: registered only when `httpx` is importable.
# Covers both the sync and the async client.
try:
    import httpx
except ImportError:  # pragma: no cover
    pass
else:

    @send.register(httpx.Client)
    def _httpx_send_sync(client, req):
        """send a request with the `httpx` library"""
        res = client.request(
            req.method,
            req.url,
            params=req.params,
            content=req.content,
            headers=req.headers,
        )
        return Response(res.status_code, res.content, headers=res.headers)

    @send_async.register(httpx.AsyncClient)
    async def _httpx_send_async(client, req):
        """send a request with the `httpx` library"""
        res = await client.request(
            req.method,
            req.url,
            params=req.params,
            content=req.content,
            headers=req.headers,
        )
        return Response(res.status_code, res.content, headers=res.headers)
| |
import logging
import sys
import urllib
import requests
import simplejson as json
from requests.auth import HTTPBasicAuth
from redash.query_runner import *
try:
    import http.client as http_client
except ImportError:
    # Python 2
    import httplib as http_client
logger = logging.getLogger(__name__)
# Map Elasticsearch field types to redash column types.
ELASTICSEARCH_TYPES_MAPPING = {
    "integer": TYPE_INTEGER,
    "long": TYPE_INTEGER,
    "float": TYPE_FLOAT,
    "double": TYPE_FLOAT,
    "boolean": TYPE_BOOLEAN,
    "string": TYPE_STRING,
    "date": TYPE_DATE,
    "object": TYPE_STRING,
    # "geo_point" TODO: Need to split to 2 fields somehow
}
# Friendly display names for Elasticsearch built-in fields.
ELASTICSEARCH_BUILTIN_FIELDS_MAPPING = {
    "_id": "Id",
    "_score": "Score"
}
# Map Python value types to redash column types (Python 2:
# ``unicode`` and ``long`` exist here).
PYTHON_TYPES_MAPPING = {
    str: TYPE_STRING,
    unicode: TYPE_STRING,
    bool: TYPE_BOOLEAN,
    int: TYPE_INTEGER,
    long: TYPE_INTEGER,
    float: TYPE_FLOAT
}
class BaseElasticSearch(BaseQueryRunner):
    """Common base for the Elasticsearch query runners: configuration,
    basic auth, mapping retrieval and result parsing."""
    # Flip to True to enable verbose HTTP wire logging.
    DEBUG_ENABLED = False
    @classmethod
    def configuration_schema(cls):
        """JSON schema describing the data-source configuration form."""
        return {
            'type': 'object',
            'properties': {
                'server': {
                    'type': 'string',
                    'title': 'Base URL'
                },
                'basic_auth_user': {
                    'type': 'string',
                    'title': 'Basic Auth User'
                },
                'basic_auth_password': {
                    'type': 'string',
                    'title': 'Basic Auth Password'
                }
            },
            "secret": ["basic_auth_password"],
            "required": ["server"]
        }
    @classmethod
    def enabled(cls):
        # The abstract base itself is not a selectable query runner.
        return False
    def __init__(self, configuration):
        super(BaseElasticSearch, self).__init__(configuration)
        self.syntax = "json"
        if self.DEBUG_ENABLED:
            http_client.HTTPConnection.debuglevel = 1
            # you need to initialize logging, otherwise you will not see anything from requests
            logging.basicConfig()
            logging.getLogger().setLevel(logging.DEBUG)
            requests_log = logging.getLogger("requests.packages.urllib3")
            requests_log.setLevel(logging.DEBUG)
            requests_log.propagate = True
            logger.setLevel(logging.DEBUG)
        # Normalize the base URL: strip a single trailing slash.
        self.server_url = self.configuration["server"]
        if self.server_url[-1] == "/":
            self.server_url = self.server_url[:-1]
        basic_auth_user = self.configuration.get("basic_auth_user", None)
        basic_auth_password = self.configuration.get("basic_auth_password", None)
        self.auth = None
        if basic_auth_user and basic_auth_password:
            self.auth = HTTPBasicAuth(basic_auth_user, basic_auth_password)
    def _get_mappings(self, url):
        """GET *url* and return ``(mappings, error)``; on failure
        ``mappings`` is None and ``error`` holds a message."""
        mappings = {}
        error = None
        try:
            r = requests.get(url, auth=self.auth)
            r.raise_for_status()
            mappings = r.json()
        except requests.HTTPError as e:
            logger.exception(e)
            error = "Failed to execute query. Return Code: {0} Reason: {1}".format(r.status_code, r.text)
            mappings = None
        except requests.exceptions.RequestException as e:
            logger.exception(e)
            error = "Connection refused"
            mappings = None
        return mappings, error
    def _get_query_mappings(self, url):
        """Flatten the per-index/_mapping response into one
        ``{property_name: redash_type}`` dict."""
        mappings_data, error = self._get_mappings(url)
        if error:
            return mappings_data, error
        mappings = {}
        for index_name in mappings_data:
            index_mappings = mappings_data[index_name]
            for m in index_mappings.get("mappings", {}):
                if "properties" not in index_mappings["mappings"][m]:
                    continue
                for property_name in index_mappings["mappings"][m]["properties"]:
                    property_data = index_mappings["mappings"][m]["properties"][property_name]
                    # First mapping for a property name wins.
                    if property_name not in mappings:
                        property_type = property_data.get("type", None)
                        if property_type:
                            if property_type in ELASTICSEARCH_TYPES_MAPPING:
                                mappings[property_name] = ELASTICSEARCH_TYPES_MAPPING[property_type]
                            else:
                                # Unknown ES types fall back to string.
                                mappings[property_name] = TYPE_STRING
                                #raise Exception("Unknown property type: {0}".format(property_type))
        return mappings, error
    def get_schema(self, *args, **kwargs):
        """Return [{'name': index, 'columns': [...]}, ...] describing
        every index reachable via /_mappings."""
        def parse_doc(doc, path=None):
            '''Recursively parse a doc type dictionary
            '''
            path = path or []
            result = []
            for field, description in doc['properties'].items():
                if 'properties' in description:
                    result.extend(parse_doc(description, path + [field]))
                else:
                    result.append('.'.join(path + [field]))
            return result
        schema = {}
        url = "{0}/_mappings".format(self.server_url)
        mappings, error = self._get_mappings(url)
        if mappings:
            # make a schema for each index
            # the index contains a mappings dict with documents
            # in a hierarchical format
            for name, index in mappings.items():
                columns = []
                schema[name] = {'name': name}
                for doc, items in index['mappings'].items():
                    columns.extend(parse_doc(items))
                # remove duplicates
                # sort alphabetically
                schema[name]['columns'] = sorted(set(columns))
        return schema.values()
    def _parse_results(self, mappings, result_fields, raw_result, result_columns, result_rows):
        """Translate a raw ES response (hits or aggregations) into
        redash columns/rows, appending to the passed-in lists."""
        def add_column_if_needed(mappings, column_name, friendly_name, result_columns, result_columns_index):
            # Register a column exactly once, keyed by friendly name.
            if friendly_name not in result_columns_index:
                result_columns.append({
                    "name": friendly_name,
                    "friendly_name": friendly_name,
                    "type": mappings.get(column_name, "string")})
                result_columns_index[friendly_name] = result_columns[-1]
        def get_row(rows, row):
            # Lazily create and register a row dict.
            if row is None:
                row = {}
                rows.append(row)
            return row
        def collect_value(mappings, row, key, value, type):
            # Honor the optional result_fields whitelist.
            if result_fields and key not in result_fields_index:
                return
            mappings[key] = type
            add_column_if_needed(mappings, key, key, result_columns, result_columns_index)
            row[key] = value
        def collect_aggregations(mappings, rows, parent_key, data, row, result_columns, result_columns_index):
            # Walk nested aggregation buckets, flattening them to rows.
            if isinstance(data, dict):
                for key, value in data.iteritems():
                    val = collect_aggregations(mappings, rows, parent_key if key == 'buckets' else key, value, row, result_columns, result_columns_index)
                    if val:
                        row = get_row(rows, row)
                        collect_value(mappings, row, key, val, 'long')
                for data_key in ['value', 'doc_count']:
                    if data_key not in data:
                        continue
                    if 'key' in data and len(data.keys()) == 2:
                        key_is_string = 'key_as_string' in data
                        collect_value(mappings, row, data['key'] if not key_is_string else data['key_as_string'], data[data_key], 'long' if not key_is_string else 'string')
                    else:
                        return data[data_key]
            elif isinstance(data, list):
                for value in data:
                    result_row = get_row(rows, row)
                    collect_aggregations(mappings, rows, parent_key, value, result_row, result_columns, result_columns_index)
                    if 'doc_count' in value:
                        collect_value(mappings, result_row, 'doc_count', value['doc_count'], 'integer')
                    if 'key' in value:
                        if 'key_as_string' in value:
                            collect_value(mappings, result_row, parent_key, value['key_as_string'], 'string')
                        else:
                            collect_value(mappings, result_row, parent_key, value['key'], 'string')
            return None
        result_columns_index = {c["name"]: c for c in result_columns}
        result_fields_index = {}
        if result_fields:
            for r in result_fields:
                result_fields_index[r] = None
        if 'error' in raw_result:
            # Surface ES errors, truncated to keep messages manageable.
            error = raw_result['error']
            if len(error) > 10240:
                error = error[:10240] + '... continues'
            raise Exception(error)
        elif 'aggregations' in raw_result:
            if result_fields:
                for field in result_fields:
                    add_column_if_needed(mappings, field, field, result_columns, result_columns_index)
            for key, data in raw_result["aggregations"].iteritems():
                collect_aggregations(mappings, result_rows, key, data, None, result_columns, result_columns_index)
            logger.debug("result_rows %s", str(result_rows))
            logger.debug("result_columns %s", str(result_columns))
        elif 'hits' in raw_result and 'hits' in raw_result['hits']:
            if result_fields:
                for field in result_fields:
                    add_column_if_needed(mappings, field, field, result_columns, result_columns_index)
            for h in raw_result["hits"]["hits"]:
                row = {}
                # Values live under "_source" or "fields" per hit.
                column_name = "_source" if "_source" in h else "fields"
                for column in h[column_name]:
                    if result_fields and column not in result_fields_index:
                        continue
                    add_column_if_needed(mappings, column, column, result_columns, result_columns_index)
                    value = h[column_name][column]
                    # Unwrap single-element lists returned by ES.
                    row[column] = value[0] if isinstance(value, list) and len(value) == 1 else value
                result_rows.append(row)
        else:
            raise Exception("Redash failed to parse the results it got from Elasticsearch.")
    def test_connection(self):
        """Probe /_cluster/health; raise with a readable message when
        the server is unreachable or returns an HTTP error."""
        try:
            r = requests.get("{0}/_cluster/health".format(self.server_url), auth=self.auth)
            r.raise_for_status()
        except requests.HTTPError as e:
            logger.exception(e)
            raise Exception("Failed to execute query. Return Code: {0} Reason: {1}".format(r.status_code, r.text))
        except requests.exceptions.RequestException as e:
            logger.exception(e)
            raise Exception("Connection refused")
class Kibana(BaseElasticSearch):
@classmethod
def enabled(cls):
return True
@classmethod
def annotate_query(cls):
return False
def _execute_simple_query(self, url, auth, _from, mappings, result_fields, result_columns, result_rows):
url += "&from={0}".format(_from)
r = requests.get(url, auth=self.auth)
r.raise_for_status()
raw_result = r.json()
self._parse_results(mappings, result_fields, raw_result, result_columns, result_rows)
total = raw_result["hits"]["total"]
result_size = len(raw_result["hits"]["hits"])
logger.debug("Result Size: {0} Total: {1}".format(result_size, total))
return raw_result["hits"]["total"]
def run_query(self, query, user):
try:
error = None
logger.debug(query)
query_params = json.loads(query)
index_name = query_params["index"]
query_data = query_params["query"]
size = int(query_params.get("size", 500))
limit = int(query_params.get("limit", 500))
result_fields = query_params.get("fields", None)
sort = query_params.get("sort", None)
if not self.server_url:
error = "Missing configuration key 'server'"
return None, error
url = "{0}/{1}/_search?".format(self.server_url, index_name)
mapping_url = "{0}/{1}/_mapping".format(self.server_url, index_name)
mappings, error = self._get_query_mappings(mapping_url)
if error:
return None, error
#logger.debug(json.dumps(mappings, indent=4))
if sort:
url += "&sort={0}".format(urllib.quote_plus(sort))
url += "&q={0}".format(urllib.quote_plus(query_data))
logger.debug("Using URL: {0}".format(url))
logger.debug("Using Query: {0}".format(query_data))
result_columns = []
result_rows = []
if isinstance(query_data, str) or isinstance(query_data, unicode):
_from = 0
while True:
query_size = size if limit >= (_from + size) else (limit - _from)
total = self._execute_simple_query(url + "&size={0}".format(query_size), self.auth, _from, mappings, result_fields, result_columns, result_rows)
_from += size
if _from >= limit:
break
else:
# TODO: Handle complete ElasticSearch queries (JSON based sent over HTTP POST)
raise Exception("Advanced queries are not supported")
json_data = json.dumps({
"columns": result_columns,
"rows": result_rows
})
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
except requests.HTTPError as e:
logger.exception(e)
error = "Failed to execute query. Return Code: {0} Reason: {1}".format(r.status_code, r.text)
json_data = None
except requests.exceptions.RequestException as e:
logger.exception(e)
error = "Connection refused"
json_data = None
except Exception as e:
logger.exception(e)
raise sys.exc_info()[1], None, sys.exc_info()[2]
return json_data, error
class ElasticSearch(BaseElasticSearch):
@classmethod
def enabled(cls):
return True
@classmethod
def annotate_query(cls):
return False
@classmethod
def name(cls):
return 'Elasticsearch'
def run_query(self, query, user):
try:
error = None
logger.debug(query)
query_dict = json.loads(query)
index_name = query_dict.pop("index", "")
result_fields = query_dict.pop("result_fields", None)
if not self.server_url:
error = "Missing configuration key 'server'"
return None, error
url = "{0}/{1}/_search".format(self.server_url, index_name)
mapping_url = "{0}/{1}/_mapping".format(self.server_url, index_name)
mappings, error = self._get_query_mappings(mapping_url)
if error:
return None, error
logger.debug("Using URL: %s", url)
logger.debug("Using query: %s", query_dict)
r = requests.get(url, json=query_dict, auth=self.auth)
r.raise_for_status()
logger.debug("Result: %s", r.json())
result_columns = []
result_rows = []
self._parse_results(mappings, result_fields, r.json(), result_columns, result_rows)
json_data = json.dumps({
"columns": result_columns,
"rows": result_rows
})
except KeyboardInterrupt:
logger.exception(e)
error = "Query cancelled by user."
json_data = None
except requests.HTTPError as e:
logger.exception(e)
error = "Failed to execute query. Return Code: {0} Reason: {1}".format(r.status_code, r.text)
json_data = None
except requests.exceptions.RequestException as e:
logger.exception(e)
error = "Connection refused"
json_data = None
except Exception as e:
logger.exception(e)
raise sys.exc_info()[1], None, sys.exc_info()[2]
return json_data, error
# Expose both runners to redash's query-runner registry.
register(Kibana)
register(ElasticSearch)
| |
from __future__ import unicode_literals
import os
from collections import OrderedDict
from django.contrib.staticfiles.finders import get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.storage import FileSystemStorage
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.utils.encoding import smart_text
from django.utils.six.moves import input
class Command(BaseCommand):
    """
    Command that allows to copy or symlink static files from different
    locations to the settings.STATIC_ROOT.
    """
    help = "Collect static files in a single location."
    requires_system_checks = False
    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        self.copied_files = []
        self.symlinked_files = []
        self.unmodified_files = []
        self.post_processed_files = []
        self.storage = staticfiles_storage
        self.style = no_style()
        # A storage backend that cannot resolve filesystem paths
        # (path('') raises NotImplementedError) is treated as remote.
        try:
            self.storage.path('')
        except NotImplementedError:
            self.local = False
        else:
            self.local = True
    def add_arguments(self, parser):
        """Register the command-line options for collectstatic."""
        parser.add_argument('--noinput', '--no-input',
            action='store_false', dest='interactive', default=True,
            help="Do NOT prompt the user for input of any kind.")
        parser.add_argument('--no-post-process',
            action='store_false', dest='post_process', default=True,
            help="Do NOT post process collected files.")
        parser.add_argument('-i', '--ignore', action='append', default=[],
            dest='ignore_patterns', metavar='PATTERN',
            help="Ignore files or directories matching this glob-style "
                "pattern. Use multiple times to ignore more.")
        parser.add_argument('-n', '--dry-run',
            action='store_true', dest='dry_run', default=False,
            help="Do everything except modify the filesystem.")
        parser.add_argument('-c', '--clear',
            action='store_true', dest='clear', default=False,
            help="Clear the existing files using the storage "
                 "before trying to copy or link the original file.")
        parser.add_argument('-l', '--link',
            action='store_true', dest='link', default=False,
            help="Create a symbolic link to each file instead of copying.")
        parser.add_argument('--no-default-ignore', action='store_false',
            dest='use_default_ignore_patterns', default=True,
            help="Don't ignore the common private glob-style patterns 'CVS', "
                "'.*' and '*~'.")
    def set_options(self, **options):
        """
        Set instance variables based on an options dict
        """
        self.interactive = options['interactive']
        self.verbosity = options['verbosity']
        self.symlink = options['link']
        self.clear = options['clear']
        self.dry_run = options['dry_run']
        ignore_patterns = options['ignore_patterns']
        if options['use_default_ignore_patterns']:
            ignore_patterns += ['CVS', '.*', '*~']
        # Deduplicate patterns; order is not significant afterwards.
        self.ignore_patterns = list(set(ignore_patterns))
        self.post_process = options['post_process']
    def collect(self):
        """
        Perform the bulk of the work of collectstatic.
        Split off from handle() to facilitate testing.
        """
        if self.symlink and not self.local:
            raise CommandError("Can't symlink to a remote destination.")
        if self.clear:
            self.clear_dir('')
        if self.symlink:
            handler = self.link_file
        else:
            handler = self.copy_file
        found_files = OrderedDict()
        for finder in get_finders():
            for path, storage in finder.list(self.ignore_patterns):
                # Prefix the relative path if the source storage contains it
                if getattr(storage, 'prefix', None):
                    prefixed_path = os.path.join(storage.prefix, path)
                else:
                    prefixed_path = path
                # First finder wins: later duplicates are only reported.
                if prefixed_path not in found_files:
                    found_files[prefixed_path] = (storage, path)
                    handler(path, prefixed_path, storage)
                else:
                    self.log(
                        "Found another file with the destination path '%s'. It "
                        "will be ignored since only the first encountered file "
                        "is collected. If this is not what you want, make sure "
                        "every static file has a unique path." % prefixed_path,
                        level=1,
                    )
        # Here we check if the storage backend has a post_process
        # method and pass it the list of modified files.
        if self.post_process and hasattr(self.storage, 'post_process'):
            processor = self.storage.post_process(found_files,
                                                  dry_run=self.dry_run)
            for original_path, processed_path, processed in processor:
                if isinstance(processed, Exception):
                    self.stderr.write("Post-processing '%s' failed!" % original_path)
                    # Add a blank line before the traceback, otherwise it's
                    # too easy to miss the relevant part of the error message.
                    self.stderr.write("")
                    raise processed
                if processed:
                    self.log("Post-processed '%s' as '%s'" %
                             (original_path, processed_path), level=1)
                    self.post_processed_files.append(original_path)
                else:
                    self.log("Skipped post-processing '%s'" % original_path)
        return {
            'modified': self.copied_files + self.symlinked_files,
            'unmodified': self.unmodified_files,
            'post_processed': self.post_processed_files,
        }
    def handle(self, **options):
        """Confirm with the user (unless --noinput), run collect(), and
        print a one-line summary at verbosity >= 1."""
        self.set_options(**options)
        message = ['\n']
        if self.dry_run:
            message.append(
                'You have activated the --dry-run option so no files will be modified.\n\n'
            )
        message.append(
            'You have requested to collect static files at the destination\n'
            'location as specified in your settings'
        )
        if self.is_local_storage() and self.storage.location:
            destination_path = self.storage.location
            message.append(':\n\n    %s\n\n' % destination_path)
        else:
            destination_path = None
            message.append('.\n\n')
        if self.clear:
            message.append('This will DELETE ALL FILES in this location!\n')
        else:
            message.append('This will overwrite existing files!\n')
        message.append(
            'Are you sure you want to do this?\n\n'
            "Type 'yes' to continue, or 'no' to cancel: "
        )
        if self.interactive and input(''.join(message)) != 'yes':
            raise CommandError("Collecting static files cancelled.")
        collected = self.collect()
        modified_count = len(collected['modified'])
        unmodified_count = len(collected['unmodified'])
        post_processed_count = len(collected['post_processed'])
        if self.verbosity >= 1:
            template = ("\n%(modified_count)s %(identifier)s %(action)s"
                        "%(destination)s%(unmodified)s%(post_processed)s.\n")
            summary = template % {
                'modified_count': modified_count,
                'identifier': 'static file' + ('' if modified_count == 1 else 's'),
                'action': 'symlinked' if self.symlink else 'copied',
                'destination': (" to '%s'" % destination_path if destination_path else ''),
                'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''),
                'post_processed': (collected['post_processed'] and
                                   ', %s post-processed'
                                   % post_processed_count or ''),
            }
            self.stdout.write(summary)
    def log(self, msg, level=2):
        """
        Small log helper
        """
        if self.verbosity >= level:
            self.stdout.write(msg)
    def is_local_storage(self):
        """Return True when the destination storage is filesystem-based."""
        return isinstance(self.storage, FileSystemStorage)
    def clear_dir(self, path):
        """
        Deletes the given relative path using the destination storage backend.
        """
        if not self.storage.exists(path):
            return
        dirs, files = self.storage.listdir(path)
        for f in files:
            fpath = os.path.join(path, f)
            if self.dry_run:
                self.log("Pretending to delete '%s'" %
                         smart_text(fpath), level=1)
            else:
                self.log("Deleting '%s'" % smart_text(fpath), level=1)
                self.storage.delete(fpath)
        for d in dirs:
            self.clear_dir(os.path.join(path, d))
    def delete_file(self, path, prefixed_path, source_storage):
        """
        Checks if the target file should be deleted if it already exists
        """
        if self.storage.exists(prefixed_path):
            try:
                # When was the target file modified last time?
                target_last_modified = \
                    self.storage.modified_time(prefixed_path)
            except (OSError, NotImplementedError, AttributeError):
                # The storage doesn't support ``modified_time`` or failed
                pass
            else:
                try:
                    # When was the source file modified last time?
                    source_last_modified = source_storage.modified_time(path)
                except (OSError, NotImplementedError, AttributeError):
                    pass
                else:
                    # The full path of the target file
                    if self.local:
                        full_path = self.storage.path(prefixed_path)
                    else:
                        full_path = None
                    # Skip the file if the source file is younger
                    # Avoid sub-second precision (see #14665, #19540)
                    if (target_last_modified.replace(microsecond=0)
                            >= source_last_modified.replace(microsecond=0)):
                        if not ((self.symlink and full_path
                                and not os.path.islink(full_path)) or
                                (not self.symlink and full_path
                                and os.path.islink(full_path))):
                            if prefixed_path not in self.unmodified_files:
                                self.unmodified_files.append(prefixed_path)
                            self.log("Skipping '%s' (not modified)" % path)
                            return False
            # Then delete the existing file if really needed
            if self.dry_run:
                self.log("Pretending to delete '%s'" % path)
            else:
                self.log("Deleting '%s'" % path)
                self.storage.delete(prefixed_path)
        return True
    def link_file(self, path, prefixed_path, source_storage):
        """
        Attempt to link ``path``
        """
        # Skip this file if it was already copied earlier
        if prefixed_path in self.symlinked_files:
            return self.log("Skipping '%s' (already linked earlier)" % path)
        # Delete the target file if needed or break
        if not self.delete_file(path, prefixed_path, source_storage):
            return
        # The full path of the source file
        source_path = source_storage.path(path)
        # Finally link the file
        if self.dry_run:
            self.log("Pretending to link '%s'" % source_path, level=1)
        else:
            self.log("Linking '%s'" % source_path, level=1)
            full_path = self.storage.path(prefixed_path)
            try:
                os.makedirs(os.path.dirname(full_path))
            except OSError:
                pass
            try:
                if os.path.lexists(full_path):
                    os.unlink(full_path)
                os.symlink(source_path, full_path)
            except AttributeError:
                import platform
                raise CommandError("Symlinking is not supported by Python %s." %
                                   platform.python_version())
            except NotImplementedError:
                import platform
                raise CommandError("Symlinking is not supported in this "
                                   "platform (%s)." % platform.platform())
            except OSError as e:
                raise CommandError(e)
        if prefixed_path not in self.symlinked_files:
            self.symlinked_files.append(prefixed_path)
    def copy_file(self, path, prefixed_path, source_storage):
        """
        Attempt to copy ``path`` with storage
        """
        # Skip this file if it was already copied earlier
        if prefixed_path in self.copied_files:
            return self.log("Skipping '%s' (already copied earlier)" % path)
        # Delete the target file if needed or break
        if not self.delete_file(path, prefixed_path, source_storage):
            return
        # The full path of the source file
        source_path = source_storage.path(path)
        # Finally start copying
        if self.dry_run:
            self.log("Pretending to copy '%s'" % source_path, level=1)
        else:
            self.log("Copying '%s'" % source_path, level=1)
            with source_storage.open(path) as source_file:
                self.storage.save(prefixed_path, source_file)
        self.copied_files.append(prefixed_path)
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslvserver_sslpolicy_binding(base_resource):
    """Binding resource describing an sslpolicy bound to an sslvserver."""

    def __init__(self):
        self._policyname = ""
        self._priority = 0
        self._polinherit = 0
        self._gotopriorityexpression = ""
        self._invoke = False
        self._labeltype = ""
        self._labelname = ""
        self._vservername = ""
        self.___count = 0

    @property
    def priority(self):
        """The priority of the policies bound to this SSL service."""
        return self._priority

    @priority.setter
    def priority(self, priority):
        """Set the priority of the policies bound to this SSL service."""
        self._priority = priority

    @property
    def policyname(self):
        """The name of the SSL policy binding."""
        return self._policyname

    @policyname.setter
    def policyname(self, policyname):
        """Set the name of the SSL policy binding."""
        self._policyname = policyname

    @property
    def labelname(self):
        """Name of the label to invoke if the current policy rule evaluates to TRUE."""
        return self._labelname

    @labelname.setter
    def labelname(self, labelname):
        """Set the name of the label to invoke when the rule evaluates to TRUE."""
        self._labelname = labelname

    @property
    def vservername(self):
        """Name of the SSL virtual server.<br/>Minimum length = 1."""
        return self._vservername

    @vservername.setter
    def vservername(self, vservername):
        """Set the name of the SSL virtual server."""
        self._vservername = vservername

    @property
    def gotopriorityexpression(self):
        """Expression giving the priority of the next policy evaluated when the rule is TRUE."""
        return self._gotopriorityexpression

    @gotopriorityexpression.setter
    def gotopriorityexpression(self, gotopriorityexpression):
        """Set the goto-priority expression."""
        self._gotopriorityexpression = gotopriorityexpression

    @property
    def invoke(self):
        """Invoke flag. This attribute is relevant only for ADVANCED policies."""
        return self._invoke

    @invoke.setter
    def invoke(self, invoke):
        """Set the invoke flag (ADVANCED policies only)."""
        self._invoke = invoke

    @property
    def labeltype(self):
        """Type of policy label invocation.<br/>Possible values = vserver, service, policylabel."""
        return self._labeltype

    @labeltype.setter
    def labeltype(self, labeltype):
        """Set the type of policy label invocation."""
        self._labeltype = labeltype

    @property
    def polinherit(self):
        """Whether the bound policy is an inherited policy or not (read-only)."""
        return self._polinherit

    def _get_nitro_response(self, service, response):
        """Convert a nitro response into objects; return the object array for GET requests."""
        result = service.payload_formatter.string_to_resource(
            sslvserver_sslpolicy_binding_response, response,
            self.__class__.__name__)
        if result.errorcode != 0:
            # 444 means the session expired on the appliance.
            if result.errorcode == 444:
                service.clear_session(self)
            if result.severity:
                if result.severity == "ERROR":
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else:
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.sslvserver_sslpolicy_binding

    def _get_object_name(self):
        """Return the value of the object identifier argument."""
        if self.vservername:
            return str(self.vservername)
        return None

    # Attributes copied from caller resources in add().
    _ADD_ATTRS = ("vservername", "policyname", "gotopriorityexpression",
                  "invoke", "labeltype", "labelname")
    # Attributes copied from caller resources in delete().
    _DEL_ATTRS = ("vservername", "policyname")

    @classmethod
    def add(cls, client, resource):
        """Bind the given sslpolicy resource(s) to their sslvserver."""
        if resource and type(resource) is not list:
            binding = sslvserver_sslpolicy_binding()
            for attr in cls._ADD_ATTRS:
                setattr(binding, attr, getattr(resource, attr))
            return binding.update_resource(client)
        else:
            if resource and len(resource) > 0:
                bindings = [sslvserver_sslpolicy_binding() for _ in range(len(resource))]
                for i, src in enumerate(resource):
                    for attr in cls._ADD_ATTRS:
                        setattr(bindings[i], attr, getattr(src, attr))
                return cls.update_bulk_request(client, bindings)

    @classmethod
    def delete(cls, client, resource):
        """Unbind the given sslpolicy resource(s) from their sslvserver."""
        if resource and type(resource) is not list:
            binding = sslvserver_sslpolicy_binding()
            for attr in cls._DEL_ATTRS:
                setattr(binding, attr, getattr(resource, attr))
            return binding.delete_resource(client)
        else:
            if resource and len(resource) > 0:
                bindings = [sslvserver_sslpolicy_binding() for _ in range(len(resource))]
                for i, src in enumerate(resource):
                    for attr in cls._DEL_ATTRS:
                        setattr(bindings[i], attr, getattr(src, attr))
                return cls.delete_bulk_request(client, bindings)

    @classmethod
    def get(cls, service, vservername):
        """Fetch all sslvserver_sslpolicy_binding resources for a vserver."""
        obj = sslvserver_sslpolicy_binding()
        obj.vservername = vservername
        return obj.get_resources(service)

    @classmethod
    def get_filtered(cls, service, vservername, filter_):
        """Fetch a filtered set of sslvserver_sslpolicy_binding resources.

        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        obj = sslvserver_sslpolicy_binding()
        obj.vservername = vservername
        option_ = options()
        option_.filter = filter_
        return obj.getfiltered(service, option_)

    @classmethod
    def count(cls, service, vservername):
        """Count sslvserver_sslpolicy_binding resources configured on NetScaler."""
        obj = sslvserver_sslpolicy_binding()
        obj.vservername = vservername
        option_ = options()
        option_.count = True
        response = obj.get_resources(service, option_)
        if response:
            return response[0].__dict__['___count']
        return 0

    @classmethod
    def count_filtered(cls, service, vservername, filter_):
        """Count the filtered set of sslvserver_sslpolicy_binding resources.

        Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
        """
        obj = sslvserver_sslpolicy_binding()
        obj.vservername = vservername
        option_ = options()
        option_.count = True
        option_.filter = filter_
        response = obj.getfiltered(service, option_)
        if response:
            return response[0].__dict__['___count']
        return 0

    class Ecccurvename:
        ALL = "ALL"
        P_224 = "P_224"
        P_256 = "P_256"
        P_384 = "P_384"
        P_521 = "P_521"

    class Ocspcheck:
        Mandatory = "Mandatory"
        Optional = "Optional"

    class Crlcheck:
        Mandatory = "Mandatory"
        Optional = "Optional"

    class Labeltype:
        vserver = "vserver"
        service = "service"
        policylabel = "policylabel"
class sslvserver_sslpolicy_binding_response(base_response):
    """Response envelope returned by the NITRO API for this binding type."""

    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one placeholder binding per expected record.
        self.sslvserver_sslpolicy_binding = [
            sslvserver_sslpolicy_binding() for _ in range(length)]
| |
import sys
import os
import argparse
import yaml
import json
from pytz import timezone, utc
from datetime import datetime
from dateutil import parser, relativedelta
from simple_salesforce import Salesforce, SalesforceLogin, SFType
# define arguments
# will read default from ~/.hosd.yml
argp = argparse.ArgumentParser(description='Clone Occurrence.')
# One or more yaml config files; later files override earlier ones.
argp.add_argument('config', nargs='+', help='yaml file that contain occurrences config')
# Credentials: command-line flags override the yaml config values.
argp.add_argument('--username', help='username USERNAME')
argp.add_argument('--password', help='password PASSWORD')
argp.add_argument('--token', help='token SECURITY_TOKEN')
# Optional one-off occurrence to clone, given directly on the command line.
argp.add_argument('--occurrence', help='occurrence OCCURRENCE_NAME')
argp.add_argument('--date', help='date OCCURRENCE_DATE(yyyy-mm-dd)')
argp.add_argument('--timezone', help='timezone TIMEZONE(eg.US/Pacific)')
argp.add_argument('--dry', help='dry run', action='store_true')
argp.add_argument('--debug', help='debug', action='store_true')
#sf = Salesforce(username='myemail@example.com', password='password', security_token='token')
def clone_occurrence(sf, oc_name, new_date, tz, dry_run=False, debug=False):
    """Clone the HOC occurrence named ``oc_name`` onto ``new_date``.

    The start/end times keep their original wall-clock hour in timezone
    ``tz``; counters, IDs and other generated fields are stripped before
    the new record is created.  Returns -1 when the occurrence is not
    found, None on skip/failure, 0 on success (Python 2 script).
    """
    # query for occurrence_name
    # NOTE(review): oc_name is interpolated straight into SOQL -- fine for a
    # trusted operator script, but not safe for untrusted input.
    qres = sf.query("SELECT Id FROM HOC__Occurrence__c where Name = '%s'" % (oc_name))
    if ('records' not in qres) or (len(qres['records']) < 1) or ('Id' not in qres['records'][0]):
        print "Occurence %s not found !" % (oc_name)
        return -1
    # create Occurrence data type
    Occurrence = SFType('HOC__Occurrence__c', sf.session_id, sf.sf_instance)
    oc = Occurrence.get(qres['records'][0]['Id'])
    if not oc:
        print "Failed to retrieve Occurrence %s" % (qres['records'][0]['Id'])
        return
    # get Opportunity
    VolunteerOpportunity = SFType('HOC__Volunteer_Opportunity__c', sf.session_id, sf.sf_instance)
    op = VolunteerOpportunity.get(oc['HOC__Volunteer_Opportunity__c'])
    if not op:
        print "Failed to retrieve Volunteer Opportunity%s" % (oc['HOC__Volunteer_Opportunity__c'])
        return
    # do date calculation
    old_start_datetime = parser.parse(oc['HOC__Start_Date_Time__c'])
    old_end_datetime = parser.parse(oc['HOC__End_Date_Time__c'])
    # need to make sure we calculate delta date in the right timezone,
    # otherwise it can mess up the calculation
    delta = new_date - old_start_datetime.astimezone(tz).date()
    # this weird formula is to add delta while maintaining the correct timezone
    # first add delta, then remove timezone (to maintain the same hour)
    # then add back the timezone, so we can calculate utc timezone correctly afterward
    new_start_datetime = tz.localize((old_start_datetime.astimezone(tz) + delta).replace(tzinfo=None)).astimezone(utc)
    new_start_datetime_str = new_start_datetime.isoformat()
    new_end_datetime = tz.localize((old_end_datetime.astimezone(tz) + delta).replace(tzinfo=None)).astimezone(utc)
    new_end_datetime_str = new_end_datetime.isoformat()
    new_start_tz = new_start_datetime.astimezone(tz)
    # Summary banner for the operator.
    print "========================================="
    print "Occurrence Id: " + oc_name
    print "Project Name: " + op['Name']
    print "Volunteer Coordinator Name: " + oc['HOC__Volunteer_Coordinator_Name__c']
    print "Volunteer Coordinator Email: " + oc['HOC__Volunteer_Coordinator_Email__c']
    print "Days Time Needed: " + oc['HOC__Days_Times_Needed__c']
    print "Clone to date (UTC): " + str(new_start_datetime)
    if debug:
        print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        print "Original Data"
        print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        print json.dumps(oc, sort_keys=True, indent=4, separators=(',',':'))
        print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
    # process properties
    new_oc = {}
    # we will go through the oc, and calling it one by one, for every key in the oc we will
    # check the modifier.
    # If the value exists:
    #   accept lambda to do some processing to it, will be passed the oc and new_oc as parameter
    #   lambda (key, old_oc, new_oc)
    #   otherwise if it is None, it will be removed
    # if the value doesn't exist, it will be copied as is
    # NOTE(review): ConnectionReceivedId / ConnectionSentId appear twice below
    # (once as u'...' and once as "..."); harmless since both map to None.
    oc_modifier = {
        u'attributes': None,
        u'Id': None,
        # u'OwnerId': None,
        u'IsDeleted': None,
        u'Name': None,
        u'CreatedDate': None,
        u'CreatedById': None,
        u'LastModifiedDate': None,
        u'LastModifiedById': None,
        u'SystemModstamp': None,
        u'LastActivityDate': None,
        u'ConnectionReceivedId': None,
        u'ConnectionSentId': None,
        u'HOC__City__c': None,
        u'HOC__Country__c': None,
        #u'HOC__Days_Times_Needed__c': None,
        u'HOC__End_Date_Time__c': lambda k,ooc,noc:new_end_datetime_str,
        u'HOC__Google_Map_URL__c': None,
        u'HOC__HOC_Domain_Name__c': None,
        u'HOC__HOC_ID__c': None,
        u'HOC__Import_ID__c': None,
        #u'HOC__Location__c': None,
        u'HOC__Managing_Organization_Name__c': None,
        #u'HOC__Maximum_Attendance__c': None,
        #u'HOC__Minimum_Attendance__c': None,
        u'HOC__Occurrence_URL__c': None,
        #u'HOC__Opportunity_Approval_Manager_Email__c': None,
        #u'HOC__Partner_Staff_Email__c': None,
        u'HOC__Posting_Status__c': None,
        #u'HOC__Recurrence__c': None,
        u'HOC__Registration_Deadline__c': None,
        u'HOC__Registration_Start_Date__c': None,
        u'HOC__Schedule_Type__c': None,
        u'HOC__Serial_Number__c': None,
        u'HOC__Start_Date_Time__c': lambda k,ooc,noc:new_start_datetime_str,
        u'HOC__State_Province__c': None,
        #u'HOC__Status__c': None,
        u'HOC__Street__c': None,
        u'HOC__Total_Attended__c': None,
        u'HOC__Total_Hours_Served__c': None,
        #u'HOC__Volunteer_Coordinator_Email__c': None,
        #u'HOC__Volunteer_Coordinator_Name__c': None,
        #u'HOC__Volunteer_Leader_Needed__c': None,
        u'HOC__Volunteer_Opportunity_Type__c': None,
        #u'HOC__Volunteer_Opportunity__c': None,
        u'HOC__Volunteers_Still_Needed__c': None,
        u'HOC__Zip_Postal_Code__c': None,
        u'HOC__Guest_Volunteer_Hours_Served__c': None,
        u'HOC__Guest_Volunteers_Attended__c': None,
        u'HOC__Total_Confirmed__c': None,
        u'HOC__Total_Connections__c': None,
        u'HOC__Total_Declined__c': None,
        u'HOC__Total_Not_Attended__c': None,
        u'HOC__Total_Pending__c': None,
        u'HOC__Total_Unreported__c': None,
        u'HOC__Volunteer_Hours_Served__c': None,
        u'HOC__Volunteers_Attended__c': None,
        u'HOC__Guest_Volunteer_Number_Hours_Served__c': None,
        #u'HOC__Opportunity_Coordinator__c': None,
        u'HOC__Total_Number_Hours_Served__c': None,
        u'HOC__Update_Connections_Status__c': None,
        u'HOC__Volunteer_Number_Hours_Served__c': None,
        u'HOC__CreationSource__c': None,
        u'HOC__Number_of_Occurrences__c': None,
        u'HOC__HOC_Backend_Domain_Name__c': None,
        u'HOC__LastModifiedByV2__c': None,
        u'HOC__OwnerIdV2__c': None,
        u'HOC__Grouped_Occurrences__c': None,
        #u'HOC__Include_Pending_for_Max_Attendance__c': None,
        u'HOC__Locations_Details_Page__c': None,
        #u'HOC__Maximum_Waitlist__c': None,
        #u'HOC__Turn_off_teams__c': None,
        #u'HOC__Turn_off_waitlist__c'
        # IMPACT
        "Additional_Impact__c": None,
        "Animals_Served_Cared_For__c": None,
        "ConnectionReceivedId": None,
        "ConnectionSentId": None,
        "Craft_Items_Created_Constructed__c": None,
        "Facilities_Maintained_Revitalized__c": None,
        "For_Follow_Up__c": None,
        "Gardens_Maintained_Created__c": None,
        "Individuals_Received_Donations__c": None,
        "Individuals_Served_Engaged__c": None,
        "Mi_Trail_Beach_Park_Maintained_Created__c": None,
        "Potential_Volunteer_Leaders__c": None,
        "Pounds_of_Trash_Debris_Collected__c": None,
        "Share_a_Story__c": None,
    }
    # Build the clone: drop None-mapped keys, run lambdas, copy the rest.
    for k in oc.keys():
        if k in oc_modifier:
            if oc_modifier[k] is None:
                # skip the data
                pass
            else:
                # assume this is lambda
                new_oc[k] = oc_modifier[k](k, oc, new_oc)
        else:
            new_oc[k] = oc[k]
    if debug:
        print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        print "Modified Data"
        print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        print json.dumps(new_oc, sort_keys=True, indent=4, separators=(',',':'))
        print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
    # double check the time, there should be no two occurence within the same date - to make this Idempotent
    check = sf.query("SELECT Id FROM HOC__Occurrence__c where HOC__Volunteer_Opportunity__c = '%s' and HOC__Start_Date_Time__c = %s" % (
        oc['HOC__Volunteer_Opportunity__c'], new_start_datetime_str))
    if check['totalSize'] > 0:
        print "Skipping - duplicate record found for %s, "%(new_start_tz.strftime('%A')) + str(new_start_tz)
        print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
        return
    else:
        print "Clone to date: %s, "%(new_start_tz.strftime('%A')) + str(new_start_tz)
        print "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    if dry_run:
        print("DRY RUN ..")
        print "========================================="
    else:
        print("CREATING OCCURRENCE ..")
        result = Occurrence.create(new_oc)
        print result
        print "========================================="
    return 0
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
config = {}
args = argp.parse_args(argv)
try:
for cfg in args.config:
with open(cfg) as yml:
config.update(yaml.load(yml))
except IOError as e:
pass
config['schedule']=config.get('schedule',[]) # ensure schedule exists
username = args.username or config.get('username', 'UNKNOWN')
password = args.password or config.get('password', 'UNKNOWN')
token = args.token or config.get('token', 'UNKNOWN')
mytz = timezone(args.timezone or config.get('timezone', 'US/Pacific'))
dry_run = args.dry
debug = args.debug
if args.occurrence is not None and args.date is not None:
config['schedule'].append({
'occurence':args.occurence,
'date':datetime.strptime(args.date, '%Y-%m-%d')
})
if len(config['schedule'])==0:
print 'No occurence scheduled ..'
return
try:
print 'Logging in as %s'%(username)
session_id, instance = SalesforceLogin(username=username, password=password, security_token=token)
except Exception, e:
print 'Failed to login : %s' % (str(e))
return 1
sf = Salesforce(instance=instance, session_id=session_id)
for sched in config['schedule']:
new_date = datetime.strptime(str(sched['date']), '%Y-%m-%d').date()
clone_occurrence(sf, sched['occurence'], new_date, mytz, dry_run, debug)
return 0
# Standard CLI entry point: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
| |
# This file is part of beets.
# Copyright 2013, Jakob Schnitzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Converts tracks or albums to external directory
"""
import logging
import os
import threading
from subprocess import Popen
import tempfile
from string import Template
import pipes
import platform
from beets.plugins import BeetsPlugin
from beets import ui, util
from beetsplug.embedart import _embed
from beets import config
# Shared beets logger.
log = logging.getLogger('beets')

# Serializes directory creation across the converter worker threads.
_fs_lock = threading.Lock()
_temp_files = []  # Keep track of temporary transcoded files for deletion.

# Some convenient alternate names for formats.
ALIASES = {
    u'wma': u'windows media',
    u'vorbis': u'ogg',
}
def _silent_popen(args):
"""Invoke a command (like subprocess.Popen) while silencing its
error output. Return the Popen object.
"""
# On Windows, close_fds doesn't work (i.e., raises an exception)
# when stderr is redirected.
return Popen(
args,
close_fds=platform.system() != 'Windows',
stderr=open(os.devnull, 'wb'),
)
def _destination(dest_dir, item, keep_new, path_formats):
    """Return the path under `dest_dir` where the file should be placed
    (possibly after conversion).
    """
    target = item.destination(basedir=dest_dir, path_formats=path_formats)
    if keep_new:
        # When we're keeping the converted file, no extension munging
        # occurs.
        return target
    # Otherwise swap in the configured format's extension.
    root, _old_ext = os.path.splitext(target)
    return root + get_format()[1]
def get_format():
    """Get the currently configured format command and extension.

    Returns ``(command_args, extension)`` with both encoded as UTF-8
    bytestrings; raises ``ui.UserError`` when the format definition is
    incomplete.
    """
    fmt = ALIASES.get(config['convert']['format'].get(unicode).lower(),
                      config['convert']['format'].get(unicode).lower())
    format_info = config['convert']['formats'][fmt].get(dict)

    # Convenience and backwards-compatibility shortcuts.
    keys = config['convert'].keys()
    if 'command' in keys:
        format_info['command'] = config['convert']['command'].get(unicode)
    elif 'opts' in keys:
        # Undocumented option for backwards compatibility with < 1.3.1.
        format_info['command'] = u'ffmpeg -i $source -y {0} $dest'.format(
            config['convert']['opts'].get(unicode)
        )
    if 'extension' in keys:
        format_info['extension'] = config['convert']['extension'].get(unicode)

    try:
        command = [a.encode('utf8') for a in format_info['command'].split()]
        extension = (u'.' + format_info['extension']).encode('utf8')
    except KeyError:
        raise ui.UserError(
            u'convert: format {0} needs "command" and "extension" fields'
            .format(fmt)
        )
    return command, extension
def encode(source, dest):
    """Transcode ``source`` into ``dest`` with the configured command.

    On a non-zero exit status the partial ``dest`` file is deleted along
    with any directories that become empty.
    """
    quiet = config['convert']['quiet'].get()

    if not quiet:
        log.info(u'Started encoding {0}'.format(util.displayable_path(source)))

    command, _ = get_format()
    # Substitute the $source/$dest placeholders in each template argument.
    opts = []
    for arg in command:
        opts.append(Template(arg).safe_substitute({
            'source': source,
            'dest': dest,
        }))

    log.debug(u'convert: executing: {0}'.format(
        u' '.join(pipes.quote(o.decode('utf8', 'ignore')) for o in opts)
    ))
    encode = _silent_popen(opts)
    encode.wait()

    if encode.returncode != 0:
        # Something went wrong (probably Ctrl+C), remove temporary files
        log.info(u'Encoding {0} failed. Cleaning up...'
                 .format(util.displayable_path(source)))
        util.remove(dest)
        util.prune_dirs(os.path.dirname(dest))
        return

    if not quiet:
        log.info(u'Finished encoding {0}'.format(
            util.displayable_path(source))
        )
def should_transcode(item):
    """Determine whether the item should be transcoded as part of
    conversion (i.e., its bitrate is high or it has the wrong format).
    """
    maxbr = config['convert']['max_bitrate'].get(int)
    target_format = config['convert']['format'].get(unicode)
    if target_format.lower() != item.format.lower():
        return True
    return item.bitrate >= 1000 * maxbr
def convert_item(dest_dir, keep_new, path_formats):
    """Coroutine pipeline stage: convert or copy each item sent in.

    Each ``send(item)`` places the item under ``dest_dir`` (transcoding
    only when ``should_transcode`` says so).  With ``keep_new`` the
    converted file replaces the library copy instead.
    """
    while True:
        item = yield
        dest = _destination(dest_dir, item, keep_new, path_formats)

        if os.path.exists(util.syspath(dest)):
            log.info(u'Skipping {0} (target file exists)'.format(
                util.displayable_path(item.path)
            ))
            continue

        # Ensure that only one thread tries to create directories at a
        # time. (The existence check is not atomic with the directory
        # creation inside this function.)
        with _fs_lock:
            util.mkdirall(dest)

        # When keeping the new file in the library, we first move the
        # current (pristine) file to the destination. We'll then copy it
        # back to its old path or transcode it to a new path.
        if keep_new:
            log.info(u'Moving to {0}'.
                     format(util.displayable_path(dest)))
            util.move(item.path, dest)

        if not should_transcode(item):
            # No transcoding necessary.
            log.info(u'Copying {0}'.format(util.displayable_path(item.path)))
            if keep_new:
                util.copy(dest, item.path)
            else:
                util.copy(item.path, dest)
        else:
            if keep_new:
                # Transcode the (moved) pristine file back into the
                # library under the target format's extension.
                _, ext = get_format()
                item.path = os.path.splitext(item.path)[0] + ext
                encode(dest, item.path)
            else:
                encode(item.path, dest)

        # Write tags from the database to the converted file.
        if not keep_new:
            item.path = dest
        item.write()

        # If we're keeping the transcoded file, read it again (after
        # writing) to get new bitrate, duration, etc.
        if keep_new:
            item.read()
            item.store()  # Store new path and audio data.

        if config['convert']['embed']:
            album = item.get_album()
            if album:
                artpath = album.artpath
                if artpath:
                    _embed(artpath, [item])
def convert_on_import(lib, item):
    """Transcode a file automatically after it is imported into the
    library.
    """
    if not should_transcode(item):
        return
    extension = get_format()[1]
    fd, dest = tempfile.mkstemp(extension)
    os.close(fd)
    _temp_files.append(dest)  # Delete the transcode later.
    encode(item.path, dest)
    item.path = dest
    item.write()
    item.read()  # Load new audio information data.
    item.store()
def convert_func(lib, opts, args):
    """Implement the ``beet convert`` command: list the matching items,
    confirm with the user, then convert them in parallel worker threads.
    """
    # Command-line flags take precedence over configured values.
    dest = opts.dest if opts.dest is not None else \
        config['convert']['dest'].get()
    if not dest:
        raise ui.UserError('no convert destination set')
    dest = util.bytestring_path(dest)
    threads = opts.threads if opts.threads is not None else \
        config['convert']['threads'].get(int)
    keep_new = opts.keep_new

    # Destination paths use the convert-specific formats when configured.
    if not config['convert']['paths']:
        path_formats = ui.get_path_formats()
    else:
        path_formats = ui.get_path_formats(config['convert']['paths'])

    # Show what matched and ask for confirmation before doing any work.
    ui.commands.list_items(lib, ui.decargs(args), opts.album, None)
    if not ui.input_yn("Convert? (Y/n)"):
        return

    if opts.album:
        items = (i for a in lib.albums(ui.decargs(args)) for i in a.items())
    else:
        items = iter(lib.items(ui.decargs(args)))
    # One converter coroutine per worker thread, all fed from one iterator.
    convert = [convert_item(dest, keep_new, path_formats)
               for i in range(threads)]
    pipe = util.pipeline.Pipeline([items, convert])
    pipe.run_parallel()
class ConvertPlugin(BeetsPlugin):
    """Beets plugin providing the ``convert`` command and an optional
    automatic transcode-on-import stage.
    """

    def __init__(self):
        super(ConvertPlugin, self).__init__()
        # Default configuration: one ffmpeg command template per format.
        self.config.add({
            u'dest': None,
            u'threads': util.cpu_count(),
            u'format': u'mp3',
            u'formats': {
                u'aac': {
                    u'command': u'ffmpeg -i $source -y -acodec libfaac '
                                u'-aq 100 $dest',
                    u'extension': u'm4a',
                },
                u'alac': {
                    u'command': u'ffmpeg -i $source -y -acodec alac $dest',
                    u'extension': u'm4a',
                },
                u'flac': {
                    u'command': u'ffmpeg -i $source -y -acodec flac $dest',
                    u'extension': u'flac',
                },
                u'mp3': {
                    u'command': u'ffmpeg -i $source -y -aq 2 $dest',
                    u'extension': u'mp3',
                },
                u'opus': {
                    u'command': u'ffmpeg -i $source -y -acodec libopus -vn '
                                u'-ab 96k $dest',
                    u'extension': u'opus',
                },
                u'ogg': {
                    u'command': u'ffmpeg -i $source -y -acodec libvorbis -vn '
                                u'-aq 2 $dest',
                    u'extension': u'ogg',
                },
                u'windows media': {
                    u'command': u'ffmpeg -i $source -y -acodec wmav2 '
                                u'-vn $dest',
                    u'extension': u'wma',
                },
            },
            u'max_bitrate': 500,
            u'auto': False,
            u'quiet': False,
            u'embed': True,
            u'paths': {},
        })
        self.import_stages = [self.auto_convert]

    def commands(self):
        """Register the ``convert`` subcommand and its options."""
        cmd = ui.Subcommand('convert', help='convert to external location')
        cmd.parser.add_option('-a', '--album', action='store_true',
                              help='choose albums instead of tracks')
        cmd.parser.add_option('-t', '--threads', action='store', type='int',
                              help='change the number of threads, \
                              defaults to maximum available processors')
        cmd.parser.add_option('-k', '--keep-new', action='store_true',
                              dest='keep_new', help='keep only the converted \
                              and move the old files')
        cmd.parser.add_option('-d', '--dest', action='store',
                              help='set the destination directory')
        cmd.func = convert_func
        return [cmd]

    def auto_convert(self, config, task):
        """Import-pipeline stage: transcode imported items when the
        ``auto`` option is enabled.
        """
        if self.config['auto']:
            for item in task.imported_items():
                convert_on_import(config.lib, item)
@ConvertPlugin.listen('import_task_files')
def _cleanup(task, session):
    """Remove temporary transcodes once the importer has finished with
    the task's original files.
    """
    for old_path in task.old_paths:
        if old_path in _temp_files and os.path.isfile(old_path):
            util.remove(old_path)
            _temp_files.remove(old_path)
| |
from PyQt4 import QtGui, QtCore
from enum import Enum
import PyChemu
class ChipDisplay(QtCore.QObject):
    """Model of the 64x32 monochrome CHIP-8 display backed by a QImage.

    Emits ``displayRedraw`` whenever the image content or palette
    changes and a view should repaint.
    """

    WIDTH = 64
    HEIGHT = 32

    # Emitted when the image changed and attached widgets should repaint.
    displayRedraw = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        QtCore.QObject.__init__(self, parent)
        # 1-bit image: index 0 = background, index 1 = foreground.
        self.__image = QtGui.QImage(self.WIDTH, self.HEIGHT, QtGui.QImage.Format_Mono)
        self.__table = self.__image.colorTable()
        # flicker prevention
        self.__fpEnabled = True
        # cache of all sprites drawn, keys being coordinates and values
        # being sprite data
        # (int, int) -> (int, [int])
        self.__spriteCache = {}

    @property
    def flickerPrevent(self):
        # Whether the draw/erase flicker-prevention heuristic is active.
        return self.__fpEnabled

    @flickerPrevent.setter
    def flickerPrevent(self, val):
        self.__fpEnabled = val

    @property
    def image(self):
        # The backing QImage (read-only access for widgets).
        return self.__image

    @property
    def backgroundColor(self):
        return QtGui.QColor(self.__table[0])

    @backgroundColor.setter
    def backgroundColor(self, value):
        # Palette index 0 is the background; update and repaint.
        self.__table[0] = value.rgb()
        self.__image.setColorTable(self.__table)
        self.displayRedraw.emit()

    @property
    def foregroundColor(self):
        return QtGui.QColor(self.__table[1])

    @foregroundColor.setter
    def foregroundColor(self, value):
        # Palette index 1 is the foreground; update and repaint.
        self.__table[1] = value.rgb()
        self.__image.setColorTable(self.__table)
        self.displayRedraw.emit()

    def clear(self):
        """Blank the whole display (CHIP-8 CLS)."""
        self.__image.fill(0)
        if self.__fpEnabled:
            self.__spriteCache.clear()
        self.displayRedraw.emit()

    def drawSprite(self, sprite):
        """XOR-draw ``sprite`` onto the image, wrapping at the edges.

        Returns True when any set sprite pixel landed on an already-set
        pixel (the CHIP-8 collision flag).
        """
        collision = False
        for rIndex in range(sprite.rows):
            row = sprite.data[rIndex]
            for col in range(8):
                # Coordinates wrap around the 64x32 display.
                loc = QtCore.QPoint((sprite.x + col) % 64,
                                    (sprite.y + rIndex) % 32)
                spritePixel = (row >> (7 - col)) & 1
                curPixel = self.__image.pixelIndex(loc)
                self.__image.setPixel(loc, spritePixel ^ curPixel)
                if not collision and (spritePixel & curPixel) == 1:
                    collision = True
        redraw = True
        if self.__fpEnabled:
            # Flicker prevention: games commonly redraw the identical
            # sprite at the same spot to erase it; suppress the repaint
            # for that erase so the sprite doesn't visibly blink.
            coord = sprite.x, sprite.y
            if coord in self.__spriteCache:
                # check if erased
                rows, data = self.__spriteCache[coord]
                if rows == sprite.rows:
                    erased = True
                    for i in range(rows):
                        if data[i] != sprite.data[i]:
                            erased = False  # sprite data is different
                    if erased:
                        del self.__spriteCache[coord]
                        redraw = False
            else:
                # add sprite to cache
                data = [row for row in sprite.data]
                self.__spriteCache[coord] = (sprite.rows, data)
        # NOTE: an earlier state-machine implementation of flicker
        # prevention (_FPState INIT/CHECK/WAIT) was removed in favor of
        # the coordinate-keyed sprite cache above.
        if redraw:
            self.displayRedraw.emit()
        return collision
class ChipDisplayWidget(QtGui.QWidget):
    """Widget that paints a ChipDisplay image, either stretched to fit the
    widget (optionally letterboxed to the native aspect ratio) or at a fixed
    integer zoom selected from a right-click context menu."""

    def __init__(self, display, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setObjectName("ChipDisplayWidget")
        self.setMinimumSize(64, 32)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(self.__showContextMenu)
        self.__display = display
        # Repaint whenever the display model reports changed pixel data.
        self.__display.displayRedraw.connect(lambda: self.repaint())
        # Native width/height ratio of the display image (64/32 = 2.0).
        self.__RATIO = float(self.__display.image.width()) / self.__display.image.height()
        self.__fixedScale = 1
        self.__stretch = True
        self.__keepAspectRatio = True
        # Context menu
        self.__cmenu = QtGui.QMenu("Context menu", self)
        self.__sizeActionGroup = QtGui.QActionGroup(self)
        arAction = self.__cmenu.addAction("Keep aspect ratio")
        arAction.setCheckable(True)
        arAction.setChecked(True)
        arAction.triggered.connect(self.__arActionTriggered)
        self.__cmenu.addSeparator()
        # Mutually exclusive sizing choices: stretch-to-fit or 1x..4x zoom.
        stretchAction = self.__cmenu.addAction("Stretch to fit")
        stretchAction.setCheckable(True)
        stretchAction.setChecked(True)
        stretchAction.triggered.connect(self.__stretchActionTriggered)
        self.__sizeActionGroup.addAction(stretchAction)
        s1 = self.__cmenu.addAction("1x")
        s1.setCheckable(True)
        s1.triggered.connect(lambda: self.__scaleActionTriggered(1))
        self.__sizeActionGroup.addAction(s1)
        s2 = self.__cmenu.addAction("2x")
        s2.setCheckable(True)
        s2.triggered.connect(lambda: self.__scaleActionTriggered(2))
        self.__sizeActionGroup.addAction(s2)
        s3 = self.__cmenu.addAction("3x")
        s3.setCheckable(True)
        s3.triggered.connect(lambda: self.__scaleActionTriggered(3))
        self.__sizeActionGroup.addAction(s3)
        s4 = self.__cmenu.addAction("4x")
        s4.setCheckable(True)
        s4.triggered.connect(lambda: self.__scaleActionTriggered(4))
        self.__sizeActionGroup.addAction(s4)
        self.__display.clear()

    def __showContextMenu(self, point):
        # Pop up the sizing menu at the cursor (widget -> screen coordinates).
        self.__cmenu.exec_(self.mapToGlobal(point))

    def __arActionTriggered(self, checked):
        self.keepAspectRatio = checked

    def __stretchActionTriggered(self):
        self.__stretch = True
        self.repaint()

    def __scaleActionTriggered(self, amount):
        # Leaving stretch mode implicitly when a fixed zoom is picked.
        if self.__stretch:
            self.__stretch = False
        self.scale = amount

    @property
    def scale(self):
        # Fixed integer zoom factor used when stretch-to-fit is disabled.
        return self.__fixedScale

    @scale.setter
    def scale(self, value):
        if value > 0:
            self.__fixedScale = value
            if not self.__stretch:
                self.repaint()

    @property
    def keepAspectRatio(self):
        return self.__keepAspectRatio

    @keepAspectRatio.setter
    def keepAspectRatio(self, val):
        old = self.__keepAspectRatio
        self.__keepAspectRatio = val
        if old != val:
            self.repaint()

    @property
    def stretchToFit(self):
        return self.__stretch

    @stretchToFit.setter
    def stretchToFit(self, value):
        self.__stretch = value
        self.repaint()

    def paintEvent(self, e):
        QtGui.QWidget.paintEvent(self, e)
        painter = QtGui.QPainter(self)
        # Clear to the palette window colour before drawing the image.
        painter.fillRect(self.contentsRect(),
                         self.palette().color(QtGui.QPalette.Active,
                                              QtGui.QPalette.Window))
        img = self.__display.image
        if self.__stretch:
            width = self.width()
            height = self.height()
            if self.__keepAspectRatio:
                # Letterbox: shrink the oversized dimension so that
                # width/height matches the image's native ratio.
                ratio = float(self.width()) / self.height()
                if ratio > self.__RATIO:  # width is too big
                    width = height * self.__RATIO
                elif ratio < self.__RATIO:  # height is too big
                    height = width / self.__RATIO
            sx = float(width) / img.width()
            sy = float(height) / img.height()
        else:
            sx = self.__fixedScale
            sy = self.__fixedScale
        # Centre the scaled image inside the widget.
        x = (self.width() - (img.width() * sx)) / 2.0
        y = (self.height() - (img.height() * sy)) / 2.0
        painter.translate(x, y)
        painter.scale(sx, sy)
        painter.drawImage(0, 0, img)
class DisplayTest(QtCore.QObject):
    """Keyboard-driven smoke test for ChipDisplay: WASD moves a test sprite,
    P toggles flicker prevention.  Install as an event filter on the widget."""

    def __init__(self, disp, parent=None):
        QtCore.QObject.__init__(self, parent)
        self.__disp = disp
        # A 4-row, half-width (0xF0 = left nibble lit) test sprite at (10, 10).
        self.__sprite = PyChemu.ChipSprite()
        self.__sprite.x = 10
        self.__sprite.y = 10
        self.__sprite.rows = 4
        for i in range(4):
            self.__sprite.data[i] = 0xf0
        disp.drawSprite(self.__sprite)

    def eventFilter(self, obj, evt):
        # Returns True when the key press was consumed.
        if evt.type() == QtCore.QEvent.KeyPress:
            key = evt.key()
            if key == QtCore.Qt.Key_P:
                self.__disp.flickerPrevent = not self.__disp.flickerPrevent
                return True
            coords = [self.__sprite.x, self.__sprite.y]
            if key == QtCore.Qt.Key_W:
                coords[1] -= 1
            if key == QtCore.Qt.Key_A:
                coords[0] -= 1
            if key == QtCore.Qt.Key_S:
                coords[1] += 1
            if key == QtCore.Qt.Key_D:
                coords[0] += 1
            # XOR-draw at the old position to erase, then draw at the new one.
            self.__disp.drawSprite(self.__sprite)
            self.__sprite.x, self.__sprite.y = coords
            self.__disp.drawSprite(self.__sprite)
            return True
        return False
if __name__ == "__main__":
app = QtGui.QApplication([])
disp = ChipDisplay()
disp.foregroundColor = QtGui.QColor(255, 0, 0)
disp.backgroundColor = QtGui.QColor(0, 128, 128)
win = ChipDisplayWidget(disp)
win.resize(640, 480)
win.show()
test = DisplayTest(disp)
win.installEventFilter(test)
app.exec_()
| |
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Lyapunoc Fractal Demo
import Numeric
import math
# Output image dimensions (columns, rows) for the fractal computation.
SIZE = (100, 100)
# ------------------------------------------------------------
# Python + (some) Numeric
# ------------------------------------------------------------
def lyapunov(pattern, r1_range, r2_range, max_init, max_n, size = SIZE):
    """Compute a Lyapunov-exponent image over a grid of (r1, r2) pairs.

    Python 2 / Numeric code.  `pattern` is a sequence of 0/1 indices that
    alternates between r1 and r2 in the logistic map; `max_init` iterations
    warm up the map and `max_n` iterations estimate the exponent.  Returns a
    `size`-shaped array with positive exponents clamped to 0 (only stable,
    negative regions are kept).
    """
    results = Numeric.zeros(size, typecode = Numeric.Float)
    r1_inc = (r1_range[1] - r1_range[0]) / size[0]
    r2_inc = (r2_range[1] - r2_range[0]) / size[1]
    r1 = r1_range[0] + r1_inc
    r2 = r2_range[0] + r2_inc
    print r1_range, r2_range, r1_inc, r2_inc, r1, r2
    for x in range(size[0]):
        # Restart the r2 sweep for every column; r1 advances per column.
        r2 = r2_range[0] + r2_inc
        print 'col:', x, r1, r2
        for y in range(size[1]):
            results[y, x] = lyapunov_point(pattern, r1, r2, max_init, max_n)
            r2 += r2_inc
        r1 += r1_inc
    # Clamp chaotic (positive) exponents to zero, keep stable (negative) ones.
    return Numeric.where(Numeric.less(results, 0), results, 0)
def lyapunov_point(pattern, r1, r2, max_init, max_n, x0 = 0.5):
    """Estimate the Lyapunov exponent of the logistic map x' = r*x*(1-x)
    where r alternates between r1 and r2 according to `pattern`.

    Returns the base-2 average of log|dx'/dx| over `max_n` iterations after
    `max_init` warm-up iterations, or the sentinel -10.0 when the warm-up
    diverged to -infinity.
    """
    r_idx = 0  # NOTE(review): unused
    r_max = len(pattern)
    x = x0
    rs = (r1, r2)
    # Expand the 0/1 pattern into the concrete r sequence for one period.
    r = [rs[i] for i in pattern]
    # Init: iterate the map so x settles onto the attractor before measuring.
    for i in range(max_init):
        x = r[i % r_max] * x * (1.0 - x)
    if x == float('-infinity'):
        return -10.0  # diverged during warm-up; sentinel recognised by Draw()
    # Derive Exponent
    total = 0.0
    try:
        for i in range(max_n):
            ri = r[i % r_max]
            x = ri * x * (1.0 - x)
            # |d/dx (r*x*(1-x))| = |r - 2*r*x|; log base 2.
            total = total + math.log(abs(ri - 2.0 * ri * x), 2) # / math.log(2)
    except:
        # NOTE(review): bare except — presumably guards math domain errors /
        # overflow; the partial sum is still averaged below.
        print 'oops:', ri, x
    return total / float(max_n)
# ------------------------------------------------------------
# AltiVec
# ------------------------------------------------------------
# import corepy.arch.ppc.isa as ppc
# import corepy.arch.ppc.platform as synppc
# import corepy.arch.ppc.lib.iterators as ppc_iter
# import corepy.arch.ppc.types.ppc_types as ppc_types
# import corepy.arch.vmx.isa as vmx
# import corepy.arch.vmx.types.vmx_types as vmx_types
# def synthesize_lyapunov_point_vmx(code, r_vecs, r1, r2, x0, result, max_init, max_n):
# x = vmx_types.SingleFloat()
# r = vmx_types.SingleFloat()
# t1 = vmx_types.SingleFloat()
# t2 = vmx_types.SingleFloat()
# # Init
# x.v = x0
# for i in ppc_iter.syn_ite(code, max_init, mode = ppc_iter.CTR):
# x.v = r * x * (1.0 - x)
# # if x == float('-infinity'):
# # return -10.0
# # Derive Exponent
# total = vmx_types.SingleFloat()
# total = total * 0.0
# for i in ppc_iter.syn_ite(code, max_n, mode = ppc_iter.CTR):
# r.v = load_r()
# x.v = r * x * (1.0 - x)
# t1.v = r - 2.0 * r * x
# t2.v = t1 * -1.0
# total.v = total + vmx.log.ex(vmx.vmaxfp.ex(t1, t2))
# result.v = total / max_n
# return result
# def synthesize_lyapunov_vmx(code, pattern, rx_range, ry_range, max_init, max_n, size = SIZE):
# old_code = ppc.get_active_code()
# ppc.set_active_code(code)
# # Create Numeric arrays for the results, r values, and pattern
# results = Numeric.zeros(size, typecode = Numeric.Float32)
# rx_inc = (rx_range[1] - rx_range[0]) / size[0]
# ry_inc = (ry_range[1] - ry_range[0]) / size[1]
# r_inc = Numeric.array((rx_inc, rx_inc, rx_inc, rx_inc,
# ry_inc, ry_inc, ry_inc, ry_inc),
# typecode = Numeric.Float32)
# rx = rx_range[0] + rx_inc
# ry = ry_range[0] + ry_inc
# r_init = Numeric.array((rx, rx + rx_inc, rx + rx_inc * 2, rx + rx_inc * 3,
# ry, ry, ry, ry),
# typecode = Numeric.Float32)
# rs = (rx, ry)
# r_vecs = [[rs[i]] * 4 for i in pattern]
# r_vecs = reduce(lambda a, b: a + b, r_vecs, [])
# r_vecs = Numeric.array(r_vecs, typecode = Numeric.Float32)
# x0_array = Numeric.array((.5, .5, .5, .5), typecode = Numeric.Float32)
# # Synthetic Variables
# temp = ppc_types.UnsigedWord(0)
# results_addr = ppc_types.UnsigedWord(synppc.array_address(results))
# r_inc_addr = ppc_types.UnsigedWord(synppc.array_address(r_inc))
# r_init_addr = ppc_types.UnsigedWord(synppc.array_address(r_init))
# r_vecs_addr = ppc_types.UnsigedWord(synppc.array_address(r_vecs))
# x0_addr = ppc_types.UnsigedWord(synppc.array_address(x0_array))
# rx = vmx_types.SingleFloat()
# ry = vmx_types.SingleFloat()
# x0 = vmx_types.SingleFloat()
# result = vmx_types.SingleFloat()
# rx_init = vmx_types.SingleFloat()
# ry_init = vmx_types.SingleFloat()
# rx_inc = vmx_types.SingleFloat()
# ry_inc = vmx_types.SingleFloat()
# # Load the values values for r into registers
# ppc.lvx(rx_init, 0, r_init_addr)
# ppc.lvx(rx_inc, 0, r_inc_addr)
# temp.v = 16
# ppc.lvx(ry_init, temp, r_init_addr)
# ppc.lvx(ry_inc, temp, r_inc_addr)
# ppc.lvx(x0, 0, x0_addr)
# # Main loop
# for y in ppc_iter.syn_range(size[1]):
# rx.v = rx_init
# for x in ppc_iter.syn_range(size[0] / 4, 4):
# synthesize_lyapunov_point_vmx(code, r_vecs, r1, r2, x0, result, max_init, max_n):
# rx.v = rx + rx_inc
# # TODO: STORE RESULT
# # results[y, x] = lyapunov_point(pattern, rx, ry, max_init, max_n)
# ry.v = ry + ry_inc
# return
import wx
class FractalData:
    """Holds a 2D array of Lyapunov exponents and renders it to a wx DC
    as a greyscale point plot (Python 2 / wxPython)."""

    def __init__(self, data = None):
        self._data = data
        return

    def SetData(self, data):
        self._data = data
        return

    def Draw(self, dc):
        # Map each exponent to a grey level and plot it point by point.
        if self._data is None: return
        h, w = self._data.shape
        # Flatten/restore shape; the flattened view was used by the disabled
        # min() computation below.
        self._data.shape = (h*w,)
        mn = -1.0 # min(self._data)
        self._data.shape = (h,w)
        shaded = self._data / mn * 255.0
        for y in range(h):
            for x in range(w):
                if self._data[y, x] < 0.0:
                    if self._data[y, x] > -1.0:
                        shade = int(shaded[y, x]) % 255
                    if self._data[y, x] > -10.0:
                        # NOTE(review): this condition also matches values in
                        # (-1, 0) and overwrites the branch above — possibly
                        # intended as `elif`; confirm before changing.
                        shade = 255 - int(shaded[y, x]) % 255
                else:
                    shade = 1.0
                dc.SetPen(wx.Pen(wx.Colour(shade, shade, shade)))
                dc.DrawPoint(x, y)
        return
class FractalPlot(wx.Window):
    """
    Simple 2D plot window. Data and axes are set by the user and managed by
    this class. The Draw() method calls the draw methods on the axis and data.
    The display can be copied to the clipboard using ctl-c.
    """
    # Names intended to be exposed when this window is wrapped/proxied.
    PublicMethods = ('GetXAxis', 'SetXAxis', 'GetYAxis', 'SetYAxis',
                     'GetData', 'AddData', 'ClearData',
                     'ShowAxisLabels', 'Clear', 'CopyToClipboard',
                     'IsInViewport', 'BoundXY', 'BoundX', 'BoundY',
                     'MouseToData', 'Draw',
                     'SetStatusFrame',
                     '_lastDataViewport')

    def __init__(self, parent, id = -1, style = 0, catchMotion = 1, catchChar = 1):
        wx.Window.__init__(self, parent, id, style = style)
        self.SetBackgroundColour(wx.WHITE)
        self._data = []
        wx.EVT_PAINT(self, self.OnPaint)
        return

    def AddData(self, data):
        # `data` must provide a Draw(dc) method (e.g. FractalData).
        self._data.append(data)
        return

    def CopyToClipboard(self):
        """
        Copy the drawing to the clipboard as a bitmap.
        """
        w, h = self.GetSize()
        bmp = wx.EmptyBitmap(w, h)
        memDC = wx.MemoryDC()
        memDC.SelectObject(bmp)
        # Blit the on-screen contents into the off-screen bitmap.
        plotDC = wx.ClientDC(self)
        memDC.Blit(0, 0, w, h, plotDC, 0, 0)
        if wx.TheClipboard.Open():
            data = wx.BitmapDataObject(bmp)
            wx.TheClipboard.SetData(data)
            wx.TheClipboard.Close()
        return

    def Draw(self, dc):
        """
        Draw everything!
        """
        print 'Drawing...'
        for data in self._data:
            data.Draw(dc)
        print 'Done.'
        return

    # Event handlers
    def OnPaint(self, event):
        dc = wx.PaintDC(self)
        dc.BeginDrawing()
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        self.Draw(dc)
        dc.EndDrawing()
        return
if __name__=='__main__':
    class App(wx.App):
        def OnInit(self):
            self.ShowLyapunovPlot()
            return True

        def ShowLyapunovPlot(self):
            # Build a frame containing a FractalPlot and fill it with either
            # real Lyapunov data or a quick gradient test pattern.
            frame = wx.Frame(None, -1, 'Lyapunov')
            frame.SetSize(SIZE)
            plot = FractalPlot(frame)
            if True:
                # Full (slow) fractal computation over the standard AB pattern.
                raw_data = lyapunov([0,1], [2.0, 4.0], [2.0, 4.0], 200, 400)
            else:
                # Gradient test pattern, used when the computation is disabled.
                raw_data = Numeric.zeros(SIZE, typecode = Numeric.Float)
                for i in range(100):
                    i_start = i - 100
                    row = Numeric.arange(i_start, i)
                    raw_data[i, :] = row
            fractal_data = FractalData(raw_data)
            plot.AddData(fractal_data)
            frame.Show(True)
            return

    app = App(0)
    app.MainLoop()
| |
# Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
from __future__ import annotations
import typing
import asyncio
import pytest
import pyuavcan.transport
from pyuavcan.transport import Timestamp
from pyuavcan.transport.can.media import Media, Envelope, FilterConfiguration, DataFrame, FrameFormat
# Run every coroutine test in this module on the asyncio event loop.
pytestmark = pytest.mark.asyncio
class MockMedia(Media):
    """In-memory CAN media mock used by the transport tests.

    Every instance constructed with the same ``peers`` set joins one virtual
    bus: frames sent by one peer are delivered to all other peers, and frames
    flagged for loopback are fed back to the sender, both subject to each
    receiver's acceptance filters.
    """

    def __init__(self, peers: typing.Set[MockMedia], mtu: int, number_of_acceptance_filters: int):
        self._peers = peers
        peers.add(self)  # join the virtual bus
        self._mtu = int(mtu)
        # No-op handler until start() installs a real one.
        self._rx_handler: Media.ReceivedFramesHandler = lambda _: None  # pragma: no cover
        self._acceptance_filters = [
            self._make_dead_filter()  # By default drop (almost) all frames
            for _ in range(int(number_of_acceptance_filters))
        ]
        self._automatic_retransmission_enabled = False  # This is the default per the media interface spec
        self._closed = False
        # One-shot fault-injection slot armed via raise_on_send_once().
        self._raise_on_send_once: typing.Optional[Exception] = None
        super().__init__()

    @property
    def loop(self) -> asyncio.AbstractEventLoop:
        return asyncio.get_event_loop()

    @property
    def interface_name(self) -> str:
        # Peers on the same bus share the set object, hence the same name.
        return f"mock@{id(self._peers):08x}"

    @property
    def mtu(self) -> int:
        return self._mtu

    @property
    def number_of_acceptance_filters(self) -> int:
        return len(self._acceptance_filters)

    def start(self, handler: Media.ReceivedFramesHandler, no_automatic_retransmission: bool) -> None:
        """Install the RX handler.  Raises ResourceClosedError if closed."""
        if self._closed:
            raise pyuavcan.transport.ResourceClosedError
        assert callable(handler)
        self._rx_handler = handler
        assert isinstance(no_automatic_retransmission, bool)
        self._automatic_retransmission_enabled = not no_automatic_retransmission

    def configure_acceptance_filters(self, configuration: typing.Sequence[FilterConfiguration]) -> None:
        """Replace the filter set, padding with dead filters up to capacity."""
        if self._closed:
            raise pyuavcan.transport.ResourceClosedError
        configuration = list(configuration)  # Do not mutate the argument
        while len(configuration) < len(self._acceptance_filters):
            configuration.append(self._make_dead_filter())
        assert len(configuration) == len(self._acceptance_filters)
        self._acceptance_filters = configuration

    @property
    def automatic_retransmission_enabled(self) -> bool:
        return self._automatic_retransmission_enabled

    @property
    def acceptance_filters(self) -> typing.List[FilterConfiguration]:
        # Return a copy so callers cannot mutate our state.
        return list(self._acceptance_filters)

    async def send(self, frames: typing.Iterable[Envelope], monotonic_deadline: float) -> int:
        """Broadcast ``frames`` across the virtual bus and loop back flagged
        frames to this instance.  Returns the number of frames sent; the
        deadline is ignored because delivery here is instantaneous."""
        del monotonic_deadline  # Unused
        if self._closed:
            raise pyuavcan.transport.ResourceClosedError
        if self._raise_on_send_once:
            # Disarm before raising so only one send() fails.
            self._raise_on_send_once, ex = None, self._raise_on_send_once
            assert isinstance(ex, Exception)
            raise ex
        frames = list(frames)
        assert len(frames) > 0, "Interface constraint violation: empty transmission set"
        assert min(map(lambda x: len(x.frame.data), frames)) >= 1, "CAN frames with empty payload are not valid"
        # The media interface spec says that it is guaranteed that the CAN ID is the same across the set; enforce this.
        assert len(set(map(lambda x: x.frame.identifier, frames))) == 1, "Interface constraint violation: nonuniform ID"
        timestamp = Timestamp.now()
        # Broadcast across the virtual bus we're emulating here.
        for p in self._peers:
            if p is not self:
                # Unconditionally clear the loopback flag because for the other side these are
                # regular received frames, not loopback frames.
                p._receive(  # pylint: disable=protected-access
                    (timestamp, Envelope(f.frame, loopback=False)) for f in frames
                )
        # Simple loopback emulation with acceptance filtering.
        self._receive((timestamp, f) for f in frames if f.loopback)
        return len(frames)

    def close(self) -> None:
        # Idempotent: closing twice (or leaving the bus twice) is a no-op.
        if not self._closed:
            self._closed = True
            self._peers.remove(self)

    def raise_on_send_once(self, ex: Exception) -> None:
        # Arm a one-shot exception to be raised by the next send() call.
        self._raise_on_send_once = ex

    def inject_received(self, frames: typing.Iterable[typing.Union[Envelope, DataFrame]]) -> None:
        # Test helper: feed frames into this instance as if received off-bus.
        # Bare DataFrames are wrapped into non-loopback envelopes.
        timestamp = Timestamp.now()
        self._receive(
            (
                timestamp,
                (f if isinstance(f, Envelope) else Envelope(frame=f, loopback=False)),
            )
            for f in frames
        )

    def _receive(self, frames: typing.Iterable[typing.Tuple[Timestamp, Envelope]]) -> None:
        # Apply acceptance filtering; invoke the handler only for non-empty batches.
        frames = list(filter(lambda item: self._test_acceptance(item[1].frame), frames))
        if frames:  # Where are the assignment expressions when you need them?
            self._rx_handler(frames)

    def _test_acceptance(self, frame: DataFrame) -> bool:
        # Accepted if any filter matches the masked identifier and (if the
        # filter constrains it) the frame format.
        return any(
            map(
                lambda f: frame.identifier & f.mask == f.identifier & f.mask
                and (f.format is None or frame.format == f.format),
                self._acceptance_filters,
            )
        )

    @staticmethod
    def list_available_interface_names() -> typing.Iterable[str]:
        return []  # pragma: no cover

    @staticmethod
    def _make_dead_filter() -> FilterConfiguration:
        # Matches only base-format ID 0 with every ID bit masked in
        # (assumes int(FrameFormat) is the ID bit length — TODO confirm),
        # so virtually all traffic is dropped.
        fmt = FrameFormat.BASE
        return FilterConfiguration(0, 2 ** int(fmt) - 1, fmt)
async def _unittest_can_mock_media() -> None:
    """Exercise MockMedia: filtering, loopback, peer broadcast, fault
    injection, and close() semantics."""
    peers: typing.Set[MockMedia] = set()
    me = MockMedia(peers, 64, 3)
    assert len(peers) == 1 and me in peers
    assert me.mtu == 64
    assert me.number_of_acceptance_filters == 3
    assert not me.automatic_retransmission_enabled
    assert str(me) == f"MockMedia('mock@{id(peers):08x}', mtu=64)"
    me_collector = FrameCollector()
    me.start(me_collector.give, False)
    assert me.automatic_retransmission_enabled
    # Will drop the loopback because of the acceptance filters
    await me.send(
        [
            Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"abc")), loopback=False),
            Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"def")), loopback=True),
        ],
        asyncio.get_event_loop().time() + 1.0,
    )
    assert me_collector.empty
    me.configure_acceptance_filters([FilterConfiguration.new_promiscuous()])
    # Now the loopback will be accepted because we have reconfigured the filters
    await me.send(
        [
            Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"abc")), loopback=False),
            Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"def")), loopback=True),
        ],
        asyncio.get_event_loop().time() + 1.0,
    )
    assert me_collector.pop()[1].frame == DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"def"))
    assert me_collector.empty
    # Second peer joins the same virtual bus.
    pe = MockMedia(peers, 8, 1)
    assert peers == {me, pe}
    pe_collector = FrameCollector()
    pe.start(pe_collector.give, False)
    # One-shot fault injection: the next send() raises, subsequent sends work.
    me.raise_on_send_once(RuntimeError("Hello world!"))
    with pytest.raises(RuntimeError, match="Hello world!"):
        await me.send([], asyncio.get_event_loop().time() + 1.0)
    await me.send(
        [
            Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"abc")), loopback=False),
            Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"def")), loopback=True),
        ],
        asyncio.get_event_loop().time() + 1.0,
    )
    # The peer still has its default (dead) filter, so nothing was received.
    assert pe_collector.empty
    pe.configure_acceptance_filters([FilterConfiguration(123, 127, None)])
    await me.send(
        [
            Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"abc")), loopback=False),
            Envelope(DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"def")), loopback=True),
        ],
        asyncio.get_event_loop().time() + 1.0,
    )
    await me.send(
        [
            Envelope(DataFrame(FrameFormat.EXTENDED, 456, bytearray(b"ghi")), loopback=False),  # Dropped by the filters
        ],
        asyncio.get_event_loop().time() + 1.0,
    )
    # Note: the peer receives frames with loopback cleared by the sender.
    assert pe_collector.pop()[1].frame == DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"abc"))
    assert pe_collector.pop()[1].frame == DataFrame(FrameFormat.EXTENDED, 123, bytearray(b"def"))
    assert pe_collector.empty
    me.close()
    me.close()  # Idempotency.
    assert peers == {pe}
    with pytest.raises(pyuavcan.transport.ResourceClosedError):
        await me.send([], asyncio.get_event_loop().time() + 1.0)
    with pytest.raises(pyuavcan.transport.ResourceClosedError):
        me.configure_acceptance_filters([])
    await asyncio.sleep(1)  # Let all pending tasks finalize properly to avoid stack traces in the output.
class FrameCollector:
    """FIFO accumulator of (timestamp, envelope) pairs delivered by a media
    instance; used as the RX handler in the mock media tests."""

    def __init__(self) -> None:
        self._collected: typing.List[typing.Tuple[Timestamp, Envelope]] = []

    def give(self, frames: typing.Iterable[typing.Tuple[Timestamp, Envelope]]) -> None:
        """Append a batch of received frames, validating the element types."""
        batch = list(frames)
        for item in batch:
            assert isinstance(item[0], Timestamp) and isinstance(item[1], Envelope)
        self._collected.extend(batch)

    def pop(self) -> typing.Tuple[Timestamp, Envelope]:
        """Remove and return the oldest collected frame."""
        oldest, *remainder = self._collected
        self._collected = remainder
        return oldest

    @property
    def empty(self) -> bool:
        return not self._collected

    def __repr__(self) -> str:  # pragma: no cover
        return f"{type(self).__name__}({str(self._collected)})"
| |
import logging
import libsolace
from libsolace import Plugin
from libsolace.SolaceCommandQueue import SolaceCommandQueue
from libsolace.SolaceXMLBuilder import SolaceXMLBuilder
from libsolace.SolaceAPI import SolaceAPI
from libsolace.items.SolaceQueue import SolaceQueue
@libsolace.plugin_registry.register
class SolaceBridge(Plugin):
""" Construct a bridge between two appliance clusters to link specific VPN's. This Plugin is still being developed,
and is NOT ready for production. """
def __init__(self, testmode=True, shutdown_on_apply=False, options=None, version=None, **kwargs):
    """ Init user object: build the full queue of SEMP commands that link the
    requested VPNs between the primary and DR appliance clusters.

    :type testmode: boolean
    :type shutdown_on_apply: boolean
    :type options: OptionParser
    :type version: string
    """
    logging.debug("options: %s" % options)
    self.cq = SolaceCommandQueue(version=version)
    self.primaryCluster = SolaceAPI(options.primary, testmode=testmode, version=version)
    self.drCluster = SolaceAPI(options.backup, testmode=testmode, version=version)
    # Expand environment placeholders in the VPN names; names without a
    # placeholder raise on % substitution and are kept as-is.
    self.vpns = []
    for vpn in options.vpns:
        try:
            self.vpns.append(vpn % options.environment)
        except Exception, e:
            self.vpns.append(vpn)
    for vpn in self.vpns:
        # NOTE(review): vpn was already %-substituted above; this second
        # substitution is a no-op for already-expanded names — confirm intent.
        try:
            bridgeName = vpn % options.environment
        except Exception, e:
            bridgeName = vpn
        logging.info("Creating Bridge: %s" % bridgeName)
        primaryBridgeName = "%s_%s" % ("primary", bridgeName)
        backupBridgeName = "%s_%s" % ("backup", bridgeName)
        logging.info("Primary Bridge Name: %s" % primaryBridgeName)
        logging.info("Backup Bridge Name: %s" % backupBridgeName)
        # create bridge on primary cluster
        self._create_bridge(self.primaryCluster, primaryBridgeName, vpn,
                            version=version)
        # create bridge on the DR cluster
        self._create_bridge(self.drCluster, backupBridgeName, vpn,
                            version=version)
        # create remote on primary cluster bridge
        self._create_bridge_remote_addr(self.primaryCluster, primaryBridgeName, vpn,
                                        options.backup_addr, options.primary_phys_intf, version=version)
        # create reverse remote on dr cluster bridge
        self._create_bridge_remote_vrouter(self.drCluster, backupBridgeName, vpn,
                                           options.primary_cluster_primary_node_name, version=version)
        # create remote username on primary cluster bridge
        self._bridge_username_addr(self.primaryCluster, primaryBridgeName, vpn,
                                   options.backup_addr, options.primary_phys_intf, options.username,
                                   options.password, version=version)
        # create remote username on backup cluster bridge
        self._bridge_username_vrouter(self.drCluster, backupBridgeName, vpn,
                                      options.primary_cluster_primary_node_name, options.username,
                                      options.password, version=version)
        # enable all bridges
        self._bridge_enable(self.primaryCluster, primaryBridgeName, vpn, version=version)
        self._bridge_enable(self.drCluster, backupBridgeName, vpn, version=version)
        # enable all remotes
        self._bridge_enable_remote_addr(self.primaryCluster, primaryBridgeName, vpn,
                                        options.backup_addr, options.primary_phys_intf, version=version)
        self._bridge_enable_remote_vrouter(self.drCluster, backupBridgeName, vpn,
                                           options.primary_cluster_primary_node_name, version=version)
        # create bridge internal queues
        self._bridge_create_queue(self.primaryCluster, options.queue, vpn, options.username, version=version)
        self._bridge_create_queue(self.drCluster, options.queue, vpn, options.username, version=version)
        # set remote internal queues
        self._bridge_set_remote_queue_addr(self.primaryCluster, primaryBridgeName, vpn,
                                           options.backup_addr, options.primary_phys_intf, options.queue,
                                           version=version)
        self._bridge_set_remote_queue_vrouter(self.drCluster, backupBridgeName, vpn,
                                              options.primary_cluster_primary_node_name, options.queue,
                                              version=version)
def _create_bridge(self, api, bridgeName, vpn, **kwargs):
    # Create the bridge object on both appliances of the cluster: the
    # "primary" flavour on the primary router and the "backup" flavour on
    # the backup router.  Bare attribute accesses on the builder (e.g.
    # `.primary`) presumably emit empty XML elements — side-effectful,
    # do not remove.
    api.x = SolaceXMLBuilder("%s create primary bridge: %s on primary appliance" % (api.primaryRouter, bridgeName),
                             version=api.version)
    api.x.create.bridge.bridge_name = bridgeName
    api.x.create.bridge.vpn_name = vpn
    api.x.create.bridge.primary
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    api.x = SolaceXMLBuilder("%s create backup bridge: %s on backup appliance" % (api.backupRouter, bridgeName),
                             version=api.version)
    api.x.create.bridge.bridge_name = bridgeName
    api.x.create.bridge.vpn_name = vpn
    api.x.create.bridge.backup
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _create_bridge_remote_vrouter(self, api, bridgeName, vpn, virtual_router, **kwargs):
    # Create a virtual-router-connected remote message-vpn on the bridge,
    # on both the primary and backup appliance.  Bare attribute accesses on
    # the builder presumably emit empty XML elements — keep them.
    api.x = SolaceXMLBuilder("%s configure primary bridge: %s vrouter: %s on primary appliance" % (
        api.primaryRouter, bridgeName, virtual_router), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.remote.create.message_vpn.vpn_name = vpn
    api.x.bridge.remote.create.message_vpn.router
    api.x.bridge.remote.create.message_vpn.virtual_router_name = "v:%s" % virtual_router
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    api.x = SolaceXMLBuilder("%s configure backup bridge: %s vrouter: %s on backup appliance" % (
        api.backupRouter, bridgeName, virtual_router), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.remote.create.message_vpn.vpn_name = vpn
    api.x.bridge.remote.create.message_vpn.router
    api.x.bridge.remote.create.message_vpn.virtual_router_name = "v:%s" % virtual_router
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _create_bridge_remote_addr(self, api, bridgeName, vpn, backup_addr, phys_intf, **kwargs):
    # Create an address-connected remote message-vpn on the bridge (connect
    # via IP address + physical interface), on both appliances.
    api.x = SolaceXMLBuilder(
        "%s configure primary bridge: %s remote addr: %s phys_intf: %s on primary appliance" % (
            api.primaryRouter, bridgeName, backup_addr, phys_intf), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.remote.create.message_vpn.vpn_name = vpn
    api.x.bridge.remote.create.message_vpn.connect_via
    api.x.bridge.remote.create.message_vpn.addr = backup_addr
    api.x.bridge.remote.create.message_vpn.interface
    api.x.bridge.remote.create.message_vpn.phys_intf = phys_intf
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    api.x = SolaceXMLBuilder("%s configure backup bridge: %s remote addr: %s phys_intf: %s on backup appliance" % (
        api.backupRouter, bridgeName, backup_addr, phys_intf), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.remote.create.message_vpn.vpn_name = vpn
    api.x.bridge.remote.create.message_vpn.connect_via
    api.x.bridge.remote.create.message_vpn.addr = backup_addr
    api.x.bridge.remote.create.message_vpn.interface
    api.x.bridge.remote.create.message_vpn.phys_intf = phys_intf
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _bridge_username_addr(self, api, bridgeName, vpn, backup_addr, phys_intf, username, password, **kwargs):
    # Set the client-username/password on the address-connected remote of
    # the bridge, on both appliances.
    api.x = SolaceXMLBuilder("%s primary bridge: %s remote username: %s on primary appliance" % (
        api.primaryRouter, bridgeName, username), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.connect_via
    api.x.bridge.remote.message_vpn.addr = backup_addr
    api.x.bridge.remote.message_vpn.interface
    api.x.bridge.remote.message_vpn.phys_intf = phys_intf
    api.x.bridge.remote.message_vpn.client_username.name = username
    api.x.bridge.remote.message_vpn.client_username.password = password
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    api.x = SolaceXMLBuilder(
        "%s backup bridge: %s remote username: %s on backup appliance" % (api.backupRouter, bridgeName, username),
        version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.connect_via
    api.x.bridge.remote.message_vpn.addr = backup_addr
    api.x.bridge.remote.message_vpn.interface
    api.x.bridge.remote.message_vpn.phys_intf = phys_intf
    api.x.bridge.remote.message_vpn.client_username.name = username
    api.x.bridge.remote.message_vpn.client_username.password = password
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _bridge_username_vrouter(self, api, bridgeName, vpn, vrouter, **kwargs):
    # Set the client-username/password on the virtual-router-connected
    # remote of the bridge, on both appliances.
    api.x = SolaceXMLBuilder("%s primary bridge: %s remote username: %s on primary appliance" % (
        api.primaryRouter, bridgeName, username), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.router
    api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
    api.x.bridge.remote.message_vpn.client_username.name = username
    api.x.bridge.remote.message_vpn.client_username.password = password
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    api.x = SolaceXMLBuilder(
        "%s backup bridge: %s remote username: %s on backup appliance" % (api.backupRouter, bridgeName, username),
        version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.router
    api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
    api.x.bridge.remote.message_vpn.client_username.name = username
    api.x.bridge.remote.message_vpn.client_username.password = password
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _bridge_enable(self, api, bridgeName, vpn, **kwargs):
    # Administratively enable the bridge ("no shutdown") on both appliances.
    api.x = SolaceXMLBuilder(
        "%s enable bridge: %s for vpn: %s on primary appliance" % (api.primaryRouter, bridgeName, vpn),
        version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.no.shutdown
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    api.x = SolaceXMLBuilder(
        "%s enable bridge: %s for vpn: %s on backup appliance" % (api.backupRouter, bridgeName, vpn),
        version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.no.shutdown
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _bridge_enable_remote_addr(self, api, bridgeName, vpn, backup_addr, phys_intf, **kwargs):
    """Enable (``no shutdown``) the remote message-vpn reached via an address.

    The remote side is addressed by ``backup_addr`` over ``phys_intf``.
    NOTE(review): bare attribute accesses on the XML builder create empty
    elements as a side effect, so statement order matters.
    """
    api.x = SolaceXMLBuilder("%s enable primary bridge: %s remote addr: %s phys_intf: %s on primary appliance" % (
        api.primaryRouter, bridgeName, backup_addr, phys_intf), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.connect_via
    api.x.bridge.remote.message_vpn.addr = backup_addr
    api.x.bridge.remote.message_vpn.interface
    api.x.bridge.remote.message_vpn.phys_intf = phys_intf
    api.x.bridge.remote.message_vpn.no.shutdown
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    # Same request targeted at the backup bridge on the backup appliance.
    api.x = SolaceXMLBuilder("%s enable backup bridge: %s remote addr: %s phys_intf: %s on backup appliance" % (
        api.backupRouter, bridgeName, backup_addr, phys_intf), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.connect_via
    api.x.bridge.remote.message_vpn.addr = backup_addr
    api.x.bridge.remote.message_vpn.interface
    api.x.bridge.remote.message_vpn.phys_intf = phys_intf
    api.x.bridge.remote.message_vpn.no.shutdown
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _bridge_enable_remote_vrouter(self, api, bridgeName, vpn, vrouter, **kwargs):
    """Enable (``no shutdown``) the remote message-vpn reached via a vrouter.

    NOTE(review): bare attribute accesses on the XML builder create empty
    elements as a side effect, so statement order matters.
    """
    api.x = SolaceXMLBuilder("%s enable primary bridge: %s vrouter: %s" % (api.primaryRouter, bridgeName, vrouter),
                             version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.router
    api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
    api.x.bridge.remote.message_vpn.no.shutdown
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    # Same request targeted at the backup bridge on the backup appliance.
    api.x = SolaceXMLBuilder("%s enable backup bridge: %s vrouter: %s" % (api.backupRouter, bridgeName, vrouter),
                             version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.router
    api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
    api.x.bridge.remote.message_vpn.no.shutdown
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _bridge_disable_remote_addr(self, api, bridgeName, vpn, backup_addr, phys_intf, **kwargs):
    """Shut down the remote message-vpn reached via an address on both appliances.

    Mirrors ``_bridge_enable_remote_addr`` but issues ``shutdown`` instead
    of ``no shutdown``.  NOTE(review): bare attribute accesses on the XML
    builder create empty elements as a side effect, so statement order
    matters.
    """
    api.x = SolaceXMLBuilder("%s disable primary bridge: %s remote addr: %s phys_intf: %s on primary appliance" % (
        api.primaryRouter, bridgeName, backup_addr, phys_intf), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.connect_via
    api.x.bridge.remote.message_vpn.addr = backup_addr
    api.x.bridge.remote.message_vpn.interface
    api.x.bridge.remote.message_vpn.phys_intf = phys_intf
    api.x.bridge.remote.message_vpn.shutdown
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    # Same request targeted at the backup bridge on the backup appliance.
    api.x = SolaceXMLBuilder("%s disable backup bridge: %s remote addr: %s phys_intf: %s on backup appliance" % (
        api.backupRouter, bridgeName, backup_addr, phys_intf), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.connect_via
    api.x.bridge.remote.message_vpn.addr = backup_addr
    api.x.bridge.remote.message_vpn.interface
    api.x.bridge.remote.message_vpn.phys_intf = phys_intf
    api.x.bridge.remote.message_vpn.shutdown
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _bridge_disable_remote_vrouter(self, api, bridgeName, vpn, vrouter, **kwargs):
    """Shut down the remote message-vpn reached via a vrouter on both appliances.

    Mirrors ``_bridge_enable_remote_vrouter`` but issues ``shutdown``
    instead of ``no shutdown``.  NOTE(review): bare attribute accesses on
    the XML builder create empty elements as a side effect, so statement
    order matters.
    """
    # Bug fix: the request descriptions previously said "enable" (copied
    # from the enable variant) even though this method shuts the bridge down.
    api.x = SolaceXMLBuilder("%s disable primary bridge: %s vrouter: %s" % (api.primaryRouter, bridgeName, vrouter),
                             version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.router
    api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
    api.x.bridge.remote.message_vpn.shutdown
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    # Same request targeted at the backup bridge on the backup appliance.
    api.x = SolaceXMLBuilder("%s disable backup bridge: %s vrouter: %s" % (api.backupRouter, bridgeName, vrouter),
                             version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.router
    api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
    api.x.bridge.remote.message_vpn.shutdown
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _bridge_create_queue(self, api, queueName, vpnName, username, **kwargs):
    """Create bridge queue ``queueName`` in ``vpnName`` owned by ``username``.

    Builds a SolaceQueue from an inline config dict and enqueues every SEMP
    command it generates.
    """
    logging.info("%s:%s creating bridge queue: %s with owner username: %s" % (
        api.primaryRouter, api.backupRouter, queueName, username))
    # Inline queue configuration (exclusive, 4 GB spool, no redelivery retries).
    queue1 = {}
    queue1['queue_config'] = {}
    queue1['queue_config']["exclusive"] = "true"
    queue1['queue_config']["queue_size"] = "4096"
    queue1['queue_config']["retries"] = 0
    queue1["name"] = queueName
    vpnd = {}
    vpnd['vpn_name'] = vpnName
    vpnd['owner_username'] = username
    q1 = SolaceQueue(api, vpnd, [queue1])
    for c in q1.queue.commands:
        # Bug fix: previously this enqueued ``str(api.x)`` -- a stale request
        # left over from an earlier call -- on every iteration instead of the
        # generated queue command itself.
        api.cq.enqueue(str(c))
def _bridge_set_remote_queue_addr(self, api, bridgeName, vpn, backup_addr, phys_intf, queueName, **kwargs):
    """Bind remote spool queue ``queueName`` on an address-connected bridge.

    NOTE(review): bare attribute accesses on the XML builder create empty
    elements as a side effect, so statement order matters.
    """
    api.x = SolaceXMLBuilder("%s primary bridge: %s set remote queue: %s on primary appliance" % (
        api.primaryRouter, bridgeName, queueName), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.connect_via
    api.x.bridge.remote.message_vpn.addr = backup_addr
    api.x.bridge.remote.message_vpn.interface
    api.x.bridge.remote.message_vpn.phys_intf = phys_intf
    api.x.bridge.remote.message_vpn.message_spool.queue.name = queueName
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    # Same request targeted at the backup bridge on the backup appliance.
    api.x = SolaceXMLBuilder(
        "%s backup bridge: %s set remote queue: %s on backup appliance" % (api.backupRouter, bridgeName, queueName),
        version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.connect_via
    api.x.bridge.remote.message_vpn.addr = backup_addr
    api.x.bridge.remote.message_vpn.interface
    api.x.bridge.remote.message_vpn.phys_intf = phys_intf
    api.x.bridge.remote.message_vpn.message_spool.queue.name = queueName
    api.cq.enqueueV2(str(api.x), backupOnly=True)
def _bridge_set_remote_queue_vrouter(self, api, bridgeName, vpn, vrouter, queueName, **kwargs):
    """Bind remote spool queue ``queueName`` on a vrouter-connected bridge.

    NOTE(review): bare attribute accesses on the XML builder create empty
    elements as a side effect, so statement order matters.
    """
    api.x = SolaceXMLBuilder("%s primary bridge: %s set remote queue: %s on primary appliance" % (
        api.primaryRouter, bridgeName, queueName), version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.primary
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.router
    api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
    api.x.bridge.remote.message_vpn.message_spool.queue.name = queueName
    api.cq.enqueueV2(str(api.x), primaryOnly=True)
    # Same request targeted at the backup bridge on the backup appliance.
    api.x = SolaceXMLBuilder(
        "%s backup bridge: %s set remote queue: %s on backup appliance" % (api.backupRouter, bridgeName, queueName),
        version=api.version)
    api.x.bridge.bridge_name = bridgeName
    api.x.bridge.vpn_name = vpn
    api.x.bridge.backup
    api.x.bridge.remote.message_vpn.vpn_name = vpn
    api.x.bridge.remote.message_vpn.router
    api.x.bridge.remote.message_vpn.virtual_router_name = "v:%s" % vrouter
    api.x.bridge.remote.message_vpn.message_spool.queue.name = queueName
    api.cq.enqueueV2(str(api.x), backupOnly=True)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Box Head.
Contains Box prediction head classes for different meta architectures.
All the box prediction heads have a predict function that receives the
`features` as the first argument and returns `box_encodings`.
"""
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.predictors.heads import head
class MaskRCNNBoxHead(head.Head):
  """Box prediction head for Mask RCNN style architectures.

  Please refer to the Mask RCNN paper: https://arxiv.org/abs/1703.06870
  """

  def __init__(self,
               is_training,
               num_classes,
               fc_hyperparams_fn,
               use_dropout,
               dropout_keep_prob,
               box_code_size,
               share_box_across_classes=False):
    """Constructor.

    Args:
      is_training: Indicates whether the BoxPredictor is in training mode.
      num_classes: number of foreground classes (background excluded), i.e.
        if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K.
      fc_hyperparams_fn: A function returning a tf-slim arg_scope with
        hyperparameters for fully connected ops.
      use_dropout: Whether to apply dropout to the pooled features before
        prediction.
      dropout_keep_prob: Keep probability for dropout; only used when
        use_dropout is True.
      box_code_size: Size of encoding for each box.
      share_box_across_classes: If True, predict a single box shared by all
        classes instead of one box per class.
    """
    super(MaskRCNNBoxHead, self).__init__()
    self._is_training = is_training
    self._num_classes = num_classes
    self._fc_hyperparams_fn = fc_hyperparams_fn
    self._use_dropout = use_dropout
    self._dropout_keep_prob = dropout_keep_prob
    self._box_code_size = box_code_size
    self._share_box_across_classes = share_box_across_classes

  def predict(self, features, num_predictions_per_location=1):
    """Predicts box encodings from ROI-pooled features.

    Args:
      features: A float tensor of shape [batch_size, height, width,
        channels] containing features for a batch of images.
      num_predictions_per_location: Must be 1 for this head.

    Returns:
      box_encodings: A float tensor of shape
        [batch_size, 1, num_classes, code_size] (second-to-last dim is 1
        when share_box_across_classes is True).

    Raises:
      ValueError: If num_predictions_per_location is not 1.
    """
    if num_predictions_per_location != 1:
      raise ValueError('Only num_predictions_per_location=1 is supported')
    # Collapse the spatial dimensions with average pooling, then flatten.
    pooled = tf.reduce_mean(
        features, [1, 2], keep_dims=True, name='AvgPool')
    flattened = slim.flatten(pooled)
    if self._use_dropout:
      flattened = slim.dropout(
          flattened,
          keep_prob=self._dropout_keep_prob,
          is_training=self._is_training)
    # One box per class unless a single shared box is requested.
    num_boxes = 1 if self._share_box_across_classes else self._num_classes
    with slim.arg_scope(self._fc_hyperparams_fn()):
      encodings = slim.fully_connected(
          flattened,
          num_boxes * self._box_code_size,
          reuse=tf.AUTO_REUSE,
          activation_fn=None,
          scope='BoxEncodingPredictor')
    return tf.reshape(encodings, [-1, 1, num_boxes, self._box_code_size])
class ConvolutionalBoxHead(head.Head):
  """Box head that predicts encodings with a single convolution."""

  def __init__(self,
               is_training,
               box_code_size,
               kernel_size,
               use_depthwise=False,
               box_encodings_clip_range=None):
    """Constructor.

    Args:
      is_training: Indicates whether the BoxPredictor is in training mode.
      box_code_size: Size of encoding for each box.
      kernel_size: Size of the final convolution kernel.
      use_depthwise: If True, predict with a depthwise separable convolution
        followed by a 1x1 projection instead of a plain convolution.
      box_encodings_clip_range: Optional (min, max) values used to clip the
        raw box encodings.

    Raises:
      ValueError: if use_depthwise is True and kernel_size is 1.
    """
    if use_depthwise and (kernel_size == 1):
      raise ValueError('Should not use 1x1 kernel when using depthwise conv')
    super(ConvolutionalBoxHead, self).__init__()
    self._is_training = is_training
    self._box_code_size = box_code_size
    self._kernel_size = kernel_size
    self._use_depthwise = use_depthwise
    self._box_encodings_clip_range = box_encodings_clip_range

  def predict(self, features, num_predictions_per_location):
    """Predicts box encodings.

    Args:
      features: A float tensor of shape [batch_size, height, width, channels]
        containing image features.
      num_predictions_per_location: Number of box predictions per spatial
        location.

    Returns:
      box_encodings: A float tensor of shape
        [batch_size, num_anchors, 1, code_size].
    """
    output_depth = num_predictions_per_location * self._box_code_size
    if self._use_depthwise:
      depthwise_out = slim.separable_conv2d(
          features, None, [self._kernel_size, self._kernel_size],
          padding='SAME', depth_multiplier=1, stride=1,
          rate=1, scope='BoxEncodingPredictor_depthwise')
      encodings = slim.conv2d(
          depthwise_out,
          output_depth, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          normalizer_params=None,
          scope='BoxEncodingPredictor')
    else:
      encodings = slim.conv2d(
          features, output_depth,
          [self._kernel_size, self._kernel_size],
          activation_fn=None,
          normalizer_fn=None,
          normalizer_params=None,
          scope='BoxEncodingPredictor')
    batch_size = features.get_shape().as_list()[0]
    if batch_size is None:
      batch_size = tf.shape(features)[0]
    clip_range = self._box_encodings_clip_range
    if clip_range is not None:
      # Clipping the box encodings keeps the inference graph TPU friendly.
      encodings = tf.clip_by_value(encodings, clip_range.min, clip_range.max)
    return tf.reshape(encodings,
                      [batch_size, -1, 1, self._box_code_size])
# TODO(alirezafathi): See if possible to unify Weight Shared with regular
# convolutional box head.
class WeightSharedConvolutionalBoxHead(head.Head):
  """Weight shared convolutional box prediction head.

  This head allows sharing the same set of parameters (weights) when called
  more than once on different feature maps.
  """

  def __init__(self,
               box_code_size,
               kernel_size=3,
               use_depthwise=False,
               box_encodings_clip_range=None,
               return_flat_predictions=True):
    """Constructor.

    Args:
      box_code_size: Size of encoding for each box.
      kernel_size: Size of the final convolution kernel.
      use_depthwise: If True, use a depthwise separable convolution for the
        prediction step.
      box_encodings_clip_range: Optional (min, max) values used to clip the
        raw box encodings.
      return_flat_predictions: If True, return a flattened tensor of shape
        [batch, height * width * num_predictions_per_location, box_coder];
        otherwise return the raw prediction tensor of shape
        [batch, height, width, num_predictions_per_location * box_code_size].

    Raises:
      ValueError: if use_depthwise is True and kernel_size is 1.
    """
    if use_depthwise and (kernel_size == 1):
      raise ValueError('Should not use 1x1 kernel when using depthwise conv')
    super(WeightSharedConvolutionalBoxHead, self).__init__()
    self._box_code_size = box_code_size
    self._kernel_size = kernel_size
    self._use_depthwise = use_depthwise
    self._box_encodings_clip_range = box_encodings_clip_range
    self._return_flat_predictions = return_flat_predictions

  def predict(self, features, num_predictions_per_location):
    """Predicts box encodings.

    Args:
      features: A float tensor of shape [batch_size, height, width, channels]
        containing image features.
      num_predictions_per_location: Number of box predictions per spatial
        location.

    Returns:
      box_encodings: A float tensor of shape
        [batch_size, num_anchors, code_size], or of shape
        [batch, height, width, num_predictions_per_location * box_code_size]
        when return_flat_predictions is False.
    """
    # Pick the convolution op; depthwise shares the same call signature via
    # functools.partial.
    if self._use_depthwise:
      conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1)
    else:
      conv_op = slim.conv2d
    encodings = conv_op(
        features,
        num_predictions_per_location * self._box_code_size,
        [self._kernel_size, self._kernel_size],
        activation_fn=None, stride=1, padding='SAME',
        normalizer_fn=None,
        scope='BoxPredictor')
    batch_size = features.get_shape().as_list()[0]
    if batch_size is None:
      batch_size = tf.shape(features)[0]
    clip_range = self._box_encodings_clip_range
    if clip_range is not None:
      # Clipping the box encodings keeps the inference graph TPU friendly.
      encodings = tf.clip_by_value(encodings, clip_range.min, clip_range.max)
    if self._return_flat_predictions:
      encodings = tf.reshape(encodings,
                             [batch_size, -1, self._box_code_size])
    return encodings
| |
"""KLEE test cases"""
# vim: set sw=4 ts=4 softtabstop=4 expandtab:
import os
import re
import glob
import logging
from collections import namedtuple
from ..exceptions import InputError
_logger = logging.getLogger(__name__)
def _force_match(regex, line, message, path):
match = regex.fullmatch(line)
if not match:
raise InputError(message.format(path))
return match
Early = namedtuple("Early", ["message"])
def _parse_early(path):
    """Load a ``.early`` termination file.

    Returns an Early tuple whose ``message`` field is the list of raw
    lines, or None when the file does not exist.  The containing
    directory must exist.
    """
    assert os.path.exists(os.path.dirname(path))
    try:
        handle = open(path)
    except FileNotFoundError:
        return None
    with handle:
        return Early(handle.readlines())
# Result of parsing a ``testN.<kind>.err`` file.
ErrorFile = namedtuple("ErrorFile", ["message", "file", "line", "assembly_line", "stack"])
# Line-by-line patterns for the fixed header of a KLEE error file.
_RE_ERROR = re.compile(r"Error: (.*)\r?\n")
_RE_FILE = re.compile(r"File: (.*)\r?\n")
_RE_LINE = re.compile(r"Line: (\d+)\r?\n")
_RE_ASSEMBLY_LINE = re.compile(r"assembly.ll line: (\d+)\r?\n")
# File-name patterns used to derive a test's numeric identifier.
_RE_ERROR_FILE = re.compile(r"^test(\d+)\.")
_RE_KTEST_FILE = re.compile(r"^(test(\d+))\.ktest$")
def _parse_error(path):
    """Parse a KLEE ``.err`` file into an ErrorFile tuple.

    The file has a fixed layout: error message, source file, source line,
    assembly.ll line, a literal ``Stack:`` line, then the stack trace.
    Returns None when the file does not exist; raises InputError when a
    header line does not match its expected format.
    """
    assert os.path.exists(os.path.dirname(path))
    try:
        with open(path) as file:
            # Lines 1-4 must each fully match their pattern (EOL included).
            match = _force_match(_RE_ERROR, file.readline(), "{}: Invalid error message in line 1", path)
            message = match.group(1)
            match = _force_match(_RE_FILE, file.readline(), "{}: Invalid file in line 2", path)
            filename = match.group(1)
            match = _force_match(_RE_LINE, file.readline(), "{}: Invalid line number in line 3", path)
            line = int(match.group(1))
            match = _force_match(_RE_ASSEMBLY_LINE, file.readline(), "{}: Invalid assembly.ll line number in line 4", path)
            assline = int(match.group(1))
            # Line 5 must be the literal stack-trace header.
            if file.readline().rstrip() != "Stack:":
                raise InputError("{}: Invalid begin stacktrace stack in line 5".format(path))
            # Everything after "Stack:" is the raw stack trace.
            stack = file.readlines()
            return ErrorFile(message, filename, line, assline, stack)
    except FileNotFoundError:
        return None
class Test:
    """
    A KLEE test case

    Wraps a single ``testN.ktest`` file from a KLEE output directory
    together with the matching ``.early`` / ``.err`` companion files,
    if present.

    Attributes:
    early -- early termination info (None if it did not happen)
    error -- parsed error info from the ``.err`` file (None if no error)
    execution_error -- execution error info (None if it did not happen)
    abort -- abortion error info (None if it did not happen)
    assertion -- assertion error info (None if it did not happen)
    division -- division error info (None if it did not happen)
    free -- use-after-free error info (None if it did not happen)
    ptr -- invalid pointer dereference info (None if it did not happen)
    overshift -- overshift error info (None if it did not happen)
    readonly_error -- read-only error info (None if it did not happen)
    user_error -- user error info (None if it did not happen)
    overflow -- integer overflow info (None if it did not happen)
    misc_error -- unclassified error info (None if it did not happen)
    """
    def __str__(self):
        # Debug-friendly, loosely JSON-like rendering of the test case.
        msg = "Test {\n"
        msg += "ktest_file: {}\n".format(self.ktest_file)
        msg += "identifier: {},\n".format(self.identifier)
        msg += "type: \"{}\",\n".format(self.type_string)
        if self.early:
            msg += "Return early reason: \"{}\"\n".format(self.early.message)
        elif self.error:
            msg += "Error: {},\n".format(self.error)
        msg += "}\n"
        return msg
    @property
    def type_string(self):
        """Human-readable classification of this test case."""
        # NOTE(review): ``msg`` is assigned but never used afterwards.
        msg = "type: "
        if self.is_successful_termination:
            return "successful termination"
        if self.early:
            return "early termination"
        if self.execution_error:
            return "execution error"
        if self.abort:
            return "abort"
        if self.division:
            return "division by zero"
        if self.assertion:
            return "assertion failure"
        if self.free:
            return "use after free"
        if self.ptr:
            return "invalid pointer dereference"
        if self.overshift:
            return "overshift"
        if self.readonly_error:
            return "read only error"
        if self.user_error:
            return "user error"
        if self.overflow:
            return "integer overflow"
        if self.misc_error:
            return "misc error"
        raise Exception('Unhandled test type')
    def __init__(self, path: str):
        # pylint: disable=too-many-branches
        """Load a KLEE test case from ``path`` (a ``.ktest`` file)."""
        if not path.endswith('.ktest'):
            raise Exception('path is not a ktest file')
        if not os.path.exists(path):
            raise Exception('{} does not exist'.format(path))
        # Get identifier and path stub
        self.ktest_file = os.path.abspath(path)
        _logger.debug('Creating test with path "{}"'.format(self.ktest_file))
        basename = os.path.basename(path)
        m = _RE_KTEST_FILE.match(basename)
        if m is None:
            raise Exception('Failed to match KTest file')
        # e.g. "test000001" -- shared stem of all companion files.
        self.__pathstub = m.group(1)
        assert self.__pathstub.startswith('test')
        self.identifier = int(m.group(2))
        assert self.identifier >= 0
        # One attribute per error category; at most one of them will alias
        # ``self.error`` once the error file (if any) has been classified.
        self.error = None
        self.execution_error = None
        self.abort = None
        self.division = None
        self.assertion = None
        self.free = None
        self.ptr = None
        self.overshift = None
        self.readonly_error = None
        self.user_error = None
        self.overflow = None
        self.misc_error = None
        klee_dir_path = os.path.dirname(path)
        assert os.path.exists(klee_dir_path)
        early_path = os.path.join(klee_dir_path, self.__pathstub) + ".early"
        self.early = _parse_early(early_path) # FIXME: Mutually exclusive?
        _logger.debug('klee_dir_path: "{}"'.format(klee_dir_path))
        error_file_map = Test._get_error_file_map_for(klee_dir_path)
        error_file_path = None
        try:
            error_file_path = error_file_map[self.identifier]
        except KeyError:
            # No error file
            pass
        if error_file_path is not None:
            error = os.path.join(klee_dir_path, error_file_path)
            if not os.path.exists(error):
                raise Exception('Error file "{}" does not exist'.format(error))
            self.error = _parse_error(error)
            # Classify using the <kind> component of "testN.<kind>.err":
            # strip the ".err" suffix, then take the text after the last dot.
            error = error[:-4]
            error = error[error.rfind(".")+1:]
            if error == "abort":
                self.abort = self.error
            elif error == "assert":
                self.assertion = self.error
            elif error == "div":
                self.division = self.error
            elif error == "exec":
                self.execution_error = self.error
            elif error == "free":
                self.free = self.error
            elif error == "overflow":
                self.overflow = self.error
            elif error == "overshift":
                self.overshift = self.error
            elif error == "ptr":
                self.ptr = self.error
            elif error == "readonly":
                self.readonly_error = self.error
            elif error == "user":
                self.user_error = self.error
            else:
                self.misc_error = self.error
        # Sanity check
        if self.error:
            assert self.early is None
    @property
    def ktest_path(self):
        """Path to the matching .ktest file"""
        return self.__pathstub + ".ktest"
    @property
    def pc_path(self):
        """Path to the matching .pc file"""
        return self.__pathstub + ".pc"
    # Cache mapping a KLEE directory path to its identifier -> error-file map.
    _error_file_map_cache = dict()
    @classmethod
    def _get_error_file_map_for(cls, path):
        """
        This returns a map from identifiers
        to error files for the particular
        `path` (a KLEE directory).

        This is essentially a cache which
        avoids traversing a KLEE directory
        multiple times.
        """
        # FIXME: There should be a lock on this!
        error_file_map = None
        try:
            return cls._error_file_map_cache[path]
        except KeyError:
            # This KLEE directory has not been visited before
            error_file_map = dict()
            cls._error_file_map_cache[path] = error_file_map
        # Initialise the map
        errorFiles = glob.glob(os.path.join(glob.escape(path),'test*.*.err'))
        for errorFileFullPath in errorFiles:
            # Get identifier from the file name
            basename = os.path.basename(errorFileFullPath)
            m = _RE_ERROR_FILE.match(basename)
            if m is None:
                raise Exception('Could not get identifier from test file name')
            identifier = int(m.group(1))
            _logger.debug("Adding mapping [{}] => \"{}\"".format(
                identifier,
                basename))
            if identifier in error_file_map:
                raise Exception("Identifier should not already be in the map")
            error_file_map[identifier] = basename
        return error_file_map
    @property
    def is_error(self):
        """True when an error file was found for this test."""
        return self.error is not None
    @property
    def is_successful_termination(self):
        """True when the test neither errored nor terminated early."""
        return (not self.is_error) and (self.early is None)
| |
from time import time
from itertools import chain
import logging
import numpy as np
from dipy.tracking.streamline import (set_number_of_points, nbytes,
select_random_set_of_streamlines)
from dipy.segment.clustering import qbx_and_merge
from dipy.tracking.distances import (bundles_distances_mdf,
bundles_distances_mam)
from dipy.align.streamlinear import (StreamlineLinearRegistration,
BundleMinDistanceMetric,
BundleSumDistanceMatrixMetric,
BundleMinDistanceAsymmetricMetric)
from dipy.tracking.streamline import Streamlines, length
from nibabel.affines import apply_affine
def check_range(streamline, gt, lt):
    """Return True if the streamline's length lies strictly between gt and lt.

    Parameters
    ----------
    streamline : ndarray
        Single streamline as an (N, 3) point array.
    gt : float
        Lower (exclusive) length bound.
    lt : float
        Upper (exclusive) length bound.

    Returns
    -------
    bool
    """
    length_s = length(streamline)
    # Chained comparison replaces the verbose
    # ``if (a > gt) & (a < lt): return True / else: return False`` form;
    # bool() keeps the return type a plain Python bool.
    return bool(gt < length_s < lt)
logger = logging.getLogger(__name__)
def bundle_adjacency(dtracks0, dtracks1, threshold):
    """ Find bundle adjacency between two given tracks/bundles

    Parameters
    ----------
    dtracks0 : Streamlines
        White matter tract from one subject
    dtracks1 : Streamlines
        White matter tract from another subject
    threshold : float
        Threshold controls
        how much strictness user wants while calculating bundle adjacency
        between two bundles. Smaller threshold means bundles should be strictly
        adjacent to get higher BA score.

    Returns
    -------
    res : Float
        Bundle adjacency score between two tracts

    References
    ----------
    .. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
        tractography simplification, Frontiers in Neuroscience,
        vol 6, no 175, 2012.
    """
    # Pairwise MDF distance matrix: rows index dtracks0, columns dtracks1.
    d01 = bundles_distances_mdf(dtracks0, dtracks1)
    # Streamlines of dtracks0 that have a neighbor in dtracks1 within threshold.
    pair12 = []
    for i in range(len(dtracks0)):
        if np.min(d01[i, :]) < threshold:
            j = np.argmin(d01[i, :])
            pair12.append((i, j))
    pair12 = np.array(pair12)
    # Symmetric direction: streamlines of dtracks1 matched into dtracks0.
    pair21 = []
    for i in range(len(dtracks1)):
        if np.min(d01[:, i]) < threshold:
            j = np.argmin(d01[:, i])
            pair21.append((i, j))
    pair21 = np.array(pair21)
    # Bug fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``float`` is the documented replacement.
    A = len(pair12) / float(len(dtracks0))
    B = len(pair21) / float(len(dtracks1))
    # BA is the mean of the two directional coverage fractions.
    res = 0.5 * (A + B)
    return res
def ba_analysis(recognized_bundle, expert_bundle, nb_pts=20, threshold=6.):
    """ Calculates bundle adjacency score between two given bundles

    Parameters
    ----------
    recognized_bundle : Streamlines
        Extracted bundle from the whole brain tractogram (eg: AF_L)
    expert_bundle : Streamlines
        Model bundle used as reference while extracting similar type bundle
        from input tractogram
    nb_pts : integer (default 20)
        Discretizing streamlines to have nb_pts number of points
    threshold : float (default 6)
        Threshold used in computing bundle adjacency. Smaller threshold
        means bundles should be strictly adjacent to get a higher BA score.

    Returns
    -------
    Bundle adjacency score between two tracts

    References
    ----------
    .. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
        tractography simplification, Frontiers in Neuroscience,
        vol 6, no 175, 2012.
    """
    # Resample both bundles to the same number of points before comparing.
    resampled_recognized = set_number_of_points(recognized_bundle, nb_pts)
    resampled_expert = set_number_of_points(expert_bundle, nb_pts)
    return bundle_adjacency(resampled_recognized, resampled_expert, threshold)
def cluster_bundle(bundle, clust_thr, rng, nb_pts=20, select_randomly=500000):
    """ Clusters bundles

    Parameters
    ----------
    bundle : Streamlines
        White matter tract
    clust_thr : float
        clustering threshold used in quickbundlesX
    rng : RandomState
    nb_pts: integer (default 20)
        Discretizing streamlines to have nb_pts number of points
    select_randomly: integer (default 500000)
        Randomly select streamlines from the input bundle

    Returns
    -------
    centroids : Streamlines
        clustered centroids of the input bundle

    References
    ----------
    .. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
        tractography simplification, Frontiers in Neuroscience,
        vol 6, no 175, 2012.
    """
    cluster_map = qbx_and_merge(bundle, clust_thr,
                                nb_pts=nb_pts,
                                select_randomly=select_randomly,
                                rng=rng)
    return cluster_map.centroids
def bundle_shape_similarity(bundle1, bundle2, rng, clust_thr=[5, 3, 1.5],
                            threshold=6):
    """ Calculates bundle shape similarity between two given bundles using
    bundle adjacency (BA) metric

    Parameters
    ----------
    bundle1 : Streamlines
        White matter tract from one subject (eg: AF_L)
    bundle2 : Streamlines
        White matter tract from another subject (eg: AF_L)
    rng : RandomState
    clust_thr : list of float (default [5, 3, 1.5])
        list of clustering thresholds used in quickbundlesX
    threshold : float (default 6)
        Threshold used in computing bundle adjacency. Smaller threshold
        means bundles should be strictly similar to get a higher score.

    Returns
    -------
    ba_value : Float
        Bundle similarity score between two tracts

    References
    ----------
    .. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
        tractography simplification, Frontiers in Neuroscience,
        vol 6, no 175, 2012.
    """
    # An empty bundle has no shape to compare.
    if len(bundle1) == 0 or len(bundle2) == 0:
        return 0
    # Reduce each bundle to its QBx centroids before computing BA.
    bundle1_centroids = cluster_bundle(bundle1, clust_thr=clust_thr,
                                       rng=rng)
    bundle2_centroids = cluster_bundle(bundle2, clust_thr=clust_thr,
                                       rng=rng)
    bundle1_centroids = Streamlines(bundle1_centroids)
    bundle2_centroids = Streamlines(bundle2_centroids)
    # Bug fix: ``threshold`` was previously passed positionally into the
    # ``nb_pts`` parameter of ba_analysis, so the BA threshold was silently
    # left at its default and the point count became ``threshold``.
    ba_value = ba_analysis(bundle1_centroids, bundle2_centroids,
                           threshold=threshold)
    return ba_value
class RecoBundles(object):
    def __init__(self, streamlines, greater_than=50, less_than=1000000,
                 cluster_map=None, clust_thr=15, nb_pts=20,
                 rng=None, verbose=False):
        """ Recognition of bundles

        Extract bundles from a participants' tractograms using model bundles
        segmented from a different subject or an atlas of bundles.
        See [Garyfallidis17]_ for the details.

        Parameters
        ----------
        streamlines : Streamlines
            The tractogram in which you want to recognize bundles.
        greater_than : int, optional
            Keep streamlines that have length greater than
            this value (default 50)
        less_than : int, optional
            Keep streamlines have length less than this value (default 1000000)
        cluster_map : QB map, optional.
            Provide existing clustering to start RB faster (default None).
        clust_thr : float, optional.
            Distance threshold in mm for clustering `streamlines`.
            Default: 15.
        nb_pts : int, optional.
            Number of points per streamline (default 20)
        rng : RandomState
            If None define RandomState in initialization function.
            Default: None
        verbose: bool, optional.
            If True, log information.

        Notes
        -----
        Make sure that before creating this class that the streamlines and
        the model bundles are roughly in the same space.
        Also default thresholds are assumed in RAS 1mm^3 space. You may
        want to adjust those if your streamlines are not in world coordinates.

        References
        ----------
        .. [Garyfallidis17] Garyfallidis et al. Recognition of white matter
            bundles using local and global streamline-based registration and
            clustering, Neuroimage, 2017.
        """
        # Boolean mask of streamlines whose length is in
        # (greater_than, less_than).
        map_ind = np.zeros(len(streamlines))
        for i in range(len(streamlines)):
            map_ind[i] = check_range(streamlines[i], greater_than, less_than)
        map_ind = map_ind.astype(bool)
        # Keep the original indices so recognized labels can be mapped back
        # to positions in the unfiltered tractogram.
        self.orig_indices = np.array(list(range(0, len(streamlines))))
        self.filtered_indices = np.array(self.orig_indices[map_ind])
        self.streamlines = Streamlines(streamlines[map_ind])
        self.nb_streamlines = len(self.streamlines)
        self.verbose = verbose
        if self.verbose:
            logger.info("target brain streamlines length = %s" % len(streamlines))
            logger.info("After refining target brain streamlines" +
                        "length = %s" % len(self.streamlines))
        # Coarse-to-fine QBx thresholds; clust_thr is appended later in
        # _cluster_streamlines.
        self.start_thr = [40, 25, 20]
        if rng is None:
            self.rng = np.random.RandomState()
        else:
            self.rng = rng
        if cluster_map is None:
            self._cluster_streamlines(clust_thr=clust_thr, nb_pts=nb_pts)
        else:
            if self.verbose:
                t = time()
            # Reuse a precomputed clustering, rebinding its reference data
            # to the filtered streamlines.
            self.cluster_map = cluster_map
            self.cluster_map.refdata = self.streamlines
            self.centroids = self.cluster_map.centroids
            self.nb_centroids = len(self.centroids)
            self.indices = [cluster.indices for cluster in self.cluster_map]
            if self.verbose:
                logger.info(' Streamlines have %d centroids'
                            % (self.nb_centroids,))
                logger.info(' Total loading duration %0.3f sec. \n'
                            % (time() - t,))
def _cluster_streamlines(self, clust_thr, nb_pts):
if self.verbose:
t = time()
logger.info('# Cluster streamlines using QBx')
logger.info(' Tractogram has %d streamlines'
% (len(self.streamlines), ))
logger.info(' Size is %0.3f MB' % (nbytes(self.streamlines),))
logger.info(' Distance threshold %0.3f' % (clust_thr,))
# TODO this needs to become a default parameter
thresholds = self.start_thr + [clust_thr]
merged_cluster_map = qbx_and_merge(self.streamlines, thresholds,
nb_pts, None, self.rng,
self.verbose)
self.cluster_map = merged_cluster_map
self.centroids = merged_cluster_map.centroids
self.nb_centroids = len(self.centroids)
self.indices = [cluster.indices for cluster in self.cluster_map]
if self.verbose:
logger.info(' Streamlines have %d centroids'
% (self.nb_centroids,))
logger.info(' Total duration %0.3f sec. \n' % (time() - t,))
    def recognize(self, model_bundle, model_clust_thr,
                  reduction_thr=10,
                  reduction_distance='mdf',
                  slr=True,
                  slr_num_threads=None,
                  slr_metric=None,
                  slr_x0=None,
                  slr_bounds=None,
                  slr_select=(400, 600),
                  slr_method='L-BFGS-B',
                  pruning_thr=5,
                  pruning_distance='mdf'):
        """ Recognize the model_bundle in self.streamlines

        Parameters
        ----------
        model_bundle : Streamlines
            Model bundle to look for in the tractogram.
        model_clust_thr : float
            Distance threshold (mm) for clustering the model bundle.
        reduction_thr : float
            Distance threshold (mm) for reducing the search space
            (default 10).
        reduction_distance : string
            mdf or mam (default mdf)
        slr : bool
            Use Streamline-based Linear Registration (SLR) locally
            (default True)
        slr_num_threads : int, optional
            Number of threads passed through to the SLR metric
            (default None).
        slr_metric : BundleMinDistanceMetric
        slr_x0 : array
            (default None)
        slr_bounds : array
            (default None)
        slr_select : tuple
            Select the number of streamlines from model to neighborhood of
            model to perform the local SLR.
        slr_method : string
            Optimization method (default 'L-BFGS-B')
        pruning_thr : float
            Distance threshold (mm) for the final pruning step (default 5).
        pruning_distance : string
            MDF ('mdf') and MAM ('mam')

        Returns
        -------
        recognized_transf : Streamlines
            Recognized bundle in the space of the model tractogram
        recognized_labels : array
            Indices of recognized bundle in the original tractogram

        References
        ----------
        .. [Garyfallidis17] Garyfallidis et al. Recognition of white matter
         bundles using local and global streamline-based registration and
         clustering, Neuroimage, 2017.
        """
        if self.verbose:
            t = time()
            logger.info('## Recognize given bundle ## \n')
        # Cluster the model and use its centroids to shrink the search space.
        model_centroids = self._cluster_model_bundle(
            model_bundle,
            model_clust_thr=model_clust_thr)
        neighb_streamlines, neighb_indices = self._reduce_search_space(
            model_centroids,
            reduction_thr=reduction_thr,
            reduction_distance=reduction_distance)
        if len(neighb_streamlines) == 0:
            # Nothing close enough to the model: empty result.
            return Streamlines([]), []
        if slr:
            transf_streamlines, slr1_bmd = self._register_neighb_to_model(
                model_bundle,
                neighb_streamlines,
                metric=slr_metric,
                x0=slr_x0,
                bounds=slr_bounds,
                select_model=slr_select[0],
                select_target=slr_select[1],
                method=slr_method,
                num_threads=slr_num_threads)
        else:
            transf_streamlines = neighb_streamlines
        pruned_streamlines, labels = self._prune_what_not_in_model(
            model_centroids,
            transf_streamlines,
            neighb_indices,
            pruning_thr=pruning_thr,
            pruning_distance=pruning_distance)
        if self.verbose:
            logger.info('Total duration of recognition time is %0.3f sec.\n'
                        % (time()-t,))
        # Map labels (indices into the refined tractogram) back to the
        # original tractogram indices.
        return pruned_streamlines, self.filtered_indices[labels]
def refine(self, model_bundle, pruned_streamlines, model_clust_thr,
reduction_thr=14,
reduction_distance='mdf',
slr=True,
slr_metric=None,
slr_x0=None,
slr_bounds=None,
slr_select=(400, 600),
slr_method='L-BFGS-B',
pruning_thr=6,
pruning_distance='mdf'):
""" Refine and recognize the model_bundle in self.streamlines
This method expects once pruned streamlines as input. It refines the
first ouput of recobundle by applying second local slr (optional),
and second pruning. This method is useful when we are dealing with
noisy data or when we want to extract small tracks from tractograms.
Parameters
----------
model_bundle : Streamlines
pruned_streamlines : Streamlines
model_clust_thr : float
reduction_thr : float
reduction_distance : string
mdf or mam (default mam)
slr : bool
Use Streamline-based Linear Registration (SLR) locally
(default True)
slr_metric : BundleMinDistanceMetric
slr_x0 : array
(default None)
slr_bounds : array
(default None)
slr_select : tuple
Select the number of streamlines from model to neirborhood of
model to perform the local SLR.
slr_method : string
Optimization method (default 'L-BFGS-B')
pruning_thr : float
pruning_distance : string
MDF ('mdf') and MAM ('mam')
Returns
-------
recognized_transf : Streamlines
Recognized bundle in the space of the model tractogram
recognized_labels : array
Indices of recognized bundle in the original tractogram
References
----------
.. [Garyfallidis17] Garyfallidis et al. Recognition of white matter
bundles using local and global streamline-based registration and
clustering, Neuroimage, 2017.
"""
if self.verbose:
t = time()
logger.info('## Refine recognize given bundle ## \n')
model_centroids = self._cluster_model_bundle(
model_bundle,
model_clust_thr=model_clust_thr)
pruned_model_centroids = self._cluster_model_bundle(
pruned_streamlines,
model_clust_thr=model_clust_thr)
neighb_streamlines, neighb_indices = self._reduce_search_space(
pruned_model_centroids,
reduction_thr=reduction_thr,
reduction_distance=reduction_distance)
if len(neighb_streamlines) == 0: # if no streamlines recognized
return Streamlines([]), []
if self.verbose:
logger.info("2nd local Slr")
if slr:
transf_streamlines, slr2_bmd = self._register_neighb_to_model(
model_bundle,
neighb_streamlines,
metric=slr_metric,
x0=slr_x0,
bounds=slr_bounds,
select_model=slr_select[0],
select_target=slr_select[1],
method=slr_method)
if self.verbose:
logger.info("pruning after 2nd local Slr")
pruned_streamlines, labels = self._prune_what_not_in_model(
model_centroids,
transf_streamlines,
neighb_indices,
pruning_thr=pruning_thr,
pruning_distance=pruning_distance)
if self.verbose:
logger.info('Total duration of recognition time is %0.3f sec.\n'
% (time()-t,))
return pruned_streamlines, self.filtered_indices[labels]
def evaluate_results(self, model_bundle, pruned_streamlines, slr_select):
""" Compare the similiarity between two given bundles, model bundle,
and extracted bundle.
Parameters
----------
model_bundle : Streamlines
pruned_streamlines : Streamlines
slr_select : tuple
Select the number of streamlines from model to neirborhood of
model to perform the local SLR.
Returns
-------
ba_value : float
bundle adjacency value between model bundle and pruned bundle
bmd_value : float
bundle minimum distance value between model bundle and
pruned bundle
"""
spruned_streamlines = Streamlines(pruned_streamlines)
recog_centroids = self._cluster_model_bundle(
spruned_streamlines,
model_clust_thr=1.25)
mod_centroids = self._cluster_model_bundle(
model_bundle,
model_clust_thr=1.25)
recog_centroids = Streamlines(recog_centroids)
model_centroids = Streamlines(mod_centroids)
ba_value = bundle_adjacency(set_number_of_points(recog_centroids, 20),
set_number_of_points(model_centroids, 20),
threshold=10)
BMD = BundleMinDistanceMetric()
static = select_random_set_of_streamlines(model_bundle,
slr_select[0])
moving = select_random_set_of_streamlines(pruned_streamlines,
slr_select[1])
nb_pts = 20
static = set_number_of_points(static, nb_pts)
moving = set_number_of_points(moving, nb_pts)
BMD.setup(static, moving)
x0 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1, 0, 0, 0]) # affine
bmd_value = BMD.distance(x0.tolist())
return ba_value, bmd_value
def _cluster_model_bundle(self, model_bundle, model_clust_thr, nb_pts=20,
select_randomly=500000):
if self.verbose:
t = time()
logger.info('# Cluster model bundle using QBX')
logger.info(' Model bundle has %d streamlines'
% (len(model_bundle), ))
logger.info(' Distance threshold %0.3f' % (model_clust_thr,))
thresholds = self.start_thr + [model_clust_thr]
model_cluster_map = qbx_and_merge(model_bundle, thresholds,
nb_pts=nb_pts,
select_randomly=select_randomly,
rng=self.rng)
model_centroids = model_cluster_map.centroids
nb_model_centroids = len(model_centroids)
if self.verbose:
logger.info(' Model bundle has %d centroids'
% (nb_model_centroids,))
logger.info(' Duration %0.3f sec. \n' % (time() - t, ))
return model_centroids
def _reduce_search_space(self, model_centroids,
reduction_thr=20, reduction_distance='mdf'):
if self.verbose:
t = time()
logger.info('# Reduce search space')
logger.info(' Reduction threshold %0.3f' % (reduction_thr,))
logger.info(' Reduction distance {}'.format(reduction_distance))
if reduction_distance.lower() == 'mdf':
if self.verbose:
logger.info(' Using MDF')
centroid_matrix = bundles_distances_mdf(model_centroids,
self.centroids)
elif reduction_distance.lower() == 'mam':
if self.verbose:
logger.info(' Using MAM')
centroid_matrix = bundles_distances_mam(model_centroids,
self.centroids)
else:
raise ValueError('Given reduction distance not known')
centroid_matrix[centroid_matrix > reduction_thr] = np.inf
mins = np.min(centroid_matrix, axis=0)
close_clusters_indices = list(np.where(mins != np.inf)[0])
close_clusters = self.cluster_map[close_clusters_indices]
neighb_indices = [cluster.indices for cluster in close_clusters]
neighb_streamlines = Streamlines(chain(*close_clusters))
nb_neighb_streamlines = len(neighb_streamlines)
if nb_neighb_streamlines == 0:
if self.verbose:
logger.info('You have no neighbor streamlines... ' +
'No bundle recognition')
return Streamlines([]), []
if self.verbose:
logger.info(' Number of neighbor streamlines %d' %
(nb_neighb_streamlines,))
logger.info(' Duration %0.3f sec. \n' % (time() - t,))
return neighb_streamlines, neighb_indices
    def _register_neighb_to_model(self, model_bundle, neighb_streamlines,
                                  metric=None, x0=None, bounds=None,
                                  select_model=400, select_target=600,
                                  method='L-BFGS-B',
                                  nb_pts=20, num_threads=None):
        # Locally register the neighborhood streamlines to the model bundle
        # with SLR; returns (transformed streamlines, final BMD value).
        if self.verbose:
            logger.info('# Local SLR of neighb_streamlines to model')
            t = time()
        # `metric` is None or one of 'symmetric'/'asymmetric'/'diagonal'.
        # After the first branch assigns an object, the following string
        # comparisons are simply False, so the chain behaves like if/elif.
        if metric is None or metric == 'symmetric':
            metric = BundleMinDistanceMetric(num_threads=num_threads)
        if metric == 'asymmetric':
            metric = BundleMinDistanceAsymmetricMetric()
        if metric == 'diagonal':
            metric = BundleSumDistanceMatrixMetric()
        if x0 is None:
            x0 = 'similarity'
        if bounds is None:
            # Translation (mm), rotation (deg) and isotropic scaling bounds.
            bounds = [(-30, 30), (-30, 30), (-30, 30),
                      (-45, 45), (-45, 45), (-45, 45), (0.8, 1.2)]
        # TODO this can be speeded up by using directly the centroids
        static = select_random_set_of_streamlines(model_bundle,
                                                  select_model, rng=self.rng)
        moving = select_random_set_of_streamlines(neighb_streamlines,
                                                  select_target, rng=self.rng)
        static = set_number_of_points(static, nb_pts)
        moving = set_number_of_points(moving, nb_pts)
        slr = StreamlineLinearRegistration(metric=metric, x0=x0,
                                           bounds=bounds,
                                           method=method)
        slm = slr.optimize(static, moving)
        # Apply the optimized affine to a copy of the full neighborhood
        # (mutates the copy's flat point buffer in place).
        transf_streamlines = neighb_streamlines.copy()
        transf_streamlines._data = apply_affine(
            slm.matrix, transf_streamlines._data)
        transf_matrix = slm.matrix
        slr_bmd = slm.fopt
        slr_iterations = slm.iterations
        if self.verbose:
            logger.info(' Square-root of BMD is %.3f' % (np.sqrt(slr_bmd),))
            if slr_iterations is not None:
                logger.info(' Number of iterations %d' % (slr_iterations,))
            logger.info(' Matrix size {}'.format(slm.matrix.shape))
            # Temporarily tighten numpy print options for readable matrices.
            original = np.get_printoptions()
            np.set_printoptions(3, suppress=True)
            logger.info(transf_matrix)
            logger.info(slm.xopt)
            np.set_printoptions(**original)
            logger.info(' Duration %0.3f sec. \n' % (time() - t,))
        return transf_streamlines, slr_bmd
    def _prune_what_not_in_model(self, model_centroids,
                                 transf_streamlines,
                                 neighb_indices,
                                 mdf_thr=5,
                                 pruning_thr=10,
                                 pruning_distance='mdf'):
        # Drop transformed streamlines whose cluster centroid is farther
        # than `pruning_thr` from every model centroid. Returns the kept
        # streamlines and their indices in the refined tractogram.
        if self.verbose:
            if pruning_thr < 0:
                logger.info('Pruning_thr has to be greater or equal to 0')
            logger.info('# Prune streamlines using the MDF distance')
            logger.info(' Pruning threshold %0.3f' % (pruning_thr,))
            logger.info(' Pruning distance {}'.format(pruning_distance))
            # `t` is only bound under verbose -- its later uses are also
            # guarded by verbose, so this is safe.
            t = time()
        # Re-cluster the transformed neighborhood at a fine threshold.
        thresholds = [40, 30, 20, 10, mdf_thr]
        rtransf_cluster_map = qbx_and_merge(transf_streamlines,
                                            thresholds, nb_pts=20,
                                            select_randomly=500000,
                                            rng=self.rng)
        if self.verbose:
            logger.info(' QB Duration %0.3f sec. \n' % (time() - t, ))
        rtransf_centroids = rtransf_cluster_map.centroids
        if pruning_distance.lower() == 'mdf':
            if self.verbose:
                logger.info(' Using MDF')
            dist_matrix = bundles_distances_mdf(model_centroids,
                                                rtransf_centroids)
        elif pruning_distance.lower() == 'mam':
            if self.verbose:
                logger.info(' Using MAM')
            dist_matrix = bundles_distances_mam(model_centroids,
                                                rtransf_centroids)
        else:
            raise ValueError('Given pruning distance is not available')
        # NaNs and distances beyond the threshold count as unreachable.
        dist_matrix[np.isnan(dist_matrix)] = np.inf
        dist_matrix[dist_matrix > pruning_thr] = np.inf
        pruning_matrix = dist_matrix.copy()
        if self.verbose:
            logger.info(' Pruning matrix size is (%d, %d)'
                        % pruning_matrix.shape)
        # Keep clusters that are close to at least one model centroid.
        mins = np.min(pruning_matrix, axis=0)
        pruned_indices = [rtransf_cluster_map[i].indices
                          for i in np.where(mins != np.inf)[0]]
        pruned_indices = list(chain(*pruned_indices))
        idx = np.array(pruned_indices)
        if len(idx) == 0:
            if self.verbose:
                logger.info(' You have removed all streamlines')
            return Streamlines([]), []
        pruned_streamlines = transf_streamlines[idx]
        # Map neighborhood-local indices back to refined-tractogram indices.
        initial_indices = list(chain(*neighb_indices))
        final_indices = [initial_indices[i] for i in pruned_indices]
        labels = final_indices
        if self.verbose:
            msg = ' Number of centroids: %d'
            logger.info(msg % (len(rtransf_centroids),))
            msg = ' Number of streamlines after pruning: %d'
            logger.info(msg % (len(pruned_streamlines),))
            logger.info(' Duration %0.3f sec. \n' % (time() - t, ))
        return pruned_streamlines, labels
| |
from enum import Enum
import logging
from time import time
import functools as ft
from struct import unpack
from slowboy.util import ClockListener, add_s8
from slowboy.gfx import get_tile_surfaces, ltorgba, decode_2bit, decode_tile
from slowboy.interrupts import InterruptController, InterruptType
import sdl2
from sdl2 import SDL_BlitSurface, SDL_Rect, SDL_Error, SDL_ConvertSurfaceFormat
from sdl2.ext import Color
# Base addresses of the memory regions the GPU owns.
VRAM_START = 0x8000
OAM_START = 0xfe00
# LCDC (0xff40) control-register bit positions and masks.
LCDC_DISPLAY_ENABLE_OFFSET = 7
LCDC_DISPLAY_ENABLE_MASK = 1 << LCDC_DISPLAY_ENABLE_OFFSET
LCDC_WINDOW_TILE_DISPLAY_SELECT_OFFSET = 6
LCDC_WINDOW_TILE_DISPLAY_SELECT_MASK = 1 << LCDC_WINDOW_TILE_DISPLAY_SELECT_OFFSET
LCDC_WINDOW_DISPLAY_ENABLE_OFFSET = 5
LCDC_WINDOW_DISPLAY_ENABLE_MASK = 1 << LCDC_WINDOW_DISPLAY_ENABLE_OFFSET
LCDC_BG_WINDOW_DATA_SELECT_OFFSET = 4
LCDC_BG_WINDOW_DATA_SELECT_MASK = 1 << LCDC_BG_WINDOW_DATA_SELECT_OFFSET
LCDC_BG_TILE_DISPLAY_SELECT_OFFSET = 3
LCDC_BG_TILE_DISPLAY_SELECT_MASK = 1 << LCDC_BG_TILE_DISPLAY_SELECT_OFFSET
LCDC_SPRITE_SIZE_OFFSET = 2
LCDC_SPRITE_DISPLAY_ENABLE_OFFSET = 1
LCDC_SPRITE_DISPLAY_ENABLE_MASK = 1 << LCDC_SPRITE_DISPLAY_ENABLE_OFFSET
LCDC_BG_DISPLAY_OFFSET = 0
LCDC_BG_DISPLAY_MASK = 1 << LCDC_BG_DISPLAY_OFFSET
# STAT (0xff41) status-register bit positions and masks.
STAT_LYC_IE_OFFSET = 6
STAT_LYC_IE_MASK = 1 << STAT_LYC_IE_OFFSET
STAT_OAM_IE_OFFSET = 5
STAT_OAM_IE_MASK = 1 << STAT_OAM_IE_OFFSET
STAT_VBLANK_IE_OFFSET = 4
STAT_VBLANK_IE_MASK = 1 << STAT_VBLANK_IE_OFFSET
STAT_HBLANK_IE_OFFSET = 3
STAT_HBLANK_IE_MASK = 1 << STAT_HBLANK_IE_OFFSET
STAT_LYC_FLAG_OFFSET = 2
STAT_LYC_FLAG_MASK = 1 << STAT_LYC_FLAG_OFFSET
STAT_MODE_OFFSET = 0
STAT_MODE_MASK = 0x03
# Tile geometry (pixels) and tileset-surface geometry (pixels / tiles).
TWIDTH = 8
THEIGHT = 8
TSWIDTH = 128
TSHEIGHT = 128
TSWIDTH_TILES = TSWIDTH // TWIDTH
TSHEIGHT_TILES = TSHEIGHT // THEIGHT
# Visible LCD size.
SCREEN_WIDTH = 160
SCREEN_HEIGHT = 144
SWIDTH_TILES = SCREEN_WIDTH // TWIDTH
SHEIGHT_TILES = SCREEN_HEIGHT // THEIGHT
# Full 32x32-tile background map (only a 160x144 view is shown).
BACKGROUND_WIDTH = 256
BACKGROUND_HEIGHT = 256
BGWIDTH_TILES = BACKGROUND_WIDTH // TWIDTH
BGHEIGHT_TILES = BACKGROUND_HEIGHT // THEIGHT
BACKGROUND_SIZE = (BACKGROUND_WIDTH, BACKGROUND_HEIGHT)
# Window ("foreground") map has the same dimensions.
FOREGROUND_WIDTH = 256
FOREGROUND_HEIGHT = 256
FOREGROUND_SIZE = (FOREGROUND_WIDTH, FOREGROUND_HEIGHT)
# OAM sprite table: 40 entries of 4 bytes (y, x, tile, attributes).
SPRITETAB_SIZE = 40
SPRITETAB_ENTRY_SIZE = 4
def colorto8bit(c):
    """Map a 2-bit Game Boy color (0-3, 0 = lightest) to an 8-bit gray value.

    Parameters
    ----------
    c : int
        Color index in the range 0-3.

    Returns
    -------
    int
        Grayscale intensity in 0-255 (0 maps to 0xff, 3 maps to 0x00).

    Raises
    ------
    ValueError
        If ``c`` is outside 0-3 (previously only ``c > 3`` was rejected,
        silently producing garbage for negative inputs, and the error
        carried no message).
    """
    if not 0 <= c <= 3:
        raise ValueError('color index must be in range 0-3, got {!r}'.format(c))
    # Invert (GB color 0 is lightest) and scale 0-3 onto 0-255 (0x55 == 255 // 3).
    return (c ^ 0x3) * 0x55
class Mode(Enum):
    """LCD controller mode, as reported in STAT bits 0-1."""
    H_BLANK = 0        # horizontal blank
    V_BLANK = 1        # vertical blank
    OAM_READ = 2       # OAM search (OAM inaccessible to CPU)
    OAM_VRAM_READ = 3  # pixel transfer (OAM and VRAM inaccessible)
class GPU(ClockListener):
    def __init__(self, logger=None, log_level=logging.INFO, interrupt_controller=None):
        """Game Boy GPU backed by SDL surfaces.

        Parameters
        ----------
        logger : logging.Logger, optional
            Parent logger; a child logger named after this class is created.
        log_level : int or None, optional
            Level applied to this GPU's logger (default ``logging.INFO``).
        interrupt_controller : InterruptController, optional
            Receiver for STAT/VBLANK interrupts; can also be attached later
            via ``load_interrupt_controller``.
        """
        if logger is None:
            self.logger = logging.getLogger(__name__)
        else:
            self.logger = logger.getChild(__class__.__name__)
        if log_level is not None:
            self.logger.setLevel(log_level)
        self.interrupt_controller = interrupt_controller
        self.vram = bytearray(0xa000 - 0x8000)  # 0x8000-0x9fff
        self.oam = bytearray(0xfea0 - 0xfe00)  # 0xfe00-0xfe9f
        self._bgsurfaces = []
        self._fgsurfaces = []
        # Pre-rendered 256x256 background and window (foreground) maps.
        self._bgsurface = sdl2.SDL_CreateRGBSurfaceWithFormat(0, BACKGROUND_WIDTH, BACKGROUND_HEIGHT,
                                                              32, sdl2.SDL_PIXELFORMAT_RGBA32)
        self._fgsurface = sdl2.SDL_CreateRGBSurfaceWithFormat(0, BACKGROUND_WIDTH, BACKGROUND_HEIGHT,
                                                              32, sdl2.SDL_PIXELFORMAT_RGBA32)
        # TODO may require changes for 8x16 sprites
        # One 8x8 slot per OAM entry, laid out in a horizontal strip.
        self._spritesurface = \
            sdl2.SDL_CreateRGBSurfaceWithFormat(0,
                                                TWIDTH*SPRITETAB_SIZE, THEIGHT,
                                                32, sdl2.SDL_PIXELFORMAT_RGBA32)
        # Decoded OAM entries: (y, x, tileid, attributes) per sprite.
        self._spritetab = [(0, 0, 0, 0) for _ in range(SPRITETAB_SIZE)]
        # Decoded 16x16-tile tileset surface shared by BG/window/sprites.
        self._tileset = sdl2.SDL_CreateRGBSurfaceWithFormat(0, 16*TWIDTH,
                                                            16*THEIGHT, 32,
                                                            sdl2.SDL_PIXELFORMAT_RGBA32)
        # sdl2.SDL_Surface
        self._fgtileset = None  # sdl2.SDL_Surface
        self._sprite_tiles = None  # sdl2.SDL_Surface
        # Decoded palettes (filled in by the bgp/obp0/obp1 setters below).
        self._palette = None
        self._sprite_palette0 = None
        self._sprite_palette1 = None
        self._sprite_palette = None
        self._needs_update = False
        self._needs_draw = False
        """Bitmap indicating which background tiles have been updated in
        :py:attr:GPU._tileset but not :py:attr:GPU._bgsurface"""
        self._stale_bgtiles = 0
        """Bitmap indicating which foreground tiles have been updated in
        :py:attr:GPU._fgtileset but not :py:attr:GPU._fgsurface"""
        self._stale_fgtiles = 0
        # Shadow register storage; the public properties add side effects
        # (palette decoding, interrupts, surface refresh).
        self._bgp = 0x00
        self._obp0 = 0x00
        self._obp1 = 0x00
        self._lcdc = 0x00
        self._stat = 0x00
        self._scy = 0x00
        self._scx = 0x00
        self._ly = 0x00
        self._lyc = 0x00
        self._mode = Mode.OAM_READ
        self._wy = 0x00
        self._wx = 0x00
        # Post-boot register defaults, written through the property setters
        # so palettes and surfaces get initialized.
        self.bgp = 0xfc  # BG palette data
        self.obp0 = 0xff  # Object palette 0 data
        self.obp1 = 0xff  # Object palette 1 data
        self.lcdc = 0x91  # LCD control register
        self.stat = 0  # LCD status register
        self.scy = 0  # Scroll y
        self.scx = 0  # Scroll x
        self.ly = 0  # LCD y-coordinate
        self.lyc = 0  # LY compare
        self.mode = Mode.OAM_READ
        self.wy = 0  # Window y position
        self.wx = 0  # Window x position - 7
        # initialize _palettes
        # NOTE(review): the bgp/obp0/obp1 re-assignments below are no-ops
        # because each setter returns early on an unchanged value --
        # presumably leftovers; confirm before removing.
        self.bgp = self._bgp
        self.obp0 = self._obp0
        self.obp1 = self._obp1
        self.mode = Mode.OAM_READ
        self.stat |= 0x03
        self.mode_clock = 0
        # FPS bookkeeping for the rolling estimate in draw().
        self.last_time = time()
        self.frame_count = 0
        self.fps = 0
    def load_interrupt_controller(self, ic: InterruptController):
        """Attach the controller used to deliver STAT/VBLANK interrupts."""
        self.interrupt_controller = ic
def load_vram(self, vram):
assert len(vram) == 0xa000 - 0x8000
self.vram = bytearray(vram)
def load_oam(self, oam):
assert len(oam) == 0x100
self.oam = bytearray(oam)
    @property
    def lcdc(self):
        # LCD control register (0xff40); see the LCDC_* masks above.
        return self._lcdc

    @lcdc.setter
    def lcdc(self, value):
        # Skip redundant writes so surfaces are not rebuilt needlessly.
        if self._lcdc == value:
            return
        self._lcdc = value
        #self.logger.debug('set LCDC to %#x', value)
        # NOTE(review): logged at INFO (other registers use DEBUG) --
        # presumably a temporary debugging escalation; confirm.
        self.logger.info('set LCDC to %#x', value)
        # LCDC changes tile data/map selection, so refresh everything.
        self._update_vram('lcdc')
    @property
    def bgp(self):
        # BG palette register (0xff47): four 2-bit color fields.
        return self._bgp

    @bgp.setter
    def bgp(self, value):
        if self._bgp == value:
            return
        self._bgp = value
        self.logger.debug('set BGP to %#x', value)
        # Decode the four 2-bit fields into 8-bit gray levels
        # (index 0 = color 0 ... index 3 = color 3).
        self._palette = [
            colorto8bit(value & 0x3),
            colorto8bit((value >> 2) & 0x3),
            colorto8bit((value >> 4) & 0x3),
            colorto8bit((value >> 6) & 0x3),
        ]
        self.logger.debug('set _palette to [%#x, %#x, %#x, %#x]',
                          self._palette[0], self._palette[1], self._palette[2], self._palette[3])
        # Palette change invalidates all decoded tiles.
        self._update_vram('bgp')
    @property
    def obp0(self):
        # Object palette 0 register (0xff48).
        return self._obp0

    @obp0.setter
    def obp0(self, value):
        if self._obp0 == value:
            return
        self._obp0 = value
        self.logger.debug('set OBP0 to %#x', value)
        # lower 2 bits aren't used for object palette (color 0 indicates
        # transparent)
        self._sprite_palette0 = [
            0xff,
            colorto8bit((value >> 2) & 0x3),
            colorto8bit((value >> 4) & 0x3),
            colorto8bit((value >> 6) & 0x3),
        ]
        self.logger.debug('set _sprite_palette0 to [%#x, %#x, %#x]',
                          self._sprite_palette0[1], self._sprite_palette0[2],
                          self._sprite_palette0[3])
        # Sprite palette change requires re-decoding sprite tiles.
        self._update_vram('obp0')
@property
def obp1(self):
return self._obp1
@obp1.setter
def obp1(self, value):
if self._obp1 == value:
return
self._obp1 = value
self.logger.debug('set OBP1 to %#x', value)
# lower 2 bits aren't used for object palette (color 0 indicates
# transparent)
self._sprite_palette1 = [
0xff,
colorto8bit((value >> 2) & 0x3),
colorto8bit((value >> 4) & 0x3),
colorto8bit((value >> 6) & 0x3),
]
self.logger.debug('set _sprite_palette0 to [%#x, %#x, %#x]',
self._sprite_palette1[1], self._sprite_palette1[2],
self._sprite_palette1[3])
self._update_vram('obp1')
    @property
    def scx(self):
        # Background scroll x register (0xff43).
        return self._scx

    @scx.setter
    def scx(self, value):
        # NOTE(review): the equality check runs before the 0xff mask, so a
        # write of e.g. 0x100 when _scx == 0 is treated as a change --
        # harmless (the 'scx' update is a no-op) but worth tightening.
        if self._scx == value:
            return
        value &= 0xff
        self._scx = value
        self._update_vram('scx')
        self.logger.debug('set SCX to %#x', value)
    @property
    def scy(self):
        # Background scroll y register (0xff42).
        return self._scy

    @scy.setter
    def scy(self, value):
        # NOTE(review): as with scx, the equality check precedes the mask.
        if self._scy == value:
            return
        value &= 0xff
        self._scy = value
        self._update_vram('scy')
        self.logger.debug('set SCY to %#x', value)
    @property
    def ly(self):
        # Current scanline register (0xff44).
        return self._ly

    @ly.setter
    def ly(self, value):
        if self._ly == value:
            return
        if value == self.lyc:
            # LYC interrupt
            # Set the STAT coincidence flag and raise the STAT interrupt.
            self.stat |= 1 << STAT_LYC_FLAG_OFFSET
            if self.interrupt_controller is not None:
                self.interrupt_controller.notify_interrupt(InterruptType.stat)
        else:
            # Clear the coincidence flag.
            self.stat &= 0xff ^ (1 << STAT_LYC_FLAG_OFFSET)
        self._ly = value
        self.logger.debug('set LY to %#x', value)
    @property
    def lyc(self):
        # LY compare register (0xff45).
        return self._lyc

    @lyc.setter
    def lyc(self, value):
        if value == self._lyc:
            return
        if value == self.ly:
            # LYC interrupt
            # Mirror of the ly setter: update the coincidence flag and
            # raise the STAT interrupt on match.
            self.stat |= 1 << STAT_LYC_FLAG_OFFSET
            if self.interrupt_controller is not None:
                self.interrupt_controller.notify_interrupt(InterruptType.stat)
        else:
            self.stat &= 0xff ^ (1 << STAT_LYC_FLAG_OFFSET)
        self._lyc = value
        self.logger.debug('set LYC to %#x', value)
    @property
    def wy(self):
        # Window y position register (0xff4a).
        return self._wy

    @wy.setter
    def wy(self, value):
        if self._wy == value:
            return
        self._wy = value
        self._update_vram('wy')
        self.logger.debug('set WY to %#x', value)
@property
def wx(self):
return self._wx
@wx.setter
def wx(self, value):
value -= 7
if self._wy == value:
return
self._wx = value
self._update_vram('wx')
self.logger.debug('set WX to %#x', value)
@property
def stat(self):
stat = self._stat & ~(STAT_LYC_FLAG_MASK | STAT_MODE_MASK)
if self.ly == self.lyc:
stat |= 1 << STAT_LYC_FLAG_OFFSET
stat |= self.mode.value << STAT_MODE_OFFSET
self._stat = stat
return stat
@stat.setter
def stat(self, value):
"""STAT IO register.
This setter should be called to update mode, and it will trigger
interrupts as necessary. If the LYC flag is set to 1, the corresponding
interrupt will also be triggered.
"""
interrupts = (value >> 3) & 0xf
old_mode = self._stat & 0x3
mode = value & 0x3
if (old_mode ^ mode) != 0:
# mode has changed -- new interrupts
if mode == 0 and interrupts & 0x1:
# hblank
self.interrupt_controller.notify_interrupt(InterruptType.stat)
elif mode == 1:
# vblank
if self.interrupt_controller is not None:
if interrupts & 0x2:
self.interrupt_controller.notify_interrupt(InterruptType.stat)
self.interrupt_controller.notify_interrupt(InterruptType.vblank)
elif mode == 2 and interrupts & 0x4:
# oam read
self.interrupt_controller.notify_interrupt(InterruptType.stat)
elif mode < 0 or mode > 3:
raise ValueError('Invalid mode {}'.format(mode))
old_lyc_flag = (self._stat >> STAT_LYC_FLAG_OFFSET) & 1
lyc_flag = (value >> STAT_LYC_FLAG_OFFSET) & 1
if (old_lyc_flag ^ lyc_flag) != 0:
if lyc_flag and interrupts & STAT_LYC_IE_MASK:
# ly coincidence
self.interrupt_controller.notify_interrupt(InterruptType.stat)
else:
value &= ~STAT_LYC_FLAG_MASK
self._stat = value
self.logger.debug('set STAT to %#x', value)
    @property
    def mode(self):
        # Current LCD mode as a Mode enum member.
        return self._mode

    @mode.setter
    def mode(self, value):
        # Reading stat here also normalizes _stat (see the stat getter).
        stat = self.stat
        # Raise a STAT interrupt when entering a mode whose interrupt-enable
        # bit is set.
        if value == Mode.OAM_READ and stat & STAT_OAM_IE_MASK and \
                self.interrupt_controller is not None:
            self.interrupt_controller.notify_interrupt(InterruptType.stat)
        elif value == Mode.V_BLANK and stat & STAT_VBLANK_IE_MASK and \
                self.interrupt_controller is not None:
            self.interrupt_controller.notify_interrupt(InterruptType.stat)
        elif value == Mode.H_BLANK and stat & STAT_HBLANK_IE_MASK and \
                self.interrupt_controller is not None:
            self.interrupt_controller.notify_interrupt(InterruptType.stat)
        self._mode = value
        # Write the (unchanged) stat value back through the setter.
        self.stat = stat
def log_regs(self, log=None):
if log is None:
log = self.logger.debug
log('0xff40: LCDC: %#04x', self.lcdc)
log('0xff41: STAT: %#04x', self.stat)
log('0xff42: SCY : %#04x', self.scy)
log('0xff43: SCX : %#04x', self.scx)
log('0xff44: LY : %#04x', self.ly)
log('0xff45: LYC : %#04x', self.lyc)
#log('0xff45: DMA : %#04x', self.dma)
log('0xff47: BGP : %#04x', self.bgp)
log('0xff48: OBP0: %#04x', self.obp0)
log('0xff49: OBP1: %#04x', self.obp1)
log('0xff4a: WY : %#04x', self.wy)
log('0xff4b: WX : %#04x', self.wx)
def _update_tilesets(self):
"""Update all tileset surfaces. Only needs to be called when pallete or
tile data (in VRAM) changes.
"""
for i in range(TSWIDTH_TILES*TSHEIGHT_TILES):
self._update_tile(i)
def _update_surfaces(self):
self._update_bgsurface()
self._update_fgsurface()
self._update_sprite_surface()
def _update_bgsurface(self):
if self.lcdc & LCDC_BG_TILE_DISPLAY_SELECT_MASK:
# 1=9C00-9FFF
bgmap_start = 0x9c00 - VRAM_START
else:
# 0=9800-9BFF
bgmap_start = 0x9800 - VRAM_START
bgmap = self.vram[bgmap_start:bgmap_start+0x400]
if self.lcdc & LCDC_BG_WINDOW_DATA_SELECT_MASK == 0:
bgmap = bytes(map(ft.partial(add_s8, 128), bgmap))
bgsurface = self._bgsurface
stale_bgtiles = self._stale_bgtiles
tile_size = (8, 8)
width_tiles = BACKGROUND_WIDTH // TWIDTH
height_tiles = BACKGROUND_HEIGHT // THEIGHT
for i, tid in enumerate(bgmap):
if (stale_bgtiles >> tid) & 1 == 0:
continue
x = (i % width_tiles) * TWIDTH
y = (i // width_tiles) * THEIGHT
tx = (tid % TSWIDTH_TILES) * TWIDTH
ty = (tid // TSWIDTH_TILES) * THEIGHT
src = SDL_Rect(tx, ty, 8, 8)
dst = SDL_Rect(x, y, 8, 8)
SDL_BlitSurface(self._tileset, src, bgsurface, dst)
self._stale_bgtiles = 0
def _update_fgsurface(self):
if self.lcdc & LCDC_WINDOW_TILE_DISPLAY_SELECT_MASK:
# 1=9C00-9FFF
fgmap_start = 0x9c00 - VRAM_START
else:
# 0=9800-9BFF
fgmap_start = 0x9800 - VRAM_START
fgmap = self.vram[fgmap_start:fgmap_start+0x400]
if self.lcdc & LCDC_BG_WINDOW_DATA_SELECT_MASK == 0:
fgmap = bytes(map(ft.partial(add_s8, 128), fgmap))
stale_fgtiles = self._stale_fgtiles
fgsurface = self._fgsurface
stale_fgtiles = self._stale_fgtiles
tile_size = (8, 8)
width_tiles = FOREGROUND_WIDTH // TWIDTH
height_tiles = FOREGROUND_HEIGHT // THEIGHT
for i, tid in enumerate(fgmap):
if (stale_fgtiles >> tid) & 1 == 0:
continue
x = (i % width_tiles) * TWIDTH
y = (i // width_tiles) * THEIGHT
tx = (tid % TSWIDTH_TILES) * TWIDTH
ty = (tid // TSWIDTH_TILES) * THEIGHT
src = SDL_Rect(tx, ty, TWIDTH, THEIGHT)
dst = SDL_Rect(x, y, TWIDTH, THEIGHT)
SDL_BlitSurface(self._tileset, src, fgsurface, dst)
self._stale_fgtiles = 0
    def _update_sprite_surface(self):
        """When the tileset or sprite palette is updated, refresh the decoded
        sprite surfaces.

        Re-reads all 40 OAM entries into ``_spritetab`` and blits each
        sprite's tile into its slot in the horizontal sprite strip.
        """
        for i in range(SPRITETAB_SIZE):
            # Each OAM entry is 4 bytes: y, x, tile id, attributes.
            ent = unpack('BBBB',
                         self.oam[i*SPRITETAB_ENTRY_SIZE:(i+1)*SPRITETAB_ENTRY_SIZE])
            self._spritetab[i] = ent
            ypos, xpos, tileid, attrs = ent
            # Slot i in the strip; positions/attributes are applied at draw
            # time, only the tile pixels are cached here.
            x = i * TWIDTH
            y = 0
            tx = (tileid % TSWIDTH_TILES) * TWIDTH
            ty = (tileid // TSWIDTH_TILES) * THEIGHT
            src = SDL_Rect(tx, ty, TWIDTH, THEIGHT)
            dst = SDL_Rect(x, y, TWIDTH, THEIGHT)
            SDL_BlitSurface(self._tileset, src, self._spritesurface, dst)
    def draw(self, surface):
        """Compose one frame (background, window, sprites) onto *surface*.

        Returns True if surface was updated and False otherwise."""
        # Only draw when notify() has flagged the end of a V-blank.
        if self._needs_draw:
            self._needs_draw = False
        else:
            return False
        # Rolling FPS estimate over batches of 10 frames.
        self.frame_count += 1
        if self.frame_count >= 10:
            t = time()
            diff = t - self.last_time
            self.fps = self.frame_count / diff
            self.last_time = t
            self.frame_count %= 10
            self.logger.info('{} fps'.format(self.fps))
        # LCD disabled: blank the screen to white and stop.
        if self.lcdc & LCDC_DISPLAY_ENABLE_MASK == 0:
            dst = SDL_Rect(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT)
            color = sdl2.SDL_MapRGB(surface.format, 0xff, 0xff, 0xff)
            if sdl2.SDL_FillRect(surface, dst, color) < 0:
                raise sdl2.SDL_Error()
            return True
        if self._needs_update:
            #self._update_tilesets()
            #self._update_surfaces()
            self._needs_update = False
        # draw background
        if self.lcdc & LCDC_BG_DISPLAY_MASK:
            # NOTE(review): no wrap-around yet when SCX/SCY push the view
            # past the 256x256 background edge (see commented-out sketch).
            src = SDL_Rect(self.scx, self.scy, SCREEN_WIDTH, SCREEN_HEIGHT)
            dst = SDL_Rect(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT)
            if SDL_BlitSurface(self._bgsurface, src, surface, dst) < 0:
                raise sdl2.SDL_Error()
            #if self.scx + SCREEN_WIDTH > BACKGROUND_WIDTH:
            #    src = SDL_Rect(0, self.scy, self.scx + SCREEN_WIDTH - BACKGROUND_WIDTH, SCREEN_HEIGHT)
            #    dst = SDL_Rect(
        else:
            # BG disabled: white fill.
            dst = SDL_Rect(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT)
            color = sdl2.SDL_MapRGB(surface.format, 0xff, 0xff, 0xff)
            if sdl2.SDL_FillRect(surface, dst, color) < 0:
                raise sdl2.SDL_Error()
        # draw foreground
        if self.lcdc & LCDC_WINDOW_DISPLAY_ENABLE_MASK:
            # Convert to the target surface's pixel format before blitting.
            converted = sdl2.SDL_ConvertSurfaceFormat(self._fgsurface,
                                                      surface.format.contents.format,
                                                      0)
            wx = self.wx
            wy = self.wy
            w = SCREEN_WIDTH - wx
            h = SCREEN_HEIGHT - wy
            src = SDL_Rect(0, 0, w, h)
            dst = SDL_Rect(wx, wy, w, h)
            if SDL_BlitSurface(converted, src, surface, dst) < 0:
                raise sdl2.SDL_Error()
            sdl2.SDL_FreeSurface(converted)
        # draw sprites
        if self.lcdc & LCDC_SPRITE_DISPLAY_ENABLE_MASK:
            converted = sdl2.SDL_ConvertSurfaceFormat(self._spritesurface,
                                                      surface.format.contents.format,
                                                      0)
            for i, ent in enumerate(self._spritetab):
                ypos, xpos, tileid, attrs = ent
                # GB sprite coordinates are offset by (8, 16); entries with
                # smaller values are fully off-screen.
                if ypos < 16 or xpos < 8:
                    continue
                sx = i * TWIDTH
                sy = 0
                src = SDL_Rect(sx, sy, TWIDTH, THEIGHT)
                dx = xpos - 8
                dy = ypos - 16
                dst = SDL_Rect(dx, dy, TWIDTH, THEIGHT)
                if SDL_BlitSurface(converted, src, surface, dst) < 0:
                    raise SDL_Error()
            sdl2.SDL_FreeSurface(converted)
        return True
    def present(self):
        # Intentionally a no-op: presenting is handled by the frontend.
        pass
        #self.renderer.present()
    def notify(self, clock, cycles):
        """Advance the LCD mode state machine by *cycles* clock cycles.

        Mode durations used here (in cycles): OAM search 80, pixel
        transfer 172, H-blank 204, full V-blank 4560.
        """
        self.mode_clock += cycles
        if self.mode == Mode.OAM_READ:
            if self.mode_clock >= 80:
                self.mode = Mode.OAM_VRAM_READ  # 3
                # Fold the new mode into STAT's low two bits.
                self.stat ^= 0x3
                self.stat |= self.mode.value
                self.mode_clock %= 80
        elif self.mode == Mode.OAM_VRAM_READ:
            if self.mode_clock >= 172:
                self.mode = Mode.H_BLANK  # 0
                self.stat ^= 0x3
                self.stat |= self.mode.value
                self.mode_clock %= 172
        elif self.mode == Mode.H_BLANK:
            if self.mode_clock >= 204:
                # After the last visible line (143), enter V-blank;
                # otherwise start the next line's OAM search.
                if self.ly == 143:
                    self.mode = Mode.V_BLANK  # 1
                    self.stat ^= 0x3
                    self.stat |= self.mode.value
                else:
                    self.mode = Mode.OAM_READ  # 2
                    self.stat ^= 0x3
                    self.stat |= self.mode.value
                self.ly += 1
                self.mode_clock %= 204
        elif self.mode == Mode.V_BLANK:
            # LY keeps advancing through lines 144-153 during V-blank.
            if self.mode_clock % 204 == 0:
                self.ly += 1
            if self.mode_clock >= 4560:
                # End of V-blank: request a frame draw and restart at line 0.
                self._needs_draw = True
                self.mode = Mode.OAM_READ  # 2
                self.stat ^= 0x3
                self.stat |= self.mode.value
                self.mode_clock %= 4560
                self.ly = 0
        else:
            raise ValueError('Invalid GPU mode')
    def get_vram(self, addr):
        # Read one byte at VRAM offset `addr` (relative to 0x8000).
        return self.vram[addr]
    def set_vram(self, addr, value):
        # Write one byte at VRAM offset `addr` (relative to 0x8000) and
        # refresh whatever decoded state depends on it.
        self.vram[addr] = value
        self.logger.debug('set VRAM %#06x=%#06x', VRAM_START+addr, value)
        self._update_vram(addr)
    def _update_tile(self, tileid):
        """Update tile :py:obj:`i` in :py:attr:`GPU._bgtiles`.

        Decodes the 16-byte 2bpp tile from VRAM into RGBA and blits it into
        the shared tileset surface, marking it stale for the BG and window
        surfaces.
        """
        # Skip the work when nothing tile-based is displayed.
        # NOTE(review): the window-enable mask is tested twice; the third
        # test was presumably meant to be LCDC_SPRITE_DISPLAY_ENABLE_MASK --
        # confirm before changing.
        if (self.lcdc & LCDC_WINDOW_DISPLAY_ENABLE_MASK == 0) and \
                (self.lcdc & LCDC_BG_DISPLAY_MASK == 0) and \
                (self.lcdc & LCDC_WINDOW_DISPLAY_ENABLE_MASK == 0):
            return
        # TODO changeme
        assert tileid < 0x100
        # 16 bytes per 8x8 2bpp tile.
        tile_idx = tileid * 16
        encoded_tile = self.vram[tile_idx:tile_idx+16]
        decoded_tile = decode_tile(encoded_tile, self._palette)
        #decoded_tile = GBTileset(encoded_tile, (8, 8), (8, 8)).to_rgb(self._palette).data
        # Expand each decoded byte into a 4-byte RGBA pixel.
        rgba_data = bytearray(len(decoded_tile)*4)
        for i, b in enumerate(decoded_tile):
            c = ltorgba(b)
            rgba_data[4*i+0] = (c >> 24) & 0xff
            rgba_data[4*i+1] = (c >> 16) & 0xff
            rgba_data[4*i+2] = (c >> 8) & 0xff
            rgba_data[4*i+3] = c & 0xff
        tile_surface = sdl2.SDL_CreateRGBSurfaceWithFormatFrom(bytes(rgba_data),
                                                               TWIDTH, THEIGHT,
                                                               32, TWIDTH*4,
                                                               sdl2.SDL_PIXELFORMAT_RGBA32)
        if not tile_surface:
            print(sdl2.SDL_GetError())
            raise Exception
        # Blit the decoded tile into its slot in the 16x16-tile tileset.
        x = (tileid % TSWIDTH_TILES) * TWIDTH
        y = (tileid // TSWIDTH_TILES) * THEIGHT
        #print(x, y)
        dst = SDL_Rect(x, y, TWIDTH, THEIGHT)
        if sdl2.SDL_BlitSurface(tile_surface, None, self._tileset, dst) < 0:
            print(sdl2.SDL_GetError())
            raise Exception
        sdl2.SDL_FreeSurface(tile_surface)
        # Mark the tile stale so the BG/window surfaces pick it up.
        self._stale_bgtiles |= (1 << tileid)
        self._stale_fgtiles |= (1 << tileid)
def _update_vram(self, addr):
    """Update internal dataset (decoded tiles, etc) after a write.

    ``addr`` is either a register name (str) or a VRAM-relative offset
    (int). If BG display is disabled (lcdc), :py:attr:`GPU._update_tile`
    will do nothing. When BG display is enabled, all background tile
    surfaces will be decoded. If FG display is disabled,
    :py:attr:`GPU._update_fgtile` will also do nothing. When FG/window
    display is enabled, all foreground tile surfaces will be decoded.
    Same with sprite display.
    """
    if isinstance(addr, str):
        # Register write: refresh whichever caches depend on it.
        if addr == 'lcdc':
            self._update_tilesets()
            self._update_surfaces()
            self._update_sprite_surface()
        elif addr == 'bgp':
            # BG palette change affects decoded tiles and both surfaces.
            self._update_tilesets()
            self._update_bgsurface()
            self._update_fgsurface()
        elif addr == 'obp0':
            self._update_sprite_surface()
        elif addr == 'obp1':
            self._update_sprite_surface()
        elif addr == 'scx' or addr == 'scy':
            # Scroll registers are applied at draw time; nothing cached.
            pass
        elif addr == 'wx' or addr == 'wy':
            pass
    elif isinstance(addr, int) and self.lcdc & LCDC_DISPLAY_ENABLE_MASK \
            and (self.lcdc & LCDC_WINDOW_DISPLAY_ENABLE_MASK or self.lcdc & LCDC_BG_DISPLAY_MASK):
        # VRAM write with display enabled.
        addr += VRAM_START
        # Tilemap data
        if 0x9800 <= addr < 0xa000:
            if self.lcdc & LCDC_BG_TILE_DISPLAY_SELECT_MASK == 0:
                # 0x9800-9bff
                tile = addr - 0x9800
            else:
                # 0x9c00-0x9fff
                tile = addr - 0x9c00
            # NOTE(review): this second select immediately overwrites the
            # ``tile`` computed above, and ``tile`` is unused afterwards
            # (the per-tile update is commented out) — dead code.
            if self.lcdc & LCDC_WINDOW_TILE_DISPLAY_SELECT_MASK == 0:
                # 0x9800-9bff
                tile = addr - 0x9800
            else:
                # 0x9c00-0x9fff
                tile = addr - 0x9c00
            #self._update_tile(tile)
            self._update_surfaces()
        # Tile data
        elif 0x8000 <= addr < 0x9800:
            if self.lcdc & LCDC_BG_WINDOW_DATA_SELECT_MASK == 0:
                # 0x8800-0x97ff
                tile = (addr - 0x8800) // 16
            else:
                # 0x8000-0x8fff
                tile = (addr - 0x8000) // 16
            #self._update_tilesets()
            # NOTE(review): leftover debug output — consider removing.
            print(addr, tile)
            self._update_tile(tile)
        # what is this for? TODO
        #self.vram[addr] = val
        #hi, lo = self.vram[addr], self.vram[addr+1]
        #offset = (addr // 2) * 8
        #for i in range(8):
        #    self.vram[offset+i] = (((hi >> i) & 1) << 1) | ((lo >> i) & 1)
        #if self.enabled:
        #print('update vram', addr)
        self._needs_update = True
        #self._update_tilesets()
        #self._update_surfaces()
        #self._update_bgsurface()
def get_oam(self, addr):
    """Return the OAM (sprite attribute) byte at offset ``addr``."""
    byte = self.oam[addr]
    return byte
def set_oam(self, addr, value):
    """Write an OAM byte; rebuild the sprite surface only on change."""
    previous = self.oam[addr]
    self.oam[addr] = value
    self.logger.debug('set OAM %#06x=%#06x', OAM_START+addr, value)
    changed = previous != value
    if changed:
        self._update_sprite_surface()
@property
def enabled(self):
    """True when the display, window layer or sprite layer is enabled."""
    any_layer_on = ((self.lcdc & LCDC_DISPLAY_ENABLE_MASK)
                    or (self.lcdc & LCDC_WINDOW_DISPLAY_ENABLE_MASK)
                    or (self.lcdc & LCDC_SPRITE_DISPLAY_ENABLE_MASK))
    return any_layer_on
@enabled.setter
def enabled(self, value):
    """Set or clear the LCDC display-enable bit."""
    bit = 1 << LCDC_DISPLAY_ENABLE_OFFSET
    if value:
        self.lcdc |= bit
    else:
        # Clear the bit while keeping the register within 8 bits.
        self.lcdc &= 0xff ^ bit
| |
# -*- coding: utf-8 -*-
"""
flaskbb.management.views
~~~~~~~~~~~~~~~~~~~~~~~~
This module handles the management views.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
from datetime import datetime
from flask import (Blueprint, current_app, request, redirect, url_for, flash,
__version__ as flask_version)
from flask_login import current_user
from flask_plugins import get_all_plugins, get_plugin, get_plugin_from_all
from flask_babelex import gettext as _
from flaskbb import __version__ as flaskbb_version
from flaskbb._compat import iteritems
from flaskbb.forum.forms import UserSearchForm
from flaskbb.utils.settings import flaskbb_config
from flaskbb.utils.helpers import render_template
from flaskbb.utils.decorators import admin_required, moderator_required
from flaskbb.utils.permissions import can_ban_user, can_edit_user
from flaskbb.extensions import db
from flaskbb.user.models import Guest, User, Group
from flaskbb.forum.models import Post, Topic, Forum, Category, Report
from flaskbb.management.models import Setting, SettingsGroup
from flaskbb.management.forms import (AddUserForm, EditUserForm, AddGroupForm,
EditGroupForm, EditForumForm,
AddForumForm, CategoryForm)
# Blueprint bundling all management/admin views of the board.
management = Blueprint("management", __name__)
@management.route("/")
@moderator_required
def overview():
    """Render the management dashboard with basic board statistics."""
    # Short "major.minor" interpreter version string for display.
    python_version = "%s.%s" % (sys.version_info[0], sys.version_info[1])
    counts = {
        "user_count": User.query.count(),
        "topic_count": Topic.query.count(),
        "post_count": Post.query.count(),
    }
    return render_template("management/overview.html",
                           python_version=python_version,
                           flask_version=flask_version,
                           flaskbb_version=flaskbb_version,
                           **counts)
@management.route("/settings", methods=["GET", "POST"])
@management.route("/settings/<path:slug>", methods=["GET", "POST"])
@admin_required
def settings(slug=None):
    """Show and save the settings of one settings group.

    The form class is generated dynamically from the settings stored in
    the active group; only values that actually changed are persisted.
    """
    slug = slug if slug else "general"
    # get the currently active group
    active_group = SettingsGroup.query.filter_by(key=slug).first_or_404()
    # get all groups - used to build the navigation
    all_groups = SettingsGroup.query.all()
    SettingsForm = Setting.get_form(active_group)
    old_settings = Setting.get_settings(active_group)
    new_settings = {}
    form = SettingsForm()
    if form.validate_on_submit():
        for key, values in iteritems(old_settings):
            try:
                # check if the value has changed
                if values['value'] == form[key].data:
                    continue
                else:
                    new_settings[key] = form[key].data
            except KeyError:
                pass
        Setting.update(settings=new_settings, app=current_app)
        flash(_("Settings saved."), "success")
    else:
        # Initial GET (or invalid POST): populate the form from the DB.
        for key, values in iteritems(old_settings):
            try:
                form[key].data = values['value']
            except (KeyError, ValueError):
                pass
    return render_template("management/settings.html", form=form,
                           all_groups=all_groups, active_group=active_group)
# Users
@management.route("/users", methods=['GET', 'POST'])
@moderator_required
def users():
    """List all users, optionally filtered by the search form."""
    page = request.args.get("page", 1, type=int)
    search_form = UserSearchForm()
    # A submitted, valid search replaces the full listing.
    if search_form.validate():
        users = search_form.get_results().\
            paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
    else:
        users = User.query.\
            order_by(User.id.asc()).\
            paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
    return render_template("management/users.html", users=users,
                           search_form=search_form)
@management.route("/users/<int:user_id>/edit", methods=["GET", "POST"])
@moderator_required
def edit_user(user_id):
    """Edit a user's profile, group memberships and optional password."""
    user = User.query.filter_by(id=user_id).first_or_404()
    if not can_edit_user(current_user):
        flash(_("You are not allowed to edit this user."), "danger")
        return redirect(url_for("management.users"))
    # Candidate secondary groups: everything except the user's primary
    # group and the special banned/guest groups.
    secondary_group_query = Group.query.filter(
        db.not_(Group.id == user.primary_group_id),
        db.not_(Group.banned),
        db.not_(Group.guest == True))
    form = EditUserForm(user)
    form.secondary_groups.query = secondary_group_query
    if form.validate_on_submit():
        form.populate_obj(user)
        user.primary_group_id = form.primary_group.data.id
        # Don't override the password
        if form.password.data:
            user.password = form.password.data
        user.save(groups=form.secondary_groups.data)
        flash(_("User successfully updated."), "success")
        return redirect(url_for("management.edit_user", user_id=user.id))
    return render_template("management/user_form.html", form=form,
                           title=_("Edit User"))
@management.route("/users/<int:user_id>/delete", methods=["POST"])
@admin_required
def delete_user(user_id):
    """Remove a user account and return to the user listing."""
    User.query.filter_by(id=user_id).first_or_404().delete()
    flash(_("User successfully deleted."), "success")
    return redirect(url_for("management.users"))
@management.route("/users/add", methods=["GET", "POST"])
@admin_required
def add_user():
    """Create a new user account."""
    form = AddUserForm()
    if not form.validate_on_submit():
        # GET or invalid POST: show the (possibly error-annotated) form.
        return render_template("management/user_form.html", form=form,
                               title=_("Add User"))
    form.save()
    flash(_("User successfully added."), "success")
    return redirect(url_for("management.users"))
@management.route("/users/banned", methods=["GET", "POST"])
@moderator_required
def banned_users():
    """List banned users; a valid search narrows the listing.

    Fix: the original always built and paginated the full banned-user
    query, even when a submitted search replaced the result — a wasted
    database round trip. The default query now runs only when no valid
    search was submitted; the rendered output is unchanged.
    """
    page = request.args.get("page", 1, type=int)
    search_form = UserSearchForm()
    if search_form.validate():
        users = search_form.get_results().\
            paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
    else:
        # Users whose primary group is marked as banned.
        users = User.query.filter(
            Group.banned == True,
            Group.id == User.primary_group_id
        ).paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
    return render_template("management/banned_users.html", users=users,
                           search_form=search_form)
@management.route("/users/<int:user_id>/ban", methods=["POST"])
@moderator_required
def ban_user(user_id):
    """Ban a user, unless the caller lacks permission or targets an admin."""
    if not can_ban_user(current_user):
        flash(_("You do not have the permissions to ban this user."), "danger")
        return redirect(url_for("management.overview"))
    user = User.query.filter_by(id=user_id).first_or_404()
    # Moderators (mod / super_mod) may not ban admin accounts.
    if user.get_permissions()['admin']:
        is_mod = (current_user.permissions['mod'] or
                  current_user.permissions['super_mod'])
        if is_mod:
            flash(_("A moderator cannot ban an admin user."), "danger")
            return redirect(url_for("management.overview"))
    if user.ban():
        flash(_("User is now banned."), "success")
    else:
        flash(_("Could not ban user."), "danger")
    return redirect(url_for("management.banned_users"))
@management.route("/users/<int:user_id>/unban", methods=["POST"])
@moderator_required
def unban_user(user_id):
    """Lift the ban from a user account."""
    if not can_ban_user(current_user):
        flash(_("You do not have the permissions to unban this user."),
              "danger")
        return redirect(url_for("management.overview"))
    user = User.query.filter_by(id=user_id).first_or_404()
    message, level = ((_("User is now unbanned."), "success")
                      if user.unban()
                      else (_("Could not unban user."), "danger"))
    flash(message, level)
    return redirect(url_for("management.banned_users"))
# Reports
@management.route("/reports")
@moderator_required
def reports():
    """Paginated listing of all reports, oldest first."""
    page = request.args.get("page", 1, type=int)
    paginated = (Report.query
                 .order_by(Report.id.asc())
                 .paginate(page, flaskbb_config['USERS_PER_PAGE'], False))
    return render_template("management/reports.html", reports=paginated)
@management.route("/reports/unread")
@moderator_required
def unread_reports():
    """Paginated listing of unhandled reports, newest first."""
    page = request.args.get("page", 1, type=int)
    # "zapped is None" marks a report that no moderator has handled yet.
    paginated = (Report.query
                 .filter(Report.zapped == None)
                 .order_by(Report.id.desc())
                 .paginate(page, flaskbb_config['USERS_PER_PAGE'], False))
    return render_template("management/unread_reports.html",
                           reports=paginated)
@management.route("/reports/<int:report_id>/markread", methods=["POST"])
@management.route("/reports/markread", methods=["POST"])
@moderator_required
def report_markread(report_id=None):
    """Mark one report (by id) or every unread report as read.

    "Read" is tracked via the ``zapped``/``zapped_by`` columns: when the
    report was handled and which moderator handled it.
    """
    # mark single report as read
    if report_id:
        report = Report.query.filter_by(id=report_id).first_or_404()
        if report.zapped:
            flash(_("Report %(id)s is already marked as read.", id=report.id),
                  "success")
            return redirect(url_for("management.reports"))
        report.zapped_by = current_user.id
        report.zapped = datetime.utcnow()
        report.save()
        flash(_("Report %(id)s marked as read.", id=report.id), "success")
        return redirect(url_for("management.reports"))
    # mark all as read
    reports = Report.query.filter(Report.zapped == None).all()
    report_list = []
    for report in reports:
        report.zapped_by = current_user.id
        report.zapped = datetime.utcnow()
        report_list.append(report)
    # Persist all updates in one commit instead of saving each report.
    db.session.add_all(report_list)
    db.session.commit()
    flash(_("All reports were marked as read."), "success")
    return redirect(url_for("management.reports"))
# Groups
@management.route("/groups")
@admin_required
def groups():
    """Paginated listing of all user groups."""
    page = request.args.get("page", 1, type=int)
    paginated = (Group.query
                 .order_by(Group.id.asc())
                 .paginate(page, flaskbb_config['USERS_PER_PAGE'], False))
    return render_template("management/groups.html", groups=paginated)
@management.route("/groups/<int:group_id>/edit", methods=["GET", "POST"])
@admin_required
def edit_group(group_id):
    """Edit an existing group.

    On a valid POST the group is saved and the browser is redirected to
    the group listing. The guest group's cache must be invalidated
    because guest permissions are cached.
    """
    group = Group.query.filter_by(id=group_id).first_or_404()
    form = EditGroupForm(group)
    if form.validate_on_submit():
        form.populate_obj(group)
        group.save()
        if group.guest:
            Guest.invalidate_cache()
        flash(_("Group successfully updated."), "success")
        # Fix: "management.groups" takes no group_id argument — the extra
        # parameter was emitted as a useless query string.
        return redirect(url_for("management.groups"))
    return render_template("management/group_form.html", form=form,
                           title=_("Edit Group"))
@management.route("/groups/<int:group_id>/delete", methods=["POST"])
@admin_required
def delete_group(group_id):
    """Remove a group and return to the group listing."""
    Group.query.filter_by(id=group_id).first_or_404().delete()
    flash(_("Group successfully deleted."), "success")
    return redirect(url_for("management.groups"))
@management.route("/groups/add", methods=["GET", "POST"])
@admin_required
def add_group():
    """Create a new user group."""
    form = AddGroupForm()
    if not form.validate_on_submit():
        return render_template("management/group_form.html", form=form,
                               title=_("Add Group"))
    form.save()
    flash(_("Group successfully added."), "success")
    return redirect(url_for("management.groups"))
# Forums and Categories
@management.route("/forums")
@admin_required
def forums():
    """Show every category (with its forums) ordered by position."""
    all_categories = Category.query.order_by(Category.position.asc()).all()
    return render_template("management/forums.html",
                           categories=all_categories)
@management.route("/forums/<int:forum_id>/edit", methods=["GET", "POST"])
@admin_required
def edit_forum(forum_id):
    """Edit a forum's settings and its moderator list."""
    forum = Forum.query.filter_by(id=forum_id).first_or_404()
    form = EditForumForm(forum)
    if form.validate_on_submit():
        form.save()
        flash(_("Forum successfully updated."), "success")
        return redirect(url_for("management.edit_forum", forum_id=forum.id))
    else:
        # Moderators are edited as a comma-separated list of usernames.
        if forum.moderators:
            form.moderators.data = ",".join([
                user.username for user in forum.moderators
            ])
        else:
            form.moderators.data = None
    return render_template("management/forum_form.html", form=form,
                           title=_("Edit Forum"))
@management.route("/forums/<int:forum_id>/delete", methods=["POST"])
@admin_required
def delete_forum(forum_id):
    """Delete a forum and everything in it."""
    forum = Forum.query.filter_by(id=forum_id).first_or_404()
    # Users with posts in this forum are handed to delete() — presumably
    # so their counters can be corrected; confirm in Forum.delete.
    involved_users = User.query.filter(Topic.forum_id == forum.id,
                                       Post.user_id == User.id).all()
    forum.delete(involved_users)
    flash(_("Forum successfully deleted."), "success")
    return redirect(url_for("management.forums"))
@management.route("/forums/add", methods=["GET", "POST"])
@management.route("/forums/<int:category_id>/add", methods=["GET", "POST"])
@admin_required
def add_forum(category_id=None):
    """Create a forum, optionally preselecting the target category."""
    form = AddForumForm()
    if form.validate_on_submit():
        form.save()
        flash(_("Forum successfully added."), "success")
        return redirect(url_for("management.forums"))
    # Initial GET / invalid POST: preselect all groups and, when the URL
    # carries one, the category.
    form.groups.data = Group.query.order_by(Group.id.asc()).all()
    if category_id:
        form.category.data = Category.query.filter_by(id=category_id).first()
    return render_template("management/forum_form.html", form=form,
                           title=_("Add Forum"))
@management.route("/category/add", methods=["GET", "POST"])
@admin_required
def add_category():
    """Create a new category."""
    form = CategoryForm()
    if not form.validate_on_submit():
        return render_template("management/category_form.html", form=form,
                               title=_("Add Category"))
    form.save()
    flash(_("Category successfully added."), "success")
    return redirect(url_for("management.forums"))
@management.route("/category/<int:category_id>/edit", methods=["GET", "POST"])
@admin_required
def edit_category(category_id):
    """Edit an existing category.

    Fixes: the original rendered the form template directly after a
    successful POST (no POST/redirect/GET, so a reload re-submitted the
    form) and flashed success before the save had run. Now the category
    is saved first and the browser is redirected back to the edit page,
    matching edit_forum/edit_group.
    """
    category = Category.query.filter_by(id=category_id).first_or_404()
    form = CategoryForm(obj=category)
    if form.validate_on_submit():
        form.populate_obj(category)
        category.save()
        flash(_("Category successfully updated."), "success")
        return redirect(url_for("management.edit_category",
                                category_id=category.id))
    return render_template("management/category_form.html", form=form,
                           title=_("Edit Category"))
@management.route("/category/<int:category_id>/delete", methods=["POST"])
@admin_required
def delete_category(category_id):
    """Delete a category together with all of its forums."""
    category = Category.query.filter_by(id=category_id).first_or_404()
    # Users who posted anywhere under this category are handed to
    # delete() — presumably for counter updates; confirm in Category.delete.
    involved_users = User.query.filter(Forum.category_id == category.id,
                                       Topic.forum_id == Forum.id,
                                       Post.user_id == User.id).all()
    category.delete(involved_users)
    flash(_("Category with all associated forums deleted."), "success")
    return redirect(url_for("management.forums"))
# Plugins
@management.route("/plugins")
@admin_required
def plugins():
    """List every installed plugin."""
    return render_template("management/plugins.html",
                           plugins=get_all_plugins())
@management.route("/plugins/<path:plugin>/enable", methods=["POST"])
@admin_required
def enable_plugin(plugin):
    """Enable a plugin by removing its ``DISABLED`` marker file.

    A plugin counts as disabled while a file named ``DISABLED`` exists in
    its plugin directory; enabling deletes that file. The app must be
    reloaded for the change to take effect.

    Fix: corrected user-facing typos in the OSError message
    ("writting" -> "writing", "than" -> "then").
    """
    plugin = get_plugin_from_all(plugin)
    if not plugin.enabled:
        plugin_dir = os.path.join(
            os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
            "plugins", plugin.identifier
        )
        disabled_file = os.path.join(plugin_dir, "DISABLED")
        try:
            if os.path.exists(disabled_file):
                os.remove(disabled_file)
                flash(_("Plugin is enabled. Please reload your app."),
                      "success")
            else:
                flash(_("Plugin is already enabled. Please reload your app."),
                      "warning")
        except OSError:
            flash(_("If you are using a host which doesn't support writing "
                    "on the disk, this won't work - then you need to delete "
                    "the 'DISABLED' file by yourself."), "danger")
    else:
        flash(_("Couldn't enable Plugin."), "danger")
    return redirect(url_for("management.plugins"))
@management.route("/plugins/<path:plugin>/disable", methods=["POST"])
@admin_required
def disable_plugin(plugin):
    """Disable a plugin by creating a ``DISABLED`` marker file.

    Bug fix: in the KeyError handler ``plugin`` is still the URL string
    (``get_plugin`` raised before assigning), so ``plugin.name`` raised
    AttributeError and masked the intended "not found" message. The
    string itself is used now. Also fixed user-facing typos
    ("writting" -> "writing", "than" -> "then").
    """
    try:
        plugin = get_plugin(plugin)
    except KeyError:
        flash(_("Plugin %(plugin)s not found.", plugin=plugin), "danger")
        return redirect(url_for("management.plugins"))
    plugin_dir = os.path.join(
        os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
        "plugins", plugin.identifier
    )
    disabled_file = os.path.join(plugin_dir, "DISABLED")
    try:
        # An empty marker file disables the plugin on the next reload.
        open(disabled_file, "a").close()
        flash(_("Plugin is disabled. Please reload your app."), "success")
    except OSError:
        flash(_("If you are using a host which doesn't "
                "support writing on the disk, this won't work - then you "
                "need to create a 'DISABLED' file by yourself."), "info")
    return redirect(url_for("management.plugins"))
@management.route("/plugins/<path:plugin>/uninstall", methods=["POST"])
@admin_required
def uninstall_plugin(plugin):
    """Uninstall a plugin when it supports uninstallation."""
    plugin = get_plugin_from_all(plugin)
    if not plugin.uninstallable:
        flash(_("Cannot uninstall Plugin."), "danger")
    else:
        plugin.uninstall()
        # The uninstall hook may change settings; drop the settings cache.
        Setting.invalidate_cache()
        flash(_("Plugin has been uninstalled."), "success")
    return redirect(url_for("management.plugins"))
@management.route("/plugins/<path:plugin>/install", methods=["POST"])
@admin_required
def install_plugin(plugin):
    """Install a plugin when it is installable and not yet installed."""
    plugin = get_plugin_from_all(plugin)
    if not (plugin.installable and not plugin.uninstallable):
        flash(_("Cannot install Plugin."), "danger")
    else:
        plugin.install()
        # The install hook may add settings; drop the settings cache.
        Setting.invalidate_cache()
        flash(_("Plugin has been installed."), "success")
    return redirect(url_for("management.plugins"))
| |
#!/usr/bin/python
# Python 2 script: plots scattered/gridded velocity fields over an image.
import numpy
import h5py
from optparse import OptionParser
import matplotlib
import os.path

# Command-line options: locations of input data and output figures.
parser = OptionParser()
parser.add_option("--folder", type="string", default='full/pass1/', dest="folder", help="folder of the output data to be plotted")
parser.add_option("--imageFileName", type="string", default='image001.h5', dest="imageFileName", help="folder of the output data to be plotted")
parser.add_option("--savePlots", action="store_true", dest="savePlots", help="include this flag save plots to files instead of displaying them")
parser.add_option("--gridFileName", type="string", default='outGridVelocity.h5', dest="gridFileName")
parser.add_option("--scatterFileName", type="string", default='outScatteredVelocity.h5', dest="scatterFileName")
parser.add_option("--figurePrefix", type="string", default='fig', dest="figurePrefix")
parser.add_option("--tiePointsFolder", type="string", default='_work', dest="tiePointsFolder")
options, args = parser.parse_args()

# The Agg backend must be selected before pyplot is imported so figures
# can be written to files without a display.
if options.savePlots:
    matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# Resolve input file paths and bail out early if any are missing.
folder = options.folder
scatterFileName = '%s/%s'%(folder,options.scatterFileName)
if(not os.path.exists(scatterFileName)):
    # NOTE(review): exits silently (the print is commented out), unlike
    # the grid/image checks below which report the missing file.
    #print "not found:", scatterFileName
    exit()
gridFileName = '%s/%s'%(folder,options.gridFileName)
if(not os.path.exists(gridFileName)):
    print "not found:", gridFileName
    exit()
tiePointsFileName = '%s/%s/combinedCorrelationTiePoints.h5'%(folder,options.tiePointsFolder)
imageFileName = options.imageFileName
if(not os.path.exists(imageFileName)):
    print "not found:", imageFileName
    exit()
# ---- Plot configuration constants ----
# plot a velocity vector every "skip" pixels from the gridded velocity data
skip = 8
# width and height of each figure (inches)
width = 12
height = 6.75
# number of points to be sampled from the scattered data
maxPoints = 10000
# the locations of the major and minor axes to plot
x0 = 319
y0 = -20.5
# the width around each axis to take points from when plotting axes
dx = 0.5
dy = 0.5
maxImageStd = 3.0 # Image data is clamped to be within 3 std. dev. from the mean.
# Decrease this number to increase image contrast.
# the data is cropped to be within this box [leftLon,rightLon,botLat,topLat]
# NOTE(review): left > right — longitude decreases across these data.
cropBounds = [328.1011223432933,
              308.8828490718264,
              -27.212863196130208,
              -12.803165388750081]
#cropBounds = [320,295,25,39]
#ellipseBounds = [310,302,33,40]
ellipseBounds = cropBounds
# decrease these numbers to increase the length of vectors, and visa versa
scatterVectorScale = 400.0
gridVectorScale = 800.0
# Read the background image, its geographic bounds and validity mask.
h5File = h5py.File(imageFileName, 'r')
bounds = h5File["bounds"][...]
imageData = h5File["data"][...]
imageMask = numpy.array(h5File["mask"][...],bool)
h5File.close()

# Read the scattered (tie-point) velocities.
h5File = h5py.File(scatterFileName, 'r')
x = h5File["x"][...]
y = h5File["y"][...]
vx = h5File["vx"][...]
vy = h5File["vy"][...]
pixelVx = h5File["dataX"][...]
pixelVy = h5File["dataY"][...]
h5File.close()

# Read tie-point time separations and (optionally) the residual fields.
h5File = h5py.File(tiePointsFileName, 'r')
deltaTs = h5File["deltaTs"][...]
residualsFound = "correlationVelocityResiduals" in h5File
if residualsFound:
    correlationVelocityResiduals = h5File["correlationVelocityResiduals"][...]
    correlationLocationResiduals = h5File["correlationLocationResiduals"][...]
h5File.close()

maxDeltaT = numpy.amax(deltaTs)
#print numpy.amax(numpy.abs(vx))
#print numpy.amax(numpy.abs(vy))

# Read the gridded velocity field.
h5File = h5py.File(gridFileName, 'r')
gridVx = h5File["vx"][...]
gridVy = h5File["vy"][...]
h5File.close()

# Grid coordinate axes spanning the image bounds.
gx = numpy.linspace(bounds[0],bounds[1],gridVx.shape[1])
gy = numpy.linspace(bounds[2],bounds[3],gridVx.shape[0])
#print numpy.amax(numpy.abs(gridVx))
#print numpy.amax(numpy.abs(gridVy))
dLon = gx[1]-gx[0]
dLat = gy[1]-gy[0]
# NOTE(review): presumably converts per-lon/lat rates to pixel offsets
# accumulated over the longest time baseline — confirm units upstream.
pixelVx = pixelVx/dLon*maxDeltaT
pixelVy = pixelVy/dLat*maxDeltaT
# Indices of the crop box within the grid coordinate arrays.
xMin = numpy.argmin(numpy.abs(gx-cropBounds[0]))
xMax = numpy.argmin(numpy.abs(gx-cropBounds[1]))+1
yMin = numpy.argmin(numpy.abs(gy-cropBounds[2]))
yMax = numpy.argmin(numpy.abs(gy-cropBounds[3]))+1
imageCropped = imageData[yMin:yMax,xMin:xMax]
maskCropped = imageMask[yMin:yMax,xMin:xMax]
boundsCropped = [gx[xMin],gx[xMax-1],gy[yMin],gy[yMax-1]]

# crop the gridded velocity
gridVx = gridVx[yMin:yMax,xMin:xMax]
gridVy = gridVy[yMin:yMax,xMin:xMax]
[gridX, gridY] = numpy.meshgrid(gx[xMin:xMax],gy[yMin:yMax])

# crop the scattered velocity
# NOTE(review): the x comparisons are reversed (>= right bound, <= left
# bound) because longitude decreases across the crop box.
mask = numpy.logical_and(
    numpy.logical_and(x >= boundsCropped[1],x <= boundsCropped[0]),
    numpy.logical_and(y >= boundsCropped[2],y <= boundsCropped[3]))
x = x[mask]
y = y[mask]
vx = vx[mask]
vy = vy[mask]
pixelVx = pixelVx[mask]
pixelVy = pixelVy[mask]
vMag = numpy.sqrt(vx**2+vy**2)

# Ellipse inscribed in ellipseBounds, used to restrict the axis samples.
xc = 0.5*(ellipseBounds[0]+ellipseBounds[1])
yc = 0.5*(ellipseBounds[2]+ellipseBounds[3])
xr = 0.5*(ellipseBounds[0]-ellipseBounds[1])
yr = 0.5*(ellipseBounds[3]-ellipseBounds[2])
ellipseMask = ((x-xc)/xr)**2 + ((y-yc)/yr)**2 <= 1.0

# Mean vx along the major axis is reported, then zeroed so the plots
# below show absolute rather than mean-removed velocities.
mask = numpy.logical_and(ellipseMask,numpy.abs(y-y0) < dy)
vxMean = numpy.mean(vx[mask])
print "mean vx along the major axis:", vxMean
vxMean = 0.0

# Clamp the image to +/- maxImageStd standard deviations for contrast.
imageMean = numpy.mean(imageCropped[maskCropped])
imageStd = numpy.std(imageCropped[maskCropped])
imageCropped *= maskCropped
imageCropped = numpy.maximum(imageMean-maxImageStd*imageStd,
    numpy.minimum(imageMean+maxImageStd*imageStd,imageCropped))

# Sample at most maxPoints scattered points for the quiver plots.
if(x.size > maxPoints):
    indices = numpy.array(numpy.random.rand(maxPoints)*x.size,int)
else:
    indices = numpy.array(numpy.linspace(0,x.size-1,x.size),int)

# Blend two color lists into the background-image colormap.
colorList1 = numpy.array(((0.0,0.0,0.0),
    (1.0,0.0,0.0),
    (1.0,1.0,0.0),
    (1.0,1.0,1.0)))
colorList2 = numpy.array(((0.5,0.5,0.5),
    (0.67,0.67,0.67),
    (0.83,0.83,0.83),
    (1.0,1.0,1.0)))
alpha = 0.25
colorList = alpha*colorList1 + (1-alpha)*colorList2
colorList = tuple(tuple(x) for x in colorList)
colormap = colors.LinearSegmentedColormap.from_list('my_map',colorList,N=256)

# Points near the major (x) and minor (y) axes, plus the grid row/column
# closest to each axis.
maskXAxis = numpy.abs(y-y0) < dy
maskYAxis = numpy.abs(x-x0) < dx
xAxisIndices = indices[maskXAxis[indices]]
yAxisIndices = indices[maskYAxis[indices]]
xAxisGridIndex = numpy.argmin(numpy.abs(gy-y0))
yAxisGridIndex = numpy.argmin(numpy.abs(gx-x0))
# Figure 1: image with a sample of scattered velocity vectors; points
# near the major axis are drawn red, near the minor axis blue.
fig = plt.figure(1, figsize=[width,height])
#fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111)
plt.imshow(imageCropped, extent=(boundsCropped[0],boundsCropped[1],boundsCropped[3],boundsCropped[2]), cmap=colormap)
ax.set_ylim(ax.get_ylim()[::-1])
plt.quiver(x[indices], y[indices], vx[indices]-vxMean, vy[indices], color='k', pivot='mid', scale_units='xy', scale=scatterVectorScale)
plt.quiver(x[xAxisIndices], y[xAxisIndices], vx[xAxisIndices]-vxMean, vy[xAxisIndices], color='r', pivot='mid', scale_units='xy', scale=scatterVectorScale)
plt.quiver(x[yAxisIndices], y[yAxisIndices], vx[yAxisIndices]-vxMean, vy[yAxisIndices], color='b', pivot='mid', scale_units='xy', scale=scatterVectorScale)
plt.title('a sample of %i scattered velocity vectors'%(indices.size))
ax.set_aspect('equal')
ax.autoscale(tight=True)
#fig = plt.figure(12, figsize=[width,height])
##fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
#ax = fig.add_subplot(111)
#plt.imshow(imageCropped, extent=(boundsCropped[0],boundsCropped[1],boundsCropped[3],boundsCropped[2]), cmap=colormap)
#ax.set_ylim(ax.get_ylim()[::-1])
#plt.axis('tight')

# Figure 2: image with gridded velocity vectors every "skip" pixels.
fig = plt.figure(2, figsize=[width,height])
#fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111)
plt.imshow(imageCropped, extent=(boundsCropped[0],boundsCropped[1],boundsCropped[3],boundsCropped[2]), cmap=colormap)
ax.set_ylim(ax.get_ylim()[::-1])
plt.quiver(gridX[::skip,::skip], gridY[::skip,::skip], gridVx[::skip,::skip]-vxMean, gridVy[::skip,::skip], color='k', pivot='mid', scale_units='xy', scale=gridVectorScale)
plt.title('gridded velocity vector (skip = %i)'%skip)
ax.set_aspect('equal')
ax.autoscale(tight=True)

# Figures 3-5: gridded vx, vy and speed as color maps.
fig = plt.figure(3, figsize=[width,height])
#fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111)
plt.imshow(gridVx-vxMean, extent=(boundsCropped[0],boundsCropped[1],boundsCropped[3],boundsCropped[2]), cmap=plt.get_cmap('jet'))
ax.set_ylim(ax.get_ylim()[::-1])
plt.colorbar()
plt.title('vx')
ax.set_aspect('equal')
ax.autoscale(tight=True)
fig = plt.figure(4, figsize=[width,height])
#fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111)
plt.imshow(gridVy, extent=(boundsCropped[0],boundsCropped[1],boundsCropped[3],boundsCropped[2]), cmap=plt.get_cmap('jet'))
ax.set_ylim(ax.get_ylim()[::-1])
plt.colorbar()
plt.title('vy')
ax.set_aspect('equal')
ax.autoscale(tight=True)
fig = plt.figure(5, figsize=[width,height])
#fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111)
plt.imshow(numpy.sqrt((gridVx-vxMean)**2 + gridVy**2), extent=(boundsCropped[0],boundsCropped[1],boundsCropped[3],boundsCropped[2]), cmap=plt.get_cmap('jet'))
ax.set_ylim(ax.get_ylim()[::-1])
plt.colorbar()
plt.title('|v|')
ax.set_aspect('equal')
ax.autoscale(tight=True)

# Figures 6-7: histograms of velocity components and pixel offsets,
# weighted so the y axis is a tie-point fraction.
weights = numpy.ones(vx.shape)/vx.size
fig = plt.figure(6, figsize=[width,height])
#fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111)
plt.hist(vMag,100,weights=weights,histtype='step')
plt.hist(vx,100,weights=weights,histtype='step')
plt.hist(vy,100,weights=weights,histtype='step')
plt.xlabel('velocity')
plt.ylabel('tie point fraction')
plt.title('velocity histograms')
plt.axis('tight')
plt.legend(['|v|','vx','vy'])
fig = plt.figure(7, figsize=[width,height])
#fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111)
plt.hist(numpy.sqrt(pixelVx**2+pixelVy**2),100,weights=weights,histtype='step')
plt.hist(pixelVx,100,weights=weights,histtype='step')
plt.hist(pixelVy,100,weights=weights,histtype='step')
plt.xlabel('velocity*maxDeltaT (pixels)')
plt.ylabel('tie point fraction')
plt.title('pixel offset histograms (search range)')
plt.axis('tight')
plt.legend(['|v|','vx','vy'])
#x0 = 0.5*(boundsCropped[0]+boundsCropped[1])
#y0 = 0.5*(boundsCropped[2]+boundsCropped[3])
#dx = 0.02*(boundsCropped[1]-boundsCropped[0])
#dy = 0.02*(boundsCropped[3]-boundsCropped[2])

# Figures 8-9: profiles of vy along the x axis and vx along the y axis,
# scattered points (black dots) against the gridded field (line).
fig = plt.figure(8, figsize=[width,height])
#fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111)
plt.plot(x[maskXAxis], vy[maskXAxis], '.k',gx,gridVy[xAxisGridIndex,:],'r')
plt.title('vy along x axis within dy = %.1f of y = %.1f'%(dy,y0))
plt.xlabel('x')
plt.ylabel('vy')
plt.axis('tight')
ax.set_xlim(ax.get_xlim()[::-1])
fig = plt.figure(9, figsize=[width,height])
#fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111)
plt.plot(vx[maskYAxis]-vxMean, y[maskYAxis], '.k',gridVx[:,yAxisGridIndex],gy,'b')
plt.title('vx along y axis within dx = %.1f of x = %.1f'%(dx,x0))
plt.xlabel('vx')
plt.ylabel('y')
plt.axis('tight')

# Figures 10-11 (only when residuals exist): uncertainty histograms,
# with the range clipped at 6x the median residual.
if residualsFound:
    maxVal = 6.0*numpy.median(correlationVelocityResiduals)
    fig = plt.figure(10, figsize=[width,height])
    #fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
    ax = fig.add_subplot(111)
    weights = numpy.ones(correlationVelocityResiduals.shape)/correlationVelocityResiduals.size
    plt.hist(correlationVelocityResiduals,100,range=[0.0,maxVal], weights=weights,histtype='step')
    plt.xlabel('correlation velocity uncertainty')
    plt.ylabel('tie point fraction')
    maxVal = 6.0*numpy.median(correlationLocationResiduals)/1000
    fig = plt.figure(11, figsize=[width,height])
    #fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
    ax = fig.add_subplot(111)
    weights = numpy.ones(correlationLocationResiduals.shape)/correlationLocationResiduals.size
    plt.hist(correlationLocationResiduals/1000,100,range=[0.0,maxVal], weights=weights,histtype='step')
    plt.xlabel('correlation location uncertainty (km)')
    plt.ylabel('tie point fraction')

plt.draw()
# Either write every figure to "<figurePrefix>NNN.png" or show them.
if options.savePlots:
    lastFig = 9
    if residualsFound:
        lastFig = 11
    for index in range(1,lastFig+1):
        outFileName = '%s/%s%03i.png'%(folder,options.figurePrefix, index)
        plt.figure(index)
        plt.savefig(outFileName)
else:
    plt.show()
| |
"""Support for MAX! Thermostats via MAX! Cube."""
import logging
import socket
from maxcube.device import (
MAX_DEVICE_MODE_AUTOMATIC,
MAX_DEVICE_MODE_BOOST,
MAX_DEVICE_MODE_MANUAL,
MAX_DEVICE_MODE_VACATION,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_COMFORT,
PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from . import DATA_KEY
_LOGGER = logging.getLogger(__name__)

ATTR_VALVE_POSITION = "valve_position"
# Extra preset name used by this platform for permanent manual mode.
PRESET_ON = "on"

# There are two magic temperature values, which indicate:
# Off (valve fully closed)
OFF_TEMPERATURE = 4.5
# On (valve fully open)
ON_TEMPERATURE = 30.5

# Lowest Value without turning off
MIN_TEMPERATURE = 5.0
# Largest Value without fully opening
MAX_TEMPERATURE = 30.0

SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE

# Map Home Assistant presets to MAX! device modes...
HASS_PRESET_TO_MAX_MODE = {
    PRESET_AWAY: MAX_DEVICE_MODE_VACATION,
    PRESET_BOOST: MAX_DEVICE_MODE_BOOST,
    PRESET_NONE: MAX_DEVICE_MODE_AUTOMATIC,
    PRESET_ON: MAX_DEVICE_MODE_MANUAL,
}

# ...and back. NOTE(review): MANUAL maps to PRESET_NONE here, not
# PRESET_ON — asymmetric with the dict above; confirm this is intended.
MAX_MODE_TO_HASS_PRESET = {
    MAX_DEVICE_MODE_AUTOMATIC: PRESET_NONE,
    MAX_DEVICE_MODE_BOOST: PRESET_BOOST,
    MAX_DEVICE_MODE_MANUAL: PRESET_NONE,
    MAX_DEVICE_MODE_VACATION: PRESET_AWAY,
}
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Iterate through all MAX! Devices and add thermostats."""
    entities = []
    for handler in hass.data[DATA_KEY].values():
        cube = handler.cube
        for device in cube.devices:
            # Entity name: "<room name> <device name>".
            name = f"{cube.room_by_id(device.room_id).name} {device.name}"
            # Only (wall) thermostats become climate entities.
            if cube.is_thermostat(device) or cube.is_wallthermostat(device):
                entities.append(MaxCubeClimate(handler, name,
                                               device.rf_address))
    if entities:
        add_entities(entities)
class MaxCubeClimate(ClimateEntity):
    """MAX! Cube ClimateEntity."""
    def __init__(self, handler, name, rf_address):
        """Initialize MAX! Cube ClimateEntity."""
        self._name = name
        # RF address uniquely identifies the device on the cube.
        self._rf_address = rf_address
        # The handle owns the cube connection and the mutex serializing writes.
        self._cubehandle = handler
    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS
    @property
    def should_poll(self):
        """Return the polling state."""
        return True
    @property
    def name(self):
        """Return the name of the climate device."""
        return self._name
    @property
    def min_temp(self):
        """Return the minimum temperature."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        # Fall back to the module default when the device reports no limit.
        if device.min_temperature is None:
            return MIN_TEMPERATURE
        return device.min_temperature
    @property
    def max_temp(self):
        """Return the maximum temperature."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        if device.max_temperature is None:
            return MAX_TEMPERATURE
        return device.max_temperature
    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
    @property
    def current_temperature(self):
        """Return the current temperature."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        return device.actual_temperature
    @property
    def hvac_mode(self):
        """Return current operation mode."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        if device.mode in [MAX_DEVICE_MODE_AUTOMATIC, MAX_DEVICE_MODE_BOOST]:
            return HVAC_MODE_AUTO
        # Manual mode at the magic OFF_TEMPERATURE means the valve is closed.
        if (
            device.mode == MAX_DEVICE_MODE_MANUAL
            and device.target_temperature == OFF_TEMPERATURE
        ):
            return HVAC_MODE_OFF
        return HVAC_MODE_HEAT
    @property
    def hvac_modes(self):
        """Return the list of available operation modes."""
        return [HVAC_MODE_OFF, HVAC_MODE_AUTO, HVAC_MODE_HEAT]
    def set_hvac_mode(self, hvac_mode: str):
        """Set new target hvac mode."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        temp = device.target_temperature
        mode = MAX_DEVICE_MODE_MANUAL
        if hvac_mode == HVAC_MODE_OFF:
            # OFF is expressed as manual mode at the closed-valve temperature.
            temp = OFF_TEMPERATURE
        elif hvac_mode != HVAC_MODE_HEAT:
            # Reset the temperature to a sane value.
            # Ideally, we should send 0 and the device will set its
            # temperature according to the schedule. However, current
            # version of the library has a bug which causes an
            # exception when setting values below 8.
            if temp in [OFF_TEMPERATURE, ON_TEMPERATURE]:
                temp = device.eco_temperature
            mode = MAX_DEVICE_MODE_AUTOMATIC
        cube = self._cubehandle.cube
        # Writes to the cube socket are serialized by the handle's mutex.
        with self._cubehandle.mutex:
            try:
                cube.set_temperature_mode(device, temp, mode)
            except (socket.timeout, OSError):
                _LOGGER.error("Setting HVAC mode failed")
                return
    @property
    def hvac_action(self):
        """Return the current running hvac operation if supported."""
        cube = self._cubehandle.cube
        device = cube.device_by_rf(self._rf_address)
        valve = 0
        if cube.is_thermostat(device):
            valve = device.valve_position
        elif cube.is_wallthermostat(device):
            # Wall thermostats have no valve of their own: use the first open
            # valve of a radiator thermostat in the same room instead.
            for device in cube.devices_by_room(cube.room_by_id(device.room_id)):
                if cube.is_thermostat(device) and device.valve_position > 0:
                    valve = device.valve_position
                    break
        else:
            return None
        # Assume heating when valve is open
        if valve > 0:
            return CURRENT_HVAC_HEAT
        return (
            CURRENT_HVAC_OFF if self.hvac_mode == HVAC_MODE_OFF else CURRENT_HVAC_IDLE
        )
    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        # Values outside [min_temp, max_temp] are the magic on/off markers,
        # not real setpoints, so report them as unknown.
        if (
            device.target_temperature is None
            or device.target_temperature < self.min_temp
            or device.target_temperature > self.max_temp
        ):
            return None
        return device.target_temperature
    def set_temperature(self, **kwargs):
        """Set new target temperatures."""
        if kwargs.get(ATTR_TEMPERATURE) is None:
            return False
        target_temperature = kwargs.get(ATTR_TEMPERATURE)
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        cube = self._cubehandle.cube
        with self._cubehandle.mutex:
            try:
                cube.set_target_temperature(device, target_temperature)
            except (socket.timeout, OSError):
                _LOGGER.error("Setting target temperature failed")
                return False
    @property
    def preset_mode(self):
        """Return the current preset mode."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        if self.hvac_mode == HVAC_MODE_OFF:
            return PRESET_NONE
        # In manual mode the active preset is inferred from the setpoint.
        if device.mode == MAX_DEVICE_MODE_MANUAL:
            if device.target_temperature == device.comfort_temperature:
                return PRESET_COMFORT
            if device.target_temperature == device.eco_temperature:
                return PRESET_ECO
            if device.target_temperature == ON_TEMPERATURE:
                return PRESET_ON
            return PRESET_NONE
        return MAX_MODE_TO_HASS_PRESET[device.mode]
    @property
    def preset_modes(self):
        """Return available preset modes."""
        return [
            PRESET_NONE,
            PRESET_BOOST,
            PRESET_COMFORT,
            PRESET_ECO,
            PRESET_AWAY,
            PRESET_ON,
        ]
    def set_preset_mode(self, preset_mode):
        """Set new operation mode."""
        device = self._cubehandle.cube.device_by_rf(self._rf_address)
        temp = device.target_temperature
        mode = MAX_DEVICE_MODE_AUTOMATIC
        # Temperature-based presets require manual mode plus a setpoint.
        if preset_mode in [PRESET_COMFORT, PRESET_ECO, PRESET_ON]:
            mode = MAX_DEVICE_MODE_MANUAL
            if preset_mode == PRESET_COMFORT:
                temp = device.comfort_temperature
            elif preset_mode == PRESET_ECO:
                temp = device.eco_temperature
            else:
                temp = ON_TEMPERATURE
        else:
            mode = HASS_PRESET_TO_MAX_MODE[preset_mode] or MAX_DEVICE_MODE_AUTOMATIC
        with self._cubehandle.mutex:
            try:
                self._cubehandle.cube.set_temperature_mode(device, temp, mode)
            except (socket.timeout, OSError):
                _LOGGER.error("Setting operation mode failed")
                return
    @property
    def device_state_attributes(self):
        """Return the optional state attributes."""
        cube = self._cubehandle.cube
        device = cube.device_by_rf(self._rf_address)
        # Only radiator thermostats expose a valve position.
        if not cube.is_thermostat(device):
            return {}
        return {ATTR_VALVE_POSITION: device.valve_position}
    def update(self):
        """Get latest data from MAX! Cube."""
        self._cubehandle.update()
| |
"""Support for control of ElkM1 sensors."""
from homeassistant.components.elkm1 import (
DOMAIN as ELK_DOMAIN, create_elk_entities, ElkEntity)
DEPENDENCIES = [ELK_DOMAIN]
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Create the Elk-M1 sensor platform."""
    # Only set up through discovery from the elkm1 component.
    if discovery_info is None:
        return
    elk = hass.data[ELK_DOMAIN]['elk']
    # Accumulate one entity list covering every sensor-like element type.
    entities = create_elk_entities(
        hass, elk.counters, 'counter', ElkCounter, [])
    entities = create_elk_entities(
        hass, elk.keypads, 'keypad', ElkKeypad, entities)
    entities = create_elk_entities(
        hass, [elk.panel], 'panel', ElkPanel, entities)
    entities = create_elk_entities(
        hass, elk.settings, 'setting', ElkSetting, entities)
    entities = create_elk_entities(
        hass, elk.zones, 'zone', ElkZone, entities)
    async_add_entities(entities, True)
def temperature_to_state(temperature, undefined_temperature):
    """Convert temperature to a state.

    Values at or below the sensor's "undefined" sentinel are reported
    as None (unknown); everything above passes through unchanged.
    """
    if temperature > undefined_temperature:
        return temperature
    return None
class ElkSensor(ElkEntity):
    """Base representation of Elk-M1 sensor."""
    def __init__(self, element, elk, elk_data):
        """Initialize the base of all Elk sensors."""
        super().__init__(element, elk, elk_data)
        # Last known value; populated by subclass _element_changed callbacks.
        self._state = None
    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
class ElkCounter(ElkSensor):
    """Representation of an Elk-M1 Counter."""
    @property
    def icon(self):
        """Icon to use in the frontend."""
        return 'mdi:numeric'
    def _element_changed(self, element, changeset):
        # Counter state is the raw numeric value reported by the panel.
        self._state = self._element.value
class ElkKeypad(ElkSensor):
    """Representation of an Elk-M1 Keypad."""
    @property
    def temperature_unit(self):
        """Return the temperature unit."""
        return self._temperature_unit
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._temperature_unit
    @property
    def icon(self):
        """Icon to use in the frontend."""
        return 'mdi:thermometer-lines'
    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        from elkm1_lib.util import username
        attrs = self.initial_attrs()
        # area / last_user are presumably 0-based internally; +1 gives the
        # 1-based numbering shown in Elk documentation -- TODO confirm.
        attrs['area'] = self._element.area + 1
        attrs['temperature'] = self._element.temperature
        attrs['last_user_time'] = self._element.last_user_time.isoformat()
        attrs['last_user'] = self._element.last_user + 1
        attrs['code'] = self._element.code
        attrs['last_user_name'] = username(self._elk, self._element.last_user)
        attrs['last_keypress'] = self._element.last_keypress
        return attrs
    def _element_changed(self, element, changeset):
        # -40 is the keypad's "no reading" sentinel; mapped to None.
        self._state = temperature_to_state(self._element.temperature, -40)
    async def async_added_to_hass(self):
        """Register callback for ElkM1 changes and update entity state."""
        await super().async_added_to_hass()
        # Publish this entity id so other platforms can find the keypad.
        self.hass.data[ELK_DOMAIN]['keypads'][
            self._element.index] = self.entity_id
class ElkPanel(ElkSensor):
    """Representation of an Elk-M1 Panel."""
    @property
    def icon(self):
        """Icon to use in the frontend."""
        return "mdi:home"
    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        attrs = self.initial_attrs()
        attrs['system_trouble_status'] = self._element.system_trouble_status
        return attrs
    def _element_changed(self, element, changeset):
        # State reflects the link to the panel, and whether remote
        # programming has paused normal operation.
        if self._elk.is_connected():
            self._state = 'Paused' if self._element.remote_programming_status \
                else 'Connected'
        else:
            self._state = 'Disconnected'
class ElkSetting(ElkSensor):
    """Representation of an Elk-M1 Setting."""
    @property
    def icon(self):
        """Icon to use in the frontend."""
        return 'mdi:numeric'
    def _element_changed(self, element, changeset):
        self._state = self._element.value
    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        from elkm1_lib.const import SettingFormat
        attrs = self.initial_attrs()
        # Expose the panel's value format (e.g. number/time) as lowercase text.
        attrs['value_format'] = SettingFormat(
            self._element.value_format).name.lower()
        return attrs
class ElkZone(ElkSensor):
    """Representation of an Elk-M1 Zone."""
    @property
    def icon(self):
        """Icon to use in the frontend."""
        from elkm1_lib.const import ZoneType
        # Map zone definitions to Material Design icon names; any type not
        # listed falls back to 'alarm-bell' below.
        zone_icons = {
            ZoneType.FIRE_ALARM.value: 'fire',
            ZoneType.FIRE_VERIFIED.value: 'fire',
            ZoneType.FIRE_SUPERVISORY.value: 'fire',
            ZoneType.KEYFOB.value: 'key',
            ZoneType.NON_ALARM.value: 'alarm-off',
            ZoneType.MEDICAL_ALARM.value: 'medical-bag',
            ZoneType.POLICE_ALARM.value: 'alarm-light',
            ZoneType.POLICE_NO_INDICATION.value: 'alarm-light',
            ZoneType.KEY_MOMENTARY_ARM_DISARM.value: 'power',
            ZoneType.KEY_MOMENTARY_ARM_AWAY.value: 'power',
            ZoneType.KEY_MOMENTARY_ARM_STAY.value: 'power',
            ZoneType.KEY_MOMENTARY_DISARM.value: 'power',
            ZoneType.KEY_ON_OFF.value: 'toggle-switch',
            ZoneType.MUTE_AUDIBLES.value: 'volume-mute',
            ZoneType.POWER_SUPERVISORY.value: 'power-plug',
            ZoneType.TEMPERATURE.value: 'thermometer-lines',
            ZoneType.ANALOG_ZONE.value: 'speedometer',
            ZoneType.PHONE_KEY.value: 'phone-classic',
            ZoneType.INTERCOM_KEY.value: 'deskphone'
        }
        return 'mdi:{}'.format(
            zone_icons.get(self._element.definition, 'alarm-bell'))
    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        from elkm1_lib.const import (
            ZoneLogicalStatus, ZonePhysicalStatus, ZoneType)
        attrs = self.initial_attrs()
        attrs['physical_status'] = ZonePhysicalStatus(
            self._element.physical_status).name.lower()
        attrs['logical_status'] = ZoneLogicalStatus(
            self._element.logical_status).name.lower()
        attrs['definition'] = ZoneType(
            self._element.definition).name.lower()
        attrs['area'] = self._element.area + 1
        attrs['bypassed'] = self._element.bypassed
        attrs['triggered_alarm'] = self._element.triggered_alarm
        return attrs
    @property
    def temperature_unit(self):
        """Return the temperature unit."""
        from elkm1_lib.const import ZoneType
        # Only temperature zones carry a unit.
        if self._element.definition == ZoneType.TEMPERATURE.value:
            return self._temperature_unit
        return None
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        from elkm1_lib.const import ZoneType
        if self._element.definition == ZoneType.TEMPERATURE.value:
            return self._temperature_unit
        # Analog zones report a voltage.
        if self._element.definition == ZoneType.ANALOG_ZONE.value:
            return 'V'
        return None
    def _element_changed(self, element, changeset):
        from elkm1_lib.const import ZoneLogicalStatus, ZoneType
        from elkm1_lib.util import pretty_const
        # State depends on zone type: temperature (-60 is the "no reading"
        # sentinel), analog voltage, or the pretty-printed logical status.
        if self._element.definition == ZoneType.TEMPERATURE.value:
            self._state = temperature_to_state(self._element.temperature, -60)
        elif self._element.definition == ZoneType.ANALOG_ZONE.value:
            self._state = self._element.voltage
        else:
            self._state = pretty_const(ZoneLogicalStatus(
                self._element.logical_status).name)
| |
__author__ = 'foxtrot'
import tweepy
from tweepy import Stream
import json
import indicoio
import time
import threading
from googleapiclient.discovery import build
from MyState import *
import urllib2
# Tokens and keys
# NOTE(review): live credentials are hard-coded in source; they should be
# rotated and loaded from the environment or a config file instead.
consumer_key = "nEcXxJ8rQ7UyDrPYzzDTFScLl"
consumer_secret = "60GrqyEeVwLLP5fLnx6OUtAixrAGpinZ1eBcujwCi4xKRutSPz"
access_token = "23385479-1AuhkNFfVDuzScTDh6cQzS5YoBjTLbA2h4qp1VGdj"
access_token_secret = "0tza6LdGohaT7QqU6L9oyAT7c8ifWNqNEy1FoL9lc7Old"
indicoio.config.api_key = 'e30201b851eb84179e68bf31aa36684a'
googleAPIKey = 'AIzaSyBo1twK-Cb-Mb1q7YCK1HaLFBCLb6BWwrc'
# Globals
MAX_CACHE_NUMBER = 1000  # Cache only 1k points
KeywordsToFilter = list()  # Can only add to this list.
PointsCaptured = list()
def KeywordUpdateThread():
    # Runs forever: every 60 seconds spawn a worker that harvests keywords
    # from the authenticated user's home timeline and publishes them into
    # the module-global KeywordsToFilter list (Python 2 code).
    def KeywordUpdateModule():
        global KeywordsToFilter
        print("Authenticating Keyword Updater")
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.secure = True
        auth.set_access_token(access_token, access_token_secret)
        myApi = tweepy.API(auth)
        myKeywordStream = Stream(auth, tweepy.StreamListener())
        print("Authentication successfull!")
        keywords = list()
        timeline = myApi.home_timeline()
        for raw_data in timeline:
            message = raw_data.text
            # Top keyword for each tweet according to indico.
            kw = indicoKeywords(message)
            for keyword in kw:
                keywords.append(unicode(keyword.decode('utf-8')))
        print("Keywords collected, sleeping...")
        myKeywordStream.disconnect()
        print(keywords)
        # Replace (not extend) the shared keyword list atomically.
        KeywordsToFilter = keywords
    # end KeywordUpdateModule():
    try:
        while True:
            print("Asking for new political keywords")
            spawn_off = threading.Thread(target=KeywordUpdateModule)
            spawn_off.start()
            time.sleep(60)
    except Exception, e:
        print("An error occurred with the keyword observer stream thread. This wasn't supposed to happen!")
        print(e.message.decode())
# end KeywordUpdateThread()
def TwitterUpdateThread():
    # Runs forever: once the keyword thread has produced keywords, stream
    # tweets matching them through MyStreamListener (Python 2 code).
    def TwitterObserverModule():
        # Re-encode keywords defensively before handing them to the filter.
        finalKeywords=list()
        for keyword in KeywordsToFilter:
            # print(type(keyword))
            decoded_str = keyword.decode("windows-1252")
            encoded_str = decoded_str.encode("utf8")
            # print(keyword + " " + decoded_str + " " + encoded_str)
            if(encoded_str is None):
                print(keyword + " was passed as NoneType")
                continue
            finalKeywords.append(encoded_str)
        # Blocks until the stream drops or errors.
        TwitterStream.filter(track=finalKeywords)
    # end TwitterObserverModule()
    try:
        print("Authenticating Twitter Updater")
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.secure = True
        auth.set_access_token(access_token, access_token_secret)
        myApi = tweepy.API(auth)
        TwitterStream = Stream(auth, MyStreamListener())
        print("Authentication of Twitter Streamsuccessfull!")
        while True:
            if(len(KeywordsToFilter)>0):
                TwitterObserverModule()
            time.sleep(30)
    except:
        # NOTE(review): bare except swallows the actual error; consider
        # logging the exception so failures are diagnosable.
        print("An error occurred with the Twitter feed. This wasn't supposed to happen!")
# end TwitterUpdateThread()
class MyStreamListener(tweepy.StreamListener):
    # Stream listener that appends raw events to file.txt and classifies
    # the tweet text politically via indico.
    counter = 0
    def __init__(self):
        # NOTE(review): tweepy.StreamListener.__init__ is never called, so
        # base-class attributes are not initialized -- confirm intentional.
        # The file handle is also never closed.
        self.f = open("file.txt",'ab')
    def on_status(self, status):
        print(status.text)
    def on_error(self, status_code):
        if status_code == 420:
            #returning False in on_data disconnects the stream
            return False
        print(status_code)
    def on_data(self, raw_data):
        # Persist every raw event before attempting to parse it.
        self.f.write(raw_data)
        self.f.write('\n')
        coordinates = None
        global PointsCaptured
        data = json.loads(raw_data)
        if 'text' in data:
            message = data['text']
            print(message)
            # Someday we'll get to this... Not today though.
            # if isNotEnglish(message):
            #     message = translateFromUnknownLanguageToEnglish(message)
            politicalTag = indicoPolitics(message)
            print("Doing GEog")
            if 'user' in data:
                if 'coordinates' in data['user']:
                    if 'coordinates' in data['user']['coordinates']:
                        mostAccurateLocation = data['coordinates']['coordinates']
                    else:
                        mostAccurateLocation = data['coordinates']
                    coordinates = mostAccurateLocation
                # elif 'location' in data['user']:
                #     location = data['user']['location']
                #     if location is not None:
                #         decoded_str = location.decode("windows-1252")
                #         encoded_str = decoded_str.encode("utf8")
                #         if encoded_str is None:
                #             print(location + " was passed as NoneType")
                #         if ',' in encoded_str:
                #             mostAccurateLocation = encoded_str
                #             try:
                #                 url = "https://maps.googleapis.com/maps/api/geocode/json?address="+mostAccurateLocation.encode('utf-8')+"&key="+googleAPIKey
                #                 print(url)
                #                 result = urllib2.urlopen(url)
                #             except Exception, e:
                #                 print(e)
                #             coordinates = result
                #         else:
                #             mostAccurateLocation = None
                #             coordinates = None
                # coordinates=None
            # poliNumber = indicoPoliticsNumber(message)
            # print(poliNumber + "!!!")
            # positivity = indicoPositivity(message)
            # print(positivity + "!!!")
            #
            print("Finished geog, doing states")
            myStateOfPoint = StateOfPoint()
            myStateOfPoint.newPoint.party = politicalTag
            # myStateOfPoint.newPoint.tendency = poliNumber
            # myStateOfPoint.positivity = positivity
            print("doing coordinates")
            if coordinates is not None:
                print(coordinates)
                # NOTE(review): mostAccurateLocation looks like a JSON list
                # here, and list has no .split() -- verify this branch works.
                coordinates = mostAccurateLocation.split(', ')
                myStateOfPoint.newPoint.lat = coordinates[0]
                myStateOfPoint.newPoint.long = coordinates[1]
            #
            # PointsCaptured.append(myStateOfPoint)
            # if len(PointsCaptured) is MAX_CACHE_NUMBER:
            #     PointsCaptured.remove(0) # Remove the first element!
            # print(myStateOfPoint)
# end Class MyStreamListener
def indicoPolitics(tweet):
    """Return a one-element list holding the most likely political tag."""
    scores = indicoio.political(tweet)
    ranked = sorted(scores, key=lambda tag: scores[tag], reverse=True)
    return ranked[:1]
def indicoPoliticsNumber(tweet):
    """Return the confidence score of the strongest political tag."""
    scores = indicoio.political(tweet)
    print(scores)
    best = sorted(scores, key=lambda tag: scores[tag], reverse=True)[:1]
    print(scores[best[0]])
    return scores[best[0]]
def indicoPositivity(tweet):
    # Thin wrapper around indico's sentiment endpoint.
    return indicoio.sentiment(tweet)
def indicoKeywords(tweet):
    """Return a one-element list with the highest scoring keyword."""
    scores = indicoio.keywords(tweet)
    return sorted(scores, key=lambda kw: scores[kw], reverse=True)[:1]
def translateFromUnknownLanguageToEnglish(tweetText):
    # Detect the source language with the Google Translate API, then
    # request an English translation of the text.
    service = build('translate', 'v2', developerKey=googleAPIKey)
    lFrom = service.detections().list(q=tweetText).execute()['detections'][0][0]['language']
    return service.translations().list(source=lFrom, target='en', q=tweetText).execute()
def InitAuth():
    """Lazily create the module-level tweepy OAuth handler.

    Fix: the original evaluated the global ``auth`` (print + None check)
    before it was guaranteed to exist, raising NameError on first call;
    globals().get() makes the first call safe.
    """
    global auth
    auth = globals().get('auth')
    print(auth)
    if auth is None:
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.secure = True
        auth.set_access_token(access_token, access_token_secret)
    print(auth)
# end InitAuth()
| |
#!python
__version__ = "1.7.20401.3"
import os.path
import re
import datetime
import xml.etree.ElementTree as xmlDoc
InvalidCharacters = ".$*{}|<>"
class BuckleTool:
    """Generates strongly typed C# wrappers for .resx string/bitmap resources.

    Fixes applied:
    - ProcessCommandLine read ``self.resXFileName`` (wrong capitalization of
      the argparse dest ``resxFileName``), raising AttributeError whenever
      ``-o`` was not supplied.
    - The generated C# ResourceManager constructor was missing its closing
      parenthesis, producing uncompilable output.
    - The parameter regex is now a raw string (avoids invalid-escape warnings).
    - The output file is closed explicitly after writing.
    """

    def ProcessCommandLine(self):
        """Parse the command line into attributes on this instance.

        Returns True when generation should proceed, False when an
        incremental build finds the .cs output already up to date.
        """
        import argparse
        parser = argparse.ArgumentParser(
            description="Generates strongly typed wrappers for string and bitmap .resx resources")
        parser.add_argument("resxFileName", metavar="RESXFILE")
        parser.add_argument(
            "-o", dest="csFileName", metavar="CSFILE",
            help="Specify different name for .cs file.")
        parser.add_argument(
            "-r", dest="resourcesFileName", metavar="RESOURCESFILE",
            help="Specify different name for .resources file.")
        parser.add_argument(
            "-n", dest="namespace",
            help="Namespace to use in generated C# class.")
        parser.add_argument(
            "-c", dest="className",
            help="Class name to use in generated C# class.")
        parser.add_argument(
            "-b", dest="basename",
            help="Alternate file basename that will be used instead of the namespace and class " +
            "name to embed the .resources file in the assembly manifest.")
        parser.add_argument(
            "-w", dest="wrapperClassName", default="ToolBelt.Message",
            help="String wrapper class. See Message.cs for details. Default is %(default)s")
        parser.add_argument(
            "-a", dest="modifier", default="public",
            help="Access modifier for properties and methods. Default is %(default)s")
        parser.add_argument(
            "-q", dest="noLogo", action="store_true",
            help="Suppress logo.")
        parser.add_argument(
            "-i", dest="incremental", action="store_true",
            help="incremental build. Create outputs only if out-of-date.")
        # Parse straight into this instance so every option becomes an attribute.
        parser.parse_args(namespace=self)
        if not self.noLogo:
            print("Buckle ResX to C# String Wrapper Class Generator. Version " + __version__)
            print("Copyright (c) 2012, John Lyon-Smith.")
        if self.csFileName is None:
            # BUG FIX: was self.resXFileName (AttributeError when -o omitted).
            self.csFileName = os.path.splitext(self.resxFileName)[0] + ".cs"
        if self.incremental:
            # Skip generation when the .cs output is newer than the .resx input.
            if (os.path.exists(self.csFileName) and
                    (os.path.getmtime(self.resxFileName) < os.path.getmtime(self.csFileName))):
                return False
        if self.className is None:
            self.className = os.path.basename(self.resxFileName)
        return True

    def ReadResources(self):
        """Parse the .resx XML and detect whether it uses System.Drawing types."""
        self.resources = xmlDoc.parse(self.resxFileName)
        self.haveDrawingResources = False
        root = self.resources.getroot()
        for elem in root.iter("assembly"):
            print(elem.tag, elem.get("alias"), elem.get("name"))
        for elem in root.iter("data"):
            # print elem.tag, elem.get("name"), elem.get("type")
            if ((elem.get("type") is not None) and
                    (elem.get("type").startswith("System.Drawing"))):
                self.haveDrawingResources = True

    def WriteCs(self):
        """Generate the C# wrapper class file from the parsed resources."""
        self.csFile = open(self.csFileName, "w")
        self.WriteNamespaceStart()
        self.WriteClassStart()
        num = 0
        root = self.resources.getroot()
        for elem in root.iter("data"):
            num += 1
            self.WriteResourceMethod(elem)
        self.WriteClassEnd()
        self.WriteNamespaceEnd()
        # FIX: close explicitly so content is flushed deterministically.
        self.csFile.close()
        print("Generated wrapper class '%s' for %d resource(s)" % (self.csFileName, num))

    def WriteNamespaceStart(self):
        """Emit the file header, optional namespace opening, and usings."""
        now = datetime.datetime.now()
        self.csFile.write(
            """//
// This file generated by the Buckle tool on %s at %s.
//
// Contains strongly typed wrappers for resources in %s
//
""" % (now.strftime("%m/%d/%Y"), now.strftime("%I:%M %p"), os.path.basename(self.resxFileName)))
        if self.namespace is not None:
            self.csFile.write("namespace %s {\n" % self.namespace)
        self.csFile.write(
            """using System;
using System.Reflection;
using System.Resources;
using System.Diagnostics;
using System.Globalization;
""")
        if self.haveDrawingResources:
            self.csFile.write("using System.Drawing;\n")
        self.csFile.write("\n")

    def WriteNamespaceEnd(self):
        """Close the namespace brace if one was opened."""
        if (self.namespace is not None) and (len(self.namespace) > 0):
            self.csFile.write("}\n")

    def WriteClassStart(self):
        """Emit the class header and its ResourceManager field."""
        self.csFile.write('''/// <summary>
/// Strongly typed resource wrappers generated from %(resx)s.
/// </summary>
%(modifier)s class %(className)s
{''' % {'resx': os.path.basename(self.resxFileName), 'modifier': self.modifier, 'className': self.className})
        if self.basename is None:
            # FIX: added the missing closing parenthesis after typeof(...).
            self.csFile.write('''
    internal static readonly ResourceManager ResourceManager = new ResourceManager(typeof(%(class)s));
''' % {'class': self.className})
        else:
            self.csFile.write('''
    internal static readonly ResourceManager ResourceManager = new ResourceManager("%(base)s", Assembly.GetExecutingAssembly());
''' % {'base': self.basename})

    def WriteClassEnd(self):
        """Close the generated class."""
        self.csFile.write("}\n")

    def WriteResourceMethod(self, elem):
        """Emit one wrapper property (no params) or method (with params)."""
        value = elem.find("value").text
        name = elem.get("name")
        # Sanitize characters that are legal in resource names but not C#.
        for char in InvalidCharacters:
            name = name.replace(char, "_")
        try:
            paramCount = self.GetNumberOfParametersForStringResource(value)
        except Exception as exception:
            print("Error: Resource has been skipped: %s" % exception)
            return
        self.csFile.write("\n    /// <summary>\n")
        self.csFile.write("    /// %s\n" % value)
        self.csFile.write("    /// </summary>\n")
        if paramCount > 0:
            # Build "object param0, object param1, ..." and "param0, param1, ...".
            parametersWithTypes = ""
            parameters = ""
            for j in range(paramCount):
                separator = ""
                if j > 0:
                    separator = ", "
                parametersWithTypes = parametersWithTypes + \
                    separator + "object param" + repr(j)
                parameters = parameters + separator + "param" + repr(j)
            self.csFile.write("""    public static %s %s(%s)
    {
        Object[] o = { %s };
        return new %s("%s", typeof(%s), ResourceManager, o);
    }
""" % (self.wrapperClassName, name, parametersWithTypes, parameters, self.wrapperClassName, name, self.className))
        else:
            self.csFile.write("""    public static %s %s
    {
        get
        {
            return new %s("%s", typeof(%s), ResourceManager, null);
        }
    }
"""
                              % (self.wrapperClassName, name, self.wrapperClassName, name, self.className))

    def GetNumberOfParametersForStringResource(self, resourceValue):
        """Return the count of {n} format parameters in a resource string.

        The result is max(n) + 1 over all placeholders, 0 when there are
        none.  Raises when a placeholder's index is not an integer.
        """
        regex = re.compile(r"(?P<value>\{[0-9]*\})")
        num = -1
        for match in regex.findall(resourceValue):
            try:
                index = int(match[1:-1])
                num = max(num, index)
            except ValueError as exception:
                raise Exception(resourceValue + str(exception) + match[1:-1])
        return num + 1
if __name__ == '__main__':
    # Command-line entry point: parse args, then regenerate the wrapper
    # unless an incremental build finds the output already current.
    b = BuckleTool()
    if b.ProcessCommandLine():
        b.ReadResources()
        b.WriteCs()
| |
import numpy as np
import inkex
def jumpit(displ):
    # Encode a jump displacement (units of 0.1 mm) as a two-byte PEC jump
    # command; the 0x80 jump flag is folded into the first byte.  Valid
    # range is +/-2047 (20.47 cm).  Python 2 code.
    if displ >= 0: # positive jump
        print 'positive jumpit', displ
        # check here for jump too large, 8*256 or 2048 (204.8 mm) and above are too large
        if displ > 2047:
            print "Error, jump greater than 204.7 mm found"
        msb = np.trunc(displ / 256)
        lsb = np.trunc(displ - msb * 256)
        firstbyte = 128 + msb
        secondbyte = lsb
        return int(firstbyte), int(secondbyte)
    else: # negative jump
        print 'negative jumpit', displ
        if displ < -2047:
            print "Error, jump greater than 204.7 mm found"
        msb = 15 - np.trunc(displ / 256)
        # NOTE(review): unlike the positive branch, the inner trunc(displ/256)
        # is not multiplied back by 256 before subtracting -- this looks like
        # a bug the original author also flagged below.
        lsb = 256 + np.trunc(displ - np.trunc(displ / 256)) # need to check this
        firstbyte = 128 + msb
        secondbyte = lsb
        return int(firstbyte), int(secondbyte)
def msb8(st):
    # High byte of a stitch coordinate: input is in mm and is scaled by 10
    # to 0.1 mm units.  Coordinates beyond +/-32767 are clamped.  Python 2.
    st = int(st) * 10
    if np.abs(st) > 32767:
        print "warning: stitch found more than 32.7 cm from origin, pattern too large"
        return np.uint8(127 * np.sign(st)) # just assign it the max value
    else:
        return np.uint8(np.trunc(st / 256))
def lsb8(st):
    # Low byte complementing msb8.  NOTE(review): msb8 scales its input by
    # 10 internally while the subtraction here uses the unscaled st --
    # verify the two are meant to be combined this way.
    return np.uint8(st - 256 * msb8(st))
def stitchdisp(s):
    """Encode a stitch displacement as a one-byte PEC value.

    The displacement (mm) is scaled by 10.  Small positive moves
    (0..6.3 mm) map directly, small negative moves are offset by 128,
    and anything out of range is returned together with a non-zero
    error value (the raw scaled displacement).
    """
    scaled = s * 10
    if 0 <= scaled <= 63:
        return int(scaled), 0
    if -63 <= scaled < 0:
        return int(128 + scaled), 0
    # Out of range: second element doubles as the error indicator.
    return int(scaled), int(scaled)
def make_pes(path, filename, new_pts):
# make a PES file. Most of this code is from Dr. Cindy Harnett, U of L
# new_points are already scaled and rotated so we skip that part in her
# code.
inputx = np.array(new_pts[:, 0])
inputy = np.array(new_pts[:, 1])
NumberOfStitches = len(inputx)
NumStitchMSB = int(np.trunc(NumberOfStitches / 256))
NumStitchLSB = int(NumberOfStitches - NumStitchMSB * 256)
NumStitchMSB, NumStitchLSB # will be used later
xsize = (np.max(inputx) - (np.min(inputx) + 2)) * 10 # * np.abs(shift[0]) ##new CKH Mar 4 2017 pesky bounding box problem workaround
ysize = (np.max(inputy) - (np.min(inputy) + 2)) * 10 # * np.abs(shift[1])
print 'xsize', xsize
print 'ysize', ysize
YsizeMSB = int(np.trunc(ysize / 256))
YsizeLSB = int(ysize - 256 * YsizeMSB)
XsizeMSB = int(np.trunc(xsize / 256))
XsizeLSB = int(xsize - 256 * XsizeMSB)
XsizeMSB, XsizeLSB, YsizeMSB, YsizeLSB # gonna be used later
# Positions of items in PES files, for Python where vectors start at 0
PECstartloc = 8
NumColorLocOffset = 48 # related to where to find number of colors in the file
# Trever Adams has 48, this seems to match what StitchBuddy does. usually put 0 (one color)
ColorLocOffset = 49 # this is what Achatina mentions, it refers to the start of the color table
# the next few bytes would hold additional color values but USUALLY only have one color
# so I did not handle more bytes here yet.
GraphicLocOffset = 514 # matches what Trever Adams has
SizeLocOffset = 520 # it's related to where we will write the x and y extents
StitchStartOffset = 532 # it's related to where we will start the cumulative stitch data
StitchEndOffset = 512 # it's related to where the stitch data will end
# Bytes 0-7 are a header
#PESdatastart = [35, 80, 69, 83, 48, 48, 48, 49] # writes #PES0001 at start of file
#in hex
PESdatastart = [0x23, 0x50, 0x45, 0x53, 0x30, 0x30, 0x30, 0x31]
# Bytes 8, 9 and 10 give location data for the graphics. I will come back to it later on.
PESsection1 = [0, 0, 0]
# bytes 0x08 - 0x0b uint32 offset for pec file
#
# Bytes 11 through 30
PESsection2 = [0, 0, 0, 1, 0, 1, 0, 255, 255, 0, 0, 7, 0, 67, 69, 109, 98, 79, 110, 101] # 7 0 CEmbOne
# The pattern seems consistent between different files
# this section seem to be the name, catagory, author, keywords, comments strings
# PESsection3
# Bytes 31-46 differ between patterns.
# In a square data file, the last 2 bytes were a copy of the first 2
##Other people are saying this is min X, min Y , max X, max Y
# But I'm not finding those minX and minY values in the file. And they are all positive. Their
# difference matches the width (x) and height(y) respectively but there is
# some large positive offset. It turns out, this offset centers the bounding box
# at 1000,1000(10 cm, 10 cm) from which min X, min Y, max X, and max Y are calculated,
# keeping all the range-numbers positive I guess for a 4-inch hoop
minXMSB = int(np.trunc(np.min(1000 + inputx) / 256))
minXLSB = int(np.min(1000 + inputx) - 256 * minXMSB)
minYMSB = int(np.trunc(np.min(1000 + inputy) / 256))
minYLSB = int(np.min(1000 + inputy) - 256 * minYMSB)
maxXMSB = int(np.trunc(np.max(1000 + inputx) / 256))
maxXLSB = int(np.max(1000 + inputx) - 256 * maxXMSB)
maxYMSB = int(np.trunc(np.max(1000 + inputy) / 256))
maxYLSB = int(np.max(1000 + inputy) - 256 * maxYMSB)
PESsection3 = [minXLSB, minXMSB, minYLSB, minYMSB, maxXLSB, maxXMSB, maxYLSB, maxYMSB]
PESsection3 = PESsection3 * 2 # Both files repeat the 8 bytes 2x
print 'PES3 [minXLSB, minXMSB, minYLSB, minYMSB, maxXLSB, maxXMSB, maxYLSB, maxYMSB]'
print PESsection3
# Maybe they expect a bigger hoop in another version of the format
# Meanwhile, the PE525 machine may be using this info to select a hoop but it IGNORES these XY endpoints
# and puts the centroid of the bounding box at 0,0 during stitching
# PESsection3=[232,3,232,3,232,3,232,3]*2
# StitchBuddy has 232,3 instead of 76,4. Trying that out instead to see if my pattern appears.
# Another consistent stretch between bytes 47 and 72, there's a discrepancy btw what Stitchbuddy
# produces and what Trever Adams finds, he has 0,0,0,0,0,0,0,0 instead of 0,0, 122,68,0,0,122,68
PESsection4 = [0, 0, 128, 63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 63, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0];
# PESsection5. ok except all my examples are for a centered design
# and the 1cm square here is not centered.
# Bytes 73-80 are different between files. Bytes 77-78 are x extent
# and bytes 79-80 are y-extent. 73-74 246 255 in both rotated and unrotated,
# 75 76 y-offset 250 0 vs 236 0 in the rotated one.
# PESsection5=[XoffsetLSB XoffsetMSB YoffsetLSB YoffsetMSB XsizeLSB XsizeMSB YsizeLSB YsizeMSB];
# For now. The bytes 73-80 are supposed to be "offsets" but from what?
# These offsets turn out to be just half the size. But also, if you add the y offset to
# 1000 you get supposed max Y from PESSection3, and if you subtract the x offsset
# from 1000 you get the alleged max X from PESSEction3.
# I am reading online that some of these numbers should be the design X offset, design Y offset (Treveradams/libpes on Github)
# *******Another place to check.
# But also minimal X of all stitches in CSewSeg, max Y of all stitches in CSewSeg. That doesn't seem like the same thing.
# XoffsetMSB = int(np.trunc(xsize / 256 / 2))
# # XoffsetMSB=0;
# XoffsetLSB = int(xsize / 2 - 256 * XoffsetMSB)
# # XoffsetLSB=0;#what StitchBuddy is producing that works
# YoffsetMSB = int(np.trunc(ysize / 256 / 2))
# # YoffsetMSB=0;
# YoffsetLSB = int(ysize / 2 - 256 * YoffsetMSB)
# # YoffsetLSB=0;
PESsection5 = [0, 0, 0, 0, XsizeLSB, XsizeMSB, YsizeLSB, YsizeMSB]
#PESsection5 = [0, 0, 0, 0, 0, 0, 0, 0]
# StitchBuddy puts 0s for these offsets.
# PESsection 6, bytes 81-88 consistent between files and is filled with 0s
PESsection6 = [0, 0, 0, 0, 0, 0, 0, 0]
# PESsection 7=?
# It is a single byte, at 89, that differs between files
PESsection7 = [0] # "professionally designed files seem to have 1 here" (Trever Adams). But StitchBuddy puts 0.
# PESsection 8, bytes 90-103 are consistent between files
PESsection8 = [0, 255, 255, 0, 0, 7, 0, 67, 83, 101, 119, 83, 101, 103] # 0, section break [255 255 0 0] then 7 0 CSewSeg
# PESsection 9 -different and different length between files!!!
# one file has 16 more bytes here
# than the other. Bytes 104-107
# Other online info says it's about colors.
# Color Start Indicator, Color Index says http://www.njcrawford.com/programs/embroidery-reader/pes0001/
# For both the final byte is 0 and maybe that is the color index?
# PESsection9=[ColorChangeIndLSB ColorChangeIndMSB ColorIndexLSB ColorIndexMSB
# NumberOfStartupStitchesLSB NumberOfStartupStitchesMSB XCoord1LSB XCoord1MSB
# YCoord1LSB YCoord1MSB..XCoordNLSB...6 more bytes];%Get a few more examples to figure section 9 out. There is
# something about a rotation matrix here but that seems to be PES version 7
# StitchBuddy gives
PESsection9 = [0, 0, 39, 0]
# PESsection 10 - 2 bytes giving the number of stitches, LSB first
PESsection10 = [NumStitchLSB, NumStitchMSB]
# Then add on the absolute stitch coords as 2 byte xy values. StitchBuddy has all my stitches as follows:
# (50,0 0,0) (100,0 0,0) (100,0, 206,255 --a y value of -5 mm) (100,0 156,255-- a y value of -10 mm) (50,0 156,255)
# (0,0 156,255) (0,0, 206,255) (0,0 0,0)
# How weird that it seems like StitchBuddy flipped the y values to negative ones. I put a - sign on inputy for this reason
# There are 8 stitches. #perhaps here I can put in the 9th stitch and get it right...start the above sequence with (0,0 0,0)
PESsection11 = [50, 0, 0, 0, 100, 0, 0, 0, 100, 0, 206, 255, 100, 0, 156, 255, 50, 0, 156, 255, 0, 0, 156, 255,
0, 0, 206, 255, 0, 0, 0, 0] # AbsStitchCoords
stitchpile = np.array(zip(inputx, inputy)).flatten()
# I used np.uint8(), this works to translate the negative numbers but it will wrap around, np.uint8(500) =12
PESsection11 = [byte for s in stitchpile for byte in [lsb8(s), msb8(s)]] # create msb8 and lsb8 functions
# the order of operations is alien to me but it's like
# (for s in stitchpile) gets every x or y stitch coord and calls it s
# then [lsb8(s),msb8(s)] is a length 2 list of the LSB and MSB of that stitch in uint8 format
# then, take each byte in [lsb8(s),msb8(s)] and add it to the list
# It would be more readable to flatten a structured list
# that I would create by [[lsb8(s),msb8(s)] for s in stitchpile]
# but Python doesn't have flatten (numpy does).
# they refused to create a flatten function bc generally, lists of lists can have irregular shapes
# and then flatten is ambiguous
# Coming from Matlab I expect it all to be a rectangle, & there's a clear plan for exactly how flatten will work
# Then StitchBuddy puts this, I dont know what it means yet
PESsection12 = [1, 0, 0, 0, 39, 0, 0, 0, 0, 0]
# The next section from pecstart:pecstart+49 is...perhaps some labels, this looks like chars
# [76, 65, 58, 49, 99, 109, 83, 113, 117, 97, 114,101,84,101,115,116,32,32,32,13 ]
# Yes it is [L A : 1 c m S q u a r e T e s t (space=32) (space) (space) (cr=13)]
# It could be a 16 char label, filled out by spaces, and then a carriage return
# Then all this looking like spaces and a few control characters, maybe these could hold names for colors
# 32,32,32,32,32,32,32,32,32,32,32,32,255,0,6,38,32,32,32,32,32,32,32,32,32,32,32,32]
# Therefore pecstart is calculated as len(everything up to and including PESsection12)
PECsection1 = [76, 65, 58] # LA:
# use ord('a') to convert my output filename to a label. why not atoi or int, where ever did they get ord
labelname = filename.split('.', 1) # get first 16 chars before any dot in the filename
mylabel = [ord(s) for s in labelname[0]]
mylabel = mylabel[0:16]
mylabel = mylabel + [32] * (16 - len(mylabel)) + [13]
PECsection1 = PECsection1 + mylabel + [32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 255, 0, 6, 38, 32, 32,
32, 32, 32, 32, 32, 32, 32, 32, 32, 32]
# now a familiar neighborhood vx[pecstart+49]=0 #number of colors in file is
PECsection2 = [0] # Number of Colors in File, for single color files I see 0 a lot, so maybe its num of color changes?
# THEN there is a 39, which is probably a color index. Then it's filled with spaces (32s)
# Followed by [0,0] up to and including vx[pecstart+514]
PECsection3 = [39] + [32] * (GraphicLocOffset - NumColorLocOffset - 4)
PECsection3 = PECsection3 + [0, 0]
# Then we have a [37,0,0] which is "graphic" location for this design-- it will vary--is calculated from the data
# I need to figure out later on while building the output vector.
# pecstart+GraphicLocOffset=LSB of graphic, pecstart+515 &516 are the remaining bytes
PECsection4 = [0, 0, 0] # graphic - placeholder till vector is assembled
# Then [49,255,240] -- is it the same for different files? vx[pecstart+518:pecstart+521]
PECsection5 = [49, 255, 240] # unknown meaning
# Then [XsizeLSB,XsizeMSB,YsizeLSB,YsizeMSB] #vx[pecstart+521:pecstart+525]
PECsection6 = [XsizeLSB, XsizeMSB, YsizeLSB, YsizeMSB]
print 'x,y size:', PECsection6
# the lowest x value is also the value of the initial jump
# however the highest y value is not at the start
# I need to move the pattern down by ysize/2-inputy[0]
#print inputx
#print inputy
startXoff = (xsize / 2) - inputx[0]
startYoff = (ysize / 2) - inputy[0]
# now I have to convert the last two byte pairs in PECsection7 to these offsets
# using the jump stitch method
print 'startXoff', startXoff, 'startYoff', startYoff
xStart = jumpit(startXoff)
yStart = jumpit(startYoff)
print 'jumpit X', xStart[0], xStart[1]
print 'jumpit Y', yStart[0], yStart[1]
# In PECsection7 the first 4 bytes are fixed and unknown meaning
# and last 4 change where the design center winds up in StitchBuddy
# [224,1,176,1, jump to X CENTER , rest of jump to X CENTER, jump to Y CENTER, rest of jump to Y CENTER]
PECsection7 = [224, 1, 176, 1, xStart[0], xStart[1], yStart[0], yStart[1]]
print PECsection7
diffx = np.diff(inputx)
diffy = np.diff(inputy)
# diffx[3]=100 #Testing what happens if a stitch is greater than the max length of 63
diffpile = np.array(zip(diffx, diffy)).flatten()
# PECsection8=[50, 0, 50, 0, 0, 78, 0, 78, 78, 0, 78, 0, 0, 50, 0, 50] #This is the 7 bit encoded displacement data.
#temp = [stitchdisp(s) for s in diffpile]
PECsection8 = [stitchdisp(s)[0] for s in diffpile]
PECtoolongs = [stitchdisp(s)[1] for s in diffpile] # save locations of any too-long stitches along with their lengths
# PECsection8,PECtoolongs
print PECsection8
# Then [255]
PECsection9 = [255] # end of stitch data. Achatina and Trever Adams has [255 0] but StitchBuddy leaves off the 0
# the graphic for this color
PECsection10 = [0,
0,
0,
0,
0,
0,
240,
255,
255,
255,
255,
15,
8,
0,
0,
0,
0,
16,
4,
0,
0,
0,
0,
32,
2,
0,
0,
0,
0,
64,
2,
252,
255,
255,
63,
64,
2,
252,
255,
255,
63,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
0,
0,
48,
64,
2,
12,
128,
255,
63,
64,
2,
12,
128,
255,
63,
64,
2,
0,
0,
0,
0,
64,
4,
0,
0,
0,
0,
32,
8,
0,
0,
0,
0,
16,
240,
255,
255,
255,
255,
15,
0,
0,
0,
0,
0,
0]
# Finally tack on another copy of the 228 byte pixel graphic. vx[pecstart+graphic+513+228:] is same as above. That is all
PECsection11 = PECsection10
# Concatenate everything up to just before the PEC block and then calculate length
vx = PESdatastart + PESsection1 + PESsection2
vx = vx + PESsection3 + PESsection4 + PESsection5
vx = vx + PESsection6 + PESsection7 + PESsection8
vx = vx + PESsection9 + PESsection10 + PESsection11 + PESsection12
pecstartValue = len(vx)
pecstartValueMSB = np.trunc(pecstartValue / 65536)
pecstartValueMiddleBit = np.trunc((pecstartValue - 65536 * pecstartValueMSB) / 256)
pecstartValueLSB = int(pecstartValue - pecstartValueMSB * 65536 - pecstartValueMiddleBit * 256)
pecstartValueMSB = int(pecstartValueMSB)
pecstartValueMiddleBit = int(pecstartValueMiddleBit)
# Fill in values for pecstart at pecstart location
vx[PECstartloc] = pecstartValueLSB
vx[PECstartloc + 1] = pecstartValueMiddleBit
vx[PECstartloc + 2] = pecstartValueMSB
# pecstartValueLSB, pecstartValueMiddleBit,pecstartValueMSB #check on it
vx = vx + PECsection1 # Label containing design name
# Check on the assembly of this vector because things have to be at known offsets
NumColorLocError = len(vx) - pecstartValue - NumColorLocOffset
if NumColorLocError:
print "Number of Colors location is off by"
print NumColorLocError
else:
print "Number of Colors location is right"
vx = vx + PECsection2 # Number of colors in file
vx = vx + PECsection3 # A bunch of spaces (32s) until the location of the graphic pointer
GraphicLoc = len(vx)
GraphicLocError = GraphicLoc - pecstartValue - GraphicLocOffset
if GraphicLocError:
print "Graphic location is off by"
print GraphicLocError
else:
print "Graphic location is right"
vx = vx + PECsection4 # Section 4 is 3 bytes giving graphic offset--going to calculate that in a little bit
vx = vx + PECsection5 + PECsection6 + PECsection7 # Size data and other data: 15 bytes
StitchStartLocError = len(vx) - pecstartValue - StitchStartOffset
if StitchStartLocError:
print "Stitch start location is off by"
print StitchStartLocError
else:
print "Stitch start location is right"
vx = vx + PECsection8 # This is the stitch displacement data
vx = vx + PECsection9 # This is the end of stitch signal
for i in range(len(PECtoolongs)): # Defined in PECsection 8 calculation
if PECtoolongs[i]:
print "Stitch", i / 2, "is >", PECtoolongs[i] / 10, "mm long, but max length is 6.3 mm"
graphic = len(vx) - pecstartValue - StitchEndOffset # this is how graphic is defined
# now calculate LSB, middle and MSB of graphic to plug in to graphic location
graphicMSB = np.trunc(graphic / 65536)
graphicMiddleBit = np.trunc((graphic - 65536 * graphicMSB) / 256)
graphicLSB = int(graphic - graphicMSB * 65536 - graphicMiddleBit * 256)
graphicMSB = int(graphicMSB)
graphicMiddleBit = int(graphicMiddleBit)
vx[GraphicLoc] = graphicLSB
vx[GraphicLoc + 1] = graphicMiddleBit
vx[GraphicLoc + 2] = graphicMSB
vx = vx + PECsection10 + PECsection11 # add 2 copies of the pixel graphic
vx = np.array(vx)
f=open(path + filename, 'wb')
vx.astype('uint8').tofile(f)
f.close()
return True
| |
from __future__ import print_function, division
from sympy.core.basic import C
from sympy.core.expr import Expr
from sympy.core.relational import Eq
from sympy.sets.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Wild, Symbol)
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, xrange
from sympy.core.containers import Tuple
from sympy.functions.elementary.piecewise import piecewise_fold, Piecewise
from sympy.utilities import flatten
from sympy.utilities.iterables import sift
def _process_limits(*symbols):
    """Canonicalize limit specifications.

    Each entry of *symbols* is turned into ``Tuple(symbol)``,
    ``Tuple(symbol, value)`` or ``Tuple(symbol, lower, upper)``.  When
    only a lower bound is given (upper is None) the bounds are swapped
    conceptually and the returned ``orientation`` flips sign, so
    ``(x, 1, None)`` behaves like ``(x, None, 1)`` with orientation -1.

    Returns (limits, orientation).
    """
    canonical = []
    orientation = 1
    for spec in symbols:
        # A bare symbol means "no bounds given".
        if isinstance(spec, Symbol):
            canonical.append(Tuple(spec))
            continue
        if is_sequence(spec, Tuple):
            spec = sympify(flatten(spec))
            if spec[0].is_Symbol:
                sym = spec[0]
                # An Interval supplies both bounds at once.
                if len(spec) == 2 and isinstance(spec[1], Interval):
                    spec[1:] = [spec[1].start, spec[1].end]
                count = len(spec)
                if count == 3:
                    lo, hi = spec[1], spec[2]
                    if lo is None and hi is not None:
                        bounds = [hi]
                    elif lo is not None and hi is None:
                        # Only a lower bound: flip orientation.
                        orientation = -orientation
                        bounds = [lo]
                    elif lo is None and hi is None:
                        bounds = []
                    else:
                        bounds = spec[1:]
                    canonical.append(Tuple(sym, *bounds))
                    continue
                if count == 1 or (count == 2 and spec[1] is None):
                    canonical.append(Tuple(sym))
                    continue
                if count == 2:
                    canonical.append(Tuple(sym, spec[1]))
                    continue
        # Anything else (or a non-Symbol leading entry) is malformed.
        raise ValueError('Invalid limits given: %s' % str(symbols))
    return canonical, orientation
class ExprWithLimits(Expr):
    """Base class for expressions carrying limit specifications
    (e.g. Integral, Sum).  ``_args[0]`` is the target function; the
    remaining args are ``Tuple`` limits of the form
    ``(symbol, lower, upper)``.
    """
    __slots__ = ['is_commutative']
    def __new__(cls, function, *symbols, **assumptions):
        # Any embedded piecewise functions need to be brought out to the
        # top level so that integration can go into piecewise mode at the
        # earliest possible moment.
        function = sympify(function)
        # Applied to an Equality, distribute the operation over both sides.
        if hasattr(function, 'func') and function.func is C.Equality:
            lhs = function.lhs
            rhs = function.rhs
            return C.Equality(cls(lhs, *symbols, **assumptions), \
                cls(rhs, *symbols, **assumptions))
        function = piecewise_fold(function)
        if function is S.NaN:
            return S.NaN
        if symbols:
            limits, orientation = _process_limits(*symbols)
        else:
            # symbol not provided -- we can still try to compute a general form
            free = function.free_symbols
            if len(free) != 1:
                raise ValueError(
                    "specify dummy variables for %s" % function)
            limits, orientation = [Tuple(s) for s in free], 1
        # denest any nested calls
        while cls == type(function):
            limits = list(function.limits) + limits
            function = function.function
        # Only limits with lower and upper bounds are supported; the indefinite form
        # is not supported
        if any(len(l) != 3 or None in l for l in limits):
            raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
        obj = Expr.__new__(cls, **assumptions)
        arglist = [function]
        arglist.extend(limits)
        obj._args = tuple(arglist)
        obj.is_commutative = function.is_commutative  # limits already checked
        return obj
    @property
    def function(self):
        """Return the function applied across limits.
        Examples
        ========
        >>> from sympy import Integral
        >>> from sympy.abc import x
        >>> Integral(x**2, (x,)).function
        x**2
        See Also
        ========
        limits, variables, free_symbols
        """
        return self._args[0]
    @property
    def limits(self):
        """Return the limits of expression.
        Examples
        ========
        >>> from sympy import Integral
        >>> from sympy.abc import x, i
        >>> Integral(x**i, (i, 1, 3)).limits
        ((i, 1, 3),)
        See Also
        ========
        function, variables, free_symbols
        """
        return self._args[1:]
    @property
    def variables(self):
        """Return a list of the dummy variables
        >>> from sympy import Sum
        >>> from sympy.abc import x, i
        >>> Sum(x**i, (i, 1, 3)).variables
        [i]
        See Also
        ========
        function, limits, free_symbols
        as_dummy : Rename dummy variables
        transform : Perform mapping on the dummy variable
        """
        # The first element of every limit Tuple is its bound symbol.
        return [l[0] for l in self.limits]
    @property
    def free_symbols(self):
        """
        This method returns the symbols in the object, excluding those
        that take on a specific value (i.e. the dummy symbols).
        Examples
        ========
        >>> from sympy import Sum
        >>> from sympy.abc import x, y
        >>> Sum(x, (x, y, 1)).free_symbols
        set([y])
        """
        # don't test for any special values -- nominal free symbols
        # should be returned, e.g. don't return set() if the
        # function is zero -- treat it like an unevaluated expression.
        function, limits = self.function, self.limits
        isyms = function.free_symbols
        for xab in limits:
            # A bare (x,) limit leaves x free.
            if len(xab) == 1:
                isyms.add(xab[0])
                continue
            # take out the target symbol
            if xab[0] in isyms:
                isyms.remove(xab[0])
            # add in the new symbols
            for i in xab[1:]:
                isyms.update(i.free_symbols)
        return isyms
    @property
    def is_number(self):
        """Return True if the Sum has no free symbols, else False."""
        return not self.free_symbols
    def as_dummy(self):
        """
        Replace instances of the given dummy variables with explicit dummy
        counterparts to make clear what are dummy variables and what
        are real-world symbols in an object.
        Examples
        ========
        >>> from sympy import Integral
        >>> from sympy.abc import x, y
        >>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
        Integral(_x, (_x, x, _y), (_y, x, y))
        If the object supports the "integral at" limit ``(x,)`` it
        is not treated as a dummy, but the explicit form, ``(x, x)``
        of length 2 does treat the variable as a dummy.
        >>> Integral(x, x).as_dummy()
        Integral(x, x)
        >>> Integral(x, (x, x)).as_dummy()
        Integral(_x, (_x, x))
        If there were no dummies in the original expression, then the
        symbols which cannot be changed by subs() are clearly seen as
        those with an underscore prefix.
        See Also
        ========
        variables : Lists the integration variables
        transform : Perform mapping on the integration variable
        """
        reps = {}
        f = self.function
        limits = list(self.limits)
        # Walk the limits from last to first so a renaming introduced for a
        # later variable is substituted into the bounds of earlier limits
        # (see the first doctest example above).
        for i in xrange(-1, -len(limits) - 1, -1):
            xab = list(limits[i])
            # length-1 limits ("integral at") are not treated as dummies
            if len(xab) == 1:
                continue
            x = xab[0]
            xab[0] = x.as_dummy()
            for j in range(1, len(xab)):
                xab[j] = xab[j].subs(reps)
            reps[x] = xab[0]
            limits[i] = xab
        f = f.subs(reps)
        return self.func(f, *limits)
    def _eval_interval(self, x, a, b):
        # Replace the limit whose symbol is x with (x, a, b); all other
        # limits are kept unchanged.
        limits = [( i if i[0] != x else (x,a,b) ) for i in self.limits]
        integrand = self.function
        return self.func(integrand, *limits)
    def _eval_subs(self, old, new):
        """
        Perform substitutions over non-dummy variables
        of an expression with limits. Also, can be used
        to specify point-evaluation of an abstract antiderivative.
        Examples
        ========
        >>> from sympy import Sum, oo
        >>> from sympy.abc import s,n
        >>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
        Sum(n**(-2), (n, 1, oo))
        >>> from sympy import Integral
        >>> from sympy.abc import x,a
        >>> Integral(a*x**2,x).subs(x,4)
        Integral(a*x**2, (x, 4))
        See Also
        ========
        variables : Lists the integration variables
        transform : Perform mapping on the dummy variable for integrals
        change_index : Perform mapping on the sum and product dummy variables
        """
        func, limits = self.function, list(self.limits)
        # If one of the expressions we are replacing is used as a func index
        # one of two things happens.
        # - the old variable first appears as a free variable
        # so we perform all free substitutions before it becomes
        # a func index.
        # - the old variable first appears as a func index, in
        # which case we ignore. See change_index.
        # Reorder limits to match standard mathematical practice for scoping
        limits.reverse()
        if not isinstance(old, C.Symbol) or \
                old.free_symbols.intersection(self.free_symbols):
            # old occurs free somewhere: substitute in the bounds, and in
            # the function unless old becomes a dummy variable first.
            sub_into_func = True
            for i, xab in enumerate(limits):
                if 1 == len(xab) and old == xab[0]:
                    # expand "integral at" (x,) to its explicit (x, x) form
                    xab = (old, old)
                limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
                if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
                    # old is captured by this limit's dummy: stop here.
                    sub_into_func = False
                    break
            if isinstance(old,C.AppliedUndef) or isinstance(old,C.UndefinedFunction):
                # Replacing an undefined function must not introduce new
                # dependencies on the dummy variables.
                sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
                sy1 = set(self.variables).intersection(set(old.args))
                if not sy2.issubset(sy1):
                    raise ValueError(
                        "substitution can not create dummy dependencies")
                sub_into_func = True
            if sub_into_func:
                func = func.subs(old, new)
        else:
            # old is a Symbol and a dummy variable of some limit
            for i, xab in enumerate(limits):
                if len(xab) == 3:
                    limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
                if old == xab[0]:
                    break
        # simplify redundant limits (x, x) to (x, )
        for i, xab in enumerate(limits):
            if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
                limits[i] = Tuple(xab[0], )
        # Reorder limits back to representation-form
        limits.reverse()
        return self.func(func, *limits)
class AddWithLimits(ExprWithLimits):
    r"""Represents unevaluated oriented additions.
    Parent class for Integral and Sum.
    """
    def __new__(cls, function, *symbols, **assumptions):
        # Any embedded piecewise functions need to be brought out to the
        # top level so that integration can go into piecewise mode at the
        # earliest possible moment.
        #
        # This constructor only differs from ExprWithLimits
        # in the application of the orientation variable. Perhaps merge?
        function = sympify(function)
        # Applied to an Equality, distribute the operation over both sides.
        if hasattr(function, 'func') and function.func is C.Equality:
            lhs = function.lhs
            rhs = function.rhs
            return C.Equality(cls(lhs, *symbols, **assumptions), \
                cls(rhs, *symbols, **assumptions))
        function = piecewise_fold(function)
        if function is S.NaN:
            return S.NaN
        if symbols:
            limits, orientation = _process_limits(*symbols)
        else:
            # symbol not provided -- we can still try to compute a general form
            free = function.free_symbols
            if len(free) != 1:
                raise ValueError(
                    " specify dummy variables for %s. If the integrand contains"
                    " more than one free symbol, an integration variable should"
                    " be supplied explicitly e.g., integrate(f(x, y), x)"
                    % function)
            limits, orientation = [Tuple(s) for s in free], 1
        # denest any nested calls
        while cls == type(function):
            limits = list(function.limits) + limits
            function = function.function
        obj = Expr.__new__(cls, **assumptions)
        # The orientation sign returned by _process_limits (flipped when a
        # limit had only a lower bound) is folded into the stored function.
        arglist = [orientation*function]
        arglist.extend(limits)
        obj._args = tuple(arglist)
        obj.is_commutative = function.is_commutative  # limits already checked
        return obj
    def _eval_adjoint(self):
        # Adjoint commutes with the addition only when all limits are real.
        if all([x.is_real for x in flatten(self.limits)]):
            return self.func(self.function.adjoint(), *self.limits)
        return None
    def _eval_conjugate(self):
        # Conjugate commutes with the addition only when all limits are real.
        if all([x.is_real for x in flatten(self.limits)]):
            return self.func(self.function.conjugate(), *self.limits)
        return None
    def _eval_transpose(self):
        # Transpose commutes with the addition only when all limits are real.
        if all([x.is_real for x in flatten(self.limits)]):
            return self.func(self.function.transpose(), *self.limits)
        return None
    def _eval_factor(self, **hints):
        if 1 == len(self.limits):
            summand = self.function.factor(**hints)
            if summand.is_Mul:
                # Pull commutative factors independent of the dummy
                # variables outside the operation.
                out = sift(summand.args, lambda w: w.is_commutative \
                    and not w.has(*self.variables))
                return C.Mul(*out[True])*self.func(C.Mul(*out[False]), \
                    *self.limits)
        else:
            # Factor the inner (all-but-last) limits first.
            summand = self.func(self.function, self.limits[0:-1]).factor()
            if not summand.has(self.variables[-1]):
                return self.func(1, [self.limits[-1]]).doit()*summand
            elif isinstance(summand, C.Mul):
                return self.func(summand, self.limits[-1]).factor()
        return self
    def _eval_expand_basic(self, **hints):
        summand = self.function.expand(**hints)
        if summand.is_Add and summand.is_commutative:
            # Distribute the operation over the expanded sum's terms.
            return C.Add(*[ self.func(i, *self.limits) for i in summand.args ])
        elif summand != self.function:
            return self.func(summand, *self.limits)
        return self
| |
import datetime
import urllib2
from lxml import etree
from django.db.models import Q
from django.conf import settings
from football365.management.commands.football365_fetch import Command as BaseCommand
from football.models import LeagueGroup, League, Team, LogEntry, Fixture
class Command(BaseCommand):
    """Fetch league tables, fixtures and results from the Football365
    feed and commit them to the football app's models.

    The base command drives ``pipeline``: for each service type it runs
    the named steps in order (raw fetch, XML parse, row structuring)
    and finally the ``*_commit`` methods implemented here.
    """
    # Service type -> ordered tuple of pipeline step names.  Only the
    # final *_commit steps are defined in this subclass; the earlier
    # steps come from the base football365_fetch command.
    pipeline = {
        'table': ('table_raw', 'xml2dom', 'table_structure', 'table_commit'),
        'fixtures': ('fixtures_raw', 'xml2dom', 'fixtures_structure', 'fixtures_commit'),
        'results': ('results_raw', 'xml2dom', 'results_structure', 'results_commit'),
    }
    def table_commit(self, call, data):
        """Rebuild the standings (LogEntry rows) of every league tied to
        this service call from the fetched table rows, then re-apply
        group assignments for leagues that use groups.
        """
        # NOTE(review): the lookup field is spelled "football365_di" here and
        # throughout this module -- looks like a typo for "..._id", but it
        # must match the actual field name on the models; verify before renaming.
        leagues = League.objects.filter(football365_di=call.football365_service_id)
        for league in leagues:
            # Wipe the league's current standings before rebuilding.
            for obj in league.logentry_set.all():
                obj.delete()
            for row in data:
                try:
                    # Match a team by feed code or by exact title, within this league.
                    team = Team.objects.get(Q(football365_teamcode=row['TEAMCODE']) | Q(title=row['TEAM']), leagues=league)
                except (Team.DoesNotExist, Team.MultipleObjectsReturned):
                    # Unknown or ambiguous team: skip the row (best effort).
                    continue
                LogEntry.objects.create(
                    league=league,
                    team=team,
                    played=row['PLAYED'],
                    won=row['WON'],
                    drawn=row['DRAWN'],
                    lost=row['LOST'],
                    goals=row['GOALSFOR'],
                    points=row['POINTS'],
                    goal_difference=row['GOALDIFFERENCE']
                )
        # Second pass: for ALL leagues, tag entries with the group whose
        # feed is this same service call (creating entries if missing).
        leagues = League.objects.all()
        for league in leagues:
            groups = LeagueGroup.objects.filter(league=league, football365_di=call.football365_service_id)
            for group in groups:
                for obj in league.logentry_set.filter(group=group):
                    obj.delete()
                for row in data:
                    try:
                        team = Team.objects.get(Q(football365_teamcode=row['TEAMCODE']) | Q(title=row['TEAM']), leagues=league)
                    except (Team.DoesNotExist, Team.MultipleObjectsReturned):
                        continue
                    try:
                        # Entry created in the first pass: just attach the group.
                        logentry = LogEntry.objects.get(league=league, team=team)
                        logentry.group = group
                        logentry.save()
                    except LogEntry.DoesNotExist:
                        LogEntry.objects.create(
                            league=league,
                            group=group,
                            team=team,
                            played=row['PLAYED'],
                            won=row['WON'],
                            drawn=row['DRAWN'],
                            lost=row['LOST'],
                            goals=row['GOALSFOR'],
                            points=row['POINTS'],
                            goal_difference=row['GOALDIFFERENCE']
                        )
    def fixtures_commit(self, call, data):
        """Create any fetched fixtures not yet stored (matched by teams
        and exact kickoff time), then re-apply group assignments.
        """
        leagues = League.objects.filter(football365_di=call.football365_service_id)
        for league in leagues:
            for row in data:
                try:
                    home_team = Team.objects.get(Q(football365_teamcode=row['HOMETEAMCODE']) | Q(title=row['HOMETEAM']), leagues=league)
                    away_team = Team.objects.get(Q(football365_teamcode=row['AWAYTEAMCODE']) | Q(title=row['AWAYTEAM']), leagues=league)
                except (Team.DoesNotExist, Team.MultipleObjectsReturned):
                    continue
                # Does the fixture already exist?
                q = league.fixture_set.filter(
                    home_team=home_team,
                    away_team=away_team,
                    datetime=row['STARTTIME']
                )
                if q.exists():
                    # Already stored
                    continue
                # New fixture
                Fixture.objects.create(
                    league=league,
                    home_team=home_team,
                    away_team=away_team,
                    datetime=row['STARTTIME']
                )
        # Second pass: attach group info for every league whose groups are
        # fed by this service call.
        leagues = League.objects.all()
        for league in leagues:
            groups = LeagueGroup.objects.filter(league=league, football365_di=call.football365_service_id)
            for group in groups:
                for row in data:
                    try:
                        home_team = Team.objects.get(Q(football365_teamcode=row['HOMETEAMCODE']) | Q(title=row['HOMETEAM']), leagues=league)
                        away_team = Team.objects.get(Q(football365_teamcode=row['AWAYTEAMCODE']) | Q(title=row['AWAYTEAM']), leagues=league)
                    except (Team.DoesNotExist, Team.MultipleObjectsReturned):
                        continue
                    # Does the fixture already exist?
                    # Don't filter by group as well - will cause duplicates
                    q = league.fixture_set.filter(
                        home_team=home_team,
                        away_team=away_team,
                        datetime=row['STARTTIME']
                    )
                    if q.exists():
                        # Already stored; correct the group if it changed.
                        if q[0].group != group:
                            q[0].group = group
                            q[0].save()
                        continue
                    # New fixture
                    Fixture.objects.create(
                        league=league,
                        group=group,
                        home_team=home_team,
                        away_team=away_team,
                        datetime=row['STARTTIME']
                    )
    def results_commit(self, call, data):
        """Record final scores: update stored fixtures (matched by teams
        and the result's calendar day) or create completed fixtures,
        then re-apply group assignments.
        """
        leagues = League.objects.filter(football365_di=call.football365_service_id)
        for league in leagues:
            for row in data:
                try:
                    home_team = Team.objects.get(Q(football365_teamcode=row['HOMETEAMCODE']) | Q(title=row['HOMETEAM']), leagues=league)
                    away_team = Team.objects.get(Q(football365_teamcode=row['AWAYTEAMCODE']) | Q(title=row['AWAYTEAM']), leagues=league)
                except (Team.DoesNotExist, Team.MultipleObjectsReturned):
                    continue
                # Does the fixture already exist?  Any kickoff time within
                # the result's calendar day counts as a match.
                q = league.fixture_set.filter(
                    home_team=home_team,
                    away_team=away_team,
                    datetime__gte=row['DATE'],
                    datetime__lt=row['DATE'] + datetime.timedelta(days=1)
                )
                if q.exists():
                    # Update
                    fixture = q[0]
                    fixture.home_score = row['HOMETEAMSCORE']
                    fixture.away_score = row['AWAYTEAMSCORE']
                    fixture.completed = True
                    fixture.save()
                else:
                    # New fixture - should not happen if fixtures are fetched daily
                    Fixture.objects.create(
                        league=league,
                        home_team=home_team,
                        away_team=away_team,
                        datetime=row['DATE'],
                        home_score=row['HOMETEAMSCORE'],
                        away_score=row['AWAYTEAMSCORE'],
                        completed=True
                    )
        # Second pass: attach group info, mirroring fixtures_commit.
        leagues = League.objects.all()
        for league in leagues:
            groups = LeagueGroup.objects.filter(league=league, football365_di=call.football365_service_id)
            for group in groups:
                for row in data:
                    try:
                        home_team = Team.objects.get(Q(football365_teamcode=row['HOMETEAMCODE']) | Q(title=row['HOMETEAM']), leagues=league)
                        away_team = Team.objects.get(Q(football365_teamcode=row['AWAYTEAMCODE']) | Q(title=row['AWAYTEAM']), leagues=league)
                    except (Team.DoesNotExist, Team.MultipleObjectsReturned):
                        continue
                    # Does the fixture already exist?
                    # Don't filter by group as well - will cause duplicates
                    q = league.fixture_set.filter(
                        home_team=home_team,
                        away_team=away_team,
                        datetime__gte=row['DATE'],
                        datetime__lt=row['DATE'] + datetime.timedelta(days=1)
                    )
                    if q.exists():
                        # Update
                        fixture = q[0]
                        fixture.home_score = row['HOMETEAMSCORE']
                        fixture.away_score = row['AWAYTEAMSCORE']
                        fixture.completed = True
                        fixture.group = group
                        fixture.save()
                    else:
                        # New fixture - should not happen if fixtures are fetched daily
                        Fixture.objects.create(
                            league=league,
                            group=group,
                            home_team=home_team,
                            away_team=away_team,
                            datetime=row['DATE'],
                            home_score=row['HOMETEAMSCORE'],
                            away_score=row['AWAYTEAMSCORE'],
                            completed=True
                        )
| |
# Copyright (c) 2010, CCP Games
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of CCP Games nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CCP GAMES ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CCP GAMES BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module is API-equivalent to the standard library :mod:`profile` module
but it is greenthread-aware as well as thread-aware. Use this module
to profile Eventlet-based applications in preference to either :mod:`profile` or :mod:`cProfile`.
FIXME: No testcases for this module.
"""
profile_orig = __import__('profile')
__all__ = profile_orig.__all__
from eventlet.patcher import slurp_properties
slurp_properties(profile_orig, globals(), srckeys=dir(profile_orig))
import sys
import functools
from eventlet import greenthread
from eventlet import patcher
import six
thread = patcher.original(six.moves._thread.__name__) # non-monkeypatched module needed
# This class provides the start() and stop() functions
class Profile(profile_orig.Profile):
base = profile_orig.Profile
    def __init__(self, timer=None, bias=None):
        # Remember which greenthread and which OS thread own this profiler,
        # so tasklet switches can be detected while dispatching events.
        self.current_tasklet = greenthread.getcurrent()
        self.thread_id = thread.get_ident()
        self.base.__init__(self, timer, bias)
        # Maps a switched-out tasklet -> (its frame-stack entry, its timings dict).
        self.sleeping = {}
    def __call__(self, *args):
        """make callable, allowing an instance to be the profiler"""
        # Delegate straight to the dispatcher that sys.setprofile() invokes.
        self.dispatcher(*args)
    def _setup(self):
        # Reset profiling state; called lazily by start()/runctx()/runcall().
        self._has_setup = True
        self.cur = None  # current frame-stack entry (profile.Profile format)
        self.timings = {}  # per-function timing accumulator
        self.current_tasklet = greenthread.getcurrent()
        self.thread_id = thread.get_ident()
        # Seed the stack with a synthetic root frame, as profile.Profile does.
        self.simulate_call("profiler")
def start(self, name="start"):
if getattr(self, "running", False):
return
self._setup()
self.simulate_call("start")
self.running = True
sys.setprofile(self.dispatcher)
    def stop(self):
        # Mirror of start(): uninstall the profile hook, then fold all
        # per-tasklet timing data into self.timings.
        sys.setprofile(None)
        self.running = False
        self.TallyTimings()
    # special cases for the original run commands, making sure to
    # clear the timer context.
def runctx(self, cmd, globals, locals):
if not getattr(self, "_has_setup", False):
self._setup()
try:
return profile_orig.Profile.runctx(self, cmd, globals, locals)
finally:
self.TallyTimings()
def runcall(self, func, *args, **kw):
if not getattr(self, "_has_setup", False):
self._setup()
try:
return profile_orig.Profile.runcall(self, func, *args, **kw)
finally:
self.TallyTimings()
    def trace_dispatch_return_extend_back(self, frame, t):
        """A hack function to override error checking in parent class. It
        allows invalid returns (where frames weren't previously entered into
        the profiler) which can happen for all the tasklets that suddenly start
        to get monitored. This means that the time will eventually be attributed
        to a call high in the chain, when there is a tasklet switch
        """
        # Returning from the synthetic root frame: nothing left to unwind.
        if isinstance(self.cur[-2], Profile.fake_frame):
            return False
        # Retroactively simulate the missing call so the return balances.
        self.trace_dispatch_call(frame, 0)
        return self.trace_dispatch_return(frame, t)
def trace_dispatch_c_return_extend_back(self, frame, t):
# same for c return
if isinstance(self.cur[-2], Profile.fake_frame):
return False # ignore bogus returns
self.trace_dispatch_c_call(frame, 0)
return self.trace_dispatch_return(frame, t)
def SwitchTasklet(self, t0, t1, t):
# tally the time spent in the old tasklet
pt, it, et, fn, frame, rcur = self.cur
cur = (pt, it + t, et, fn, frame, rcur)
# we are switching to a new tasklet, store the old
self.sleeping[t0] = cur, self.timings
self.current_tasklet = t1
# find the new one
try:
self.cur, self.timings = self.sleeping.pop(t1)
except KeyError:
self.cur, self.timings = None, {}
self.simulate_call("profiler")
self.simulate_call("new_tasklet")
def TallyTimings(self):
oldtimings = self.sleeping
self.sleeping = {}
# first, unwind the main "cur"
self.cur = self.Unwind(self.cur, self.timings)
# we must keep the timings dicts separate for each tasklet, since it contains
# the 'ns' item, recursion count of each function in that tasklet. This is
# used in the Unwind dude.
for tasklet, (cur, timings) in six.iteritems(oldtimings):
self.Unwind(cur, timings)
for k, v in six.iteritems(timings):
if k not in self.timings:
self.timings[k] = v
else:
# accumulate all to the self.timings
cc, ns, tt, ct, callers = self.timings[k]
# ns should be 0 after unwinding
cc += v[0]
tt += v[2]
ct += v[3]
for k1, v1 in six.iteritems(v[4]):
callers[k1] = callers.get(k1, 0) + v1
self.timings[k] = cc, ns, tt, ct, callers
def Unwind(self, cur, timings):
"A function to unwind a 'cur' frame and tally the results"
"see profile.trace_dispatch_return() for details"
# also see simulate_cmd_complete()
while(cur[-1]):
rpt, rit, ret, rfn, frame, rcur = cur
frame_total = rit + ret
if rfn in timings:
cc, ns, tt, ct, callers = timings[rfn]
else:
cc, ns, tt, ct, callers = 0, 0, 0, 0, {}
if not ns:
ct = ct + frame_total
cc = cc + 1
if rcur:
ppt, pit, pet, pfn, pframe, pcur = rcur
else:
pfn = None
if pfn in callers:
callers[pfn] = callers[pfn] + 1 # hack: gather more
elif pfn:
callers[pfn] = 1
timings[rfn] = cc, ns - 1, tt + rit, ct, callers
ppt, pit, pet, pfn, pframe, pcur = rcur
rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
cur = rcur
return cur
def ContextWrap(f):
    """Wrap a dispatcher callback with tasklet-switch detection.

    Before delegating to the wrapped dispatcher, compare the running
    greenthread with the one the profiler last saw; on a switch, park the
    old tasklet's timing state and zero the elapsed time, since it was
    already billed to the previous tasklet.
    """
    @functools.wraps(f)
    def ContextWrapper(self, arg, t):
        now_running = greenthread.getcurrent()
        if now_running != self.current_tasklet:
            self.SwitchTasklet(self.current_tasklet, now_running, t)
            t = 0.0  # the time was billed to the previous tasklet
        return f(self, arg, t)
    return ContextWrapper
# Add "return safety" to the dispatchers: the *_extend_back variants
# tolerate returns from frames the profiler never saw entered (which
# happens when a tasklet starts being monitored mid-flight).
Profile.dispatch = dict(profile_orig.Profile.dispatch, **{
    'return': Profile.trace_dispatch_return_extend_back,
    'c_return': Profile.trace_dispatch_c_return_extend_back,
})
# Add automatic tasklet detection to the callbacks.
Profile.dispatch = dict((k, ContextWrap(v)) for k, v in six.viewitems(Profile.dispatch))
# run statements shamelessly stolen from profile.py
def run(statement, filename=None, sort=-1):
    """Run statement under profiler optionally saving results in filename

    This function takes a single argument that can be passed to the
    "exec" statement, and an optional file name. In all cases this
    routine attempts to "exec" its first argument and gather profiling
    statistics from the execution. If no file name is present, then this
    function automatically prints a simple profiling report, sorted by the
    standard name string (file/line/function-name) that is presented in
    each line.
    """
    profiler = Profile()
    try:
        profiler = profiler.run(statement)
    except SystemExit:
        # sys.exit() inside the statement is treated as normal completion.
        pass
    if filename is None:
        return profiler.print_stats(sort)
    profiler.dump_stats(filename)
def runctx(statement, globals, locals, filename=None):
    """Run statement under profiler, supplying your own globals and locals,
    optionally saving results in filename.

    statement and filename have the same semantics as profile.run
    """
    profiler = Profile()
    try:
        profiler = profiler.runctx(statement, globals, locals)
    except SystemExit:
        # sys.exit() inside the statement is treated as normal completion.
        pass
    if filename is None:
        return profiler.print_stats()
    profiler.dump_stats(filename)
| |
'''
Created on 1 avr 2014
@author: yguern
'''
import unittest
import os, sys, urlparse,StringIO,shutil
CLASS_PATH = os.path.join(os.path.dirname(__file__), '..', '..','classes')
sys.path.append(CLASS_PATH)
from AbstractFS import fs
class TestAbstractFS(unittest.TestCase):
    """Integration tests for the AbstractFS ``fs`` facade.

    NOTE(review): these tests talk to a live FTP server (10.1.8.241) and
    write to the local ``L:`` drive, so they only pass in that environment.

    Fixes: test_move and test_delete previously checked deletion of the
    *URL string* (``file_src``) with ``open()``/``os.path.isfile()``, which
    always fails/returns False regardless of whether the file was actually
    removed. They now check the real filesystem path (``file_src_path``).
    """

    def test__formatUrl(self):
        '''
        decompose an url in dict; to test, we recompose the url and check
        equality between the source url and the rebuilt url
        '''
        url1 = "ftp://tartampion@10.1.8.241/path"
        url1_reformated = "ftp://tartampion@10.1.8.241:21/path"
        res = fs._formatUrl(url1)
        self.assertEqual(url1_reformated, urlparse.urlunparse((res["handler"], res["netloc"]+":21", res["path"], "", "", "")))
        url2 = "ftp://tartampion@10.1.8.241:21/path"
        url2_reformated = "ftp://tartampion@10.1.8.241:21/path"
        res = fs._formatUrl(url2)
        self.assertEqual(url2_reformated, urlparse.urlunparse((res["handler"], res["netloc"], res["path"], "", "", "")))

    def test__searchHandler(self):
        '''
        we add some handlers and we want to find them again by their credentials
        '''
        url1 = "ftp://tartampion@10.1.8.241:21/path2"
        handler1 = fs._getHandler(url1)
        url2 = "ftp://tartampion@10.1.8.241/path2"
        handler2 = fs._getHandler(url2)
        # # print fs._handlers["ftp"]
        url3 = "ftp://tartampion@10.1.8.241:21/path2"
        url3 = fs._formatUrl(url3)
        url4 = "ftp://tartampion@10.1.8.241/path2"
        url4 = fs._formatUrl(url4)
        self.assertEqual(fs._searchHandler(url3), handler1)
        self.assertEqual(fs._searchHandler(url4), handler2)
        url1 = "ftp://tartampion@10.1.8.241:21/path"
        url2 = "ftp://tartampion2@10.1.8.241:21/path"
        handler1 = fs._getHandler(url1)
        handler2 = fs._getHandler(url2)
        # both of them must be the same, because they use the same credentials
        self.assertEqual(fs._searchHandler(url3), handler1)
        # conversely, two different credentials must get two different
        # connections, so two different instances
        self.assertNotEqual(fs._searchHandler(url3), handler2)

    def test__getHandler(self):
        '''
        In case of error _getHandler returns False
        '''
        # these must be valid credentials
        file_src = "ftp://tartampion@10.1.8.241:8021"
        file_dst = "ftp://tartampion@10.1.8.241:8021"
        self.assertEqual(fs._getHandler(file_src), fs._getHandler(file_dst))

    def test_makedirs(self):
        '''
        For all protocols we create a tree, then we test existence of the path
        '''
        ##########Local###############
        # the tree you want to create must be correct
        local_path = "L:/yolo/path"
        # test correct running
        self.assertNotEqual(fs.makedirs(local_path), False)
        # test correct behavior: does the tree exist in the fs?
        res = False
        if os.path.isdir(local_path):
            res = True
        self.assertTrue(res)
        # cleanup
        shutil.rmtree(local_path)
        ########FTP###################
        # the tree you want to create must be correct
        ftp_path = "ftp://tartampion@10.1.8.241/path/to/file"
        # get an ftp handler to make some tests
        ftp = fs._getHandler(ftp_path)
        # test correct running
        self.assertTrue(fs.makedirs(ftp_path))
        # test correct behavior: does the tree exist on the ftp server?
        res = False
        try:
            ftp._ftp.cwd(urlparse.urlparse(ftp_path).path)
            res = True
        except Exception:
            pass
        self.assertTrue(res)

    def test_rmdirs(self):
        '''
        For all protocols we delete a tree, then we test absence of the path
        '''
        ##########Local###############
        # the tree you want to destroy must be correct
        local_path = "file://L:/yolo"
        local_path2 = "L:/yolo"
        # test correct running
        self.assertNotEqual(fs.rmdirs(local_path), False)
        # test correct behavior: the tree must no longer exist in the fs
        self.assertNotEqual(os.path.isdir(local_path2), True)
        #######FTP###################
        # the tree you want to destroy must be correct
        path = "/path/to/file"
        credentials = "ftp://tartampion@10.1.8.241"
        ftp_path = credentials+path
        # get an ftp handler to make some tests
        ftp = fs._getHandler(ftp_path)
        # test correct running
        self.assertNotEqual(fs.rmdirs(ftp_path), False)
        # test correct behavior: the tree must no longer exist on the server
        res = True
        try:
            ftp._ftp.cwd(urlparse.urlparse(ftp_path).path)
        except Exception:
            res = False
        self.assertFalse(res)

    def test_copy(self):
        '''
        for all protocols we copy a file, then we test integrity of the copy
        '''
        ##########Local###############
        #-------------------------------------------------------------
        #!!! Local -> Local
        file_src = "file://L:/file.txt"
        file_dst = "file://L:/file.txt.cpy"
        file_src_path = "L:/file.txt"
        file_dst_path = "L:/file.txt.cpy"
        handler = fs._getHandler(file_src)
        # generate src file
        with open(file_src_path, "wb") as f:
            f.write("hsdsgnhedgknjkhdgufshedfh")
        # test running
        self.assertTrue(fs.copy(file_src, file_dst))
        # test file integrity
        self.assertEqual(("").join(open(file_src_path).readlines()), ("").join(open(file_dst_path).readlines()))
        # cleanup
        os.remove(file_src_path)
        os.remove(file_dst_path)
        #-------------------------------------------------------------
        #!!! FTP -> FTP
        # this file must exist
        file_src = "ftp://tartampion@10.1.8.241/file_test.txt"
        file_dst = "ftp://tartampion@10.1.8.241/file_test.txt.cpy"
        file_src_path = "/file_test.txt"
        file_dst_path = "/file_test.txt.cpy"
        handler = fs._getHandler(file_src)
        # generate src file
        stream = StringIO.StringIO()
        stream.write("ramdom content")
        handler.put(file_src_path, stream)
        # test running
        self.assertTrue(fs.copy(file_src, file_dst))
        # test file integrity
        self.content = ""
        self.content2 = ""
        def callback(s):
            self.content += s
        handler._ftp.retrbinary('RETR '+ file_src_path, callback)
        def callback(s):
            self.content2 += s
        handler._ftp.retrbinary('RETR '+ file_dst_path, callback)
        self.assertEqual(self.content, self.content2)
        # cleanup
        handler.delete(file_src_path)
        handler.delete(file_dst_path)
        #-------------------------------------------------------------
        #!!! Local -> FTP
        file_src = "file://L:/file.txt"
        file_dst = "ftp://tartampion@10.1.8.241/file_test.txt"
        file_src_path = "L:/file.txt"
        file_dst_path = "/file_test.txt"
        handler = fs._getHandler(file_dst)
        # generate src file
        with open(file_src_path, "wb") as f:
            f.write("hsdsgnhedgknjkhdgufshedfh")
        # test running
        self.assertTrue(fs.copy(file_src, file_dst))
        # test file integrity
        self.content2 = ""
        def callback(s):
            self.content2 += s
        handler._ftp.retrbinary('RETR '+ file_dst_path, callback)
        self.assertEqual(("").join(open(file_src_path).readlines()), self.content2)
        # cleanup
        os.remove(file_src_path)
        handler.delete(file_dst_path)
        #-------------------------------------------------------------
        #!!! FTP -> Local
        file_src = "ftp://tartampion@10.1.8.241/file_test.txt"
        file_dst = "file://L:/file.txt"
        file_src_path = "/file_test.txt"
        file_dst_path = "L:/file.txt"
        handler = fs._getHandler(file_src)
        # generate src file
        stream = StringIO.StringIO()
        stream.write("ramdom content")
        handler.put(file_src_path, stream)
        # test running
        self.assertTrue(fs.copy(file_src, file_dst))
        # test file integrity
        self.content2 = ""
        def callback(s):
            self.content2 += s
        handler._ftp.retrbinary('RETR '+ file_src_path, callback)
        self.assertEqual(("").join(open(file_dst_path).readlines()), self.content2)
        # cleanup
        handler.delete(file_src_path)
        os.remove(file_dst_path)

    def test_move(self):
        '''
        for all protocols we move a file, then we test integrity of the moved
        file and deletion of the source
        '''
        ##########Local###############
        #-------------------------------------------------------------
        #!!! Local -> Local
        file_src = "file://L:/file.txt"
        file_dst = "file://L:/file.txt.cpy"
        file_src_path = "L:/file.txt"
        file_dst_path = "L:/file.txt.cpy"
        handler = fs._getHandler(file_src)
        # generate src file
        content = "hsdsgnhedgknjkhdgufshedfh"
        with open(file_src_path, "wb") as f:
            f.write(content)
        # test running
        self.assertTrue(fs.move(file_src, file_dst))
        # test file integrity
        self.assertEqual(content, ("").join(open(file_dst_path).readlines()))
        # test deletion of src file
        res = True
        try:
            # fix: probe the filesystem path, not the URL string, otherwise
            # open() always fails and this check passes vacuously
            open(file_src_path)
        except Exception:
            res = False
        self.assertFalse(res)
        # cleanup
        os.remove(file_dst_path)
        #-------------------------------------------------------------
        #!!! FTP -> FTP
        # this file must exist
        file_src = "ftp://tartampion@10.1.8.241/file_test.txt"
        file_dst = "ftp://tartampion@10.1.8.241/file_test.txt.cpy"
        file_src_path = "/file_test.txt"
        file_dst_path = "/file_test.txt.cpy"
        handler = fs._getHandler(file_src)
        # generate src file
        stream = StringIO.StringIO()
        stream.write("ramdom content")
        handler.put(file_src_path, stream)
        # test running
        self.assertTrue(fs.move(file_src, file_dst))
        # test file integrity
        self.content2 = ""
        def callback(s):
            self.content2 += s
        handler._ftp.retrbinary('RETR '+ file_dst_path, callback)
        self.assertEqual("ramdom content", self.content2)
        # test deletion of src file
        self.assertFalse(handler.delete(file_src_path))
        # cleanup
        handler.delete(file_dst_path)
        #-------------------------------------------------------------
        #!!! Local -> FTP
        file_src = "file://L:/file.txt"
        file_dst = "ftp://tartampion@10.1.8.241/file_test.txt"
        file_src_path = "L:/file.txt"
        file_dst_path = "/file_test.txt"
        handler = fs._getHandler(file_dst)
        # generate src file
        content = "hsdsgnhedgknjkhdgufshedfh"
        with open(file_src_path, "wb") as f:
            f.write(content)
        # test running
        self.assertTrue(fs.move(file_src, file_dst))
        # test file integrity
        self.content2 = ""
        def callback(s):
            self.content2 += s
        handler._ftp.retrbinary('RETR '+ file_dst_path, callback)
        self.assertEqual(content, self.content2)
        # test deletion of src file
        res = True
        try:
            # fix: probe the filesystem path, not the URL string
            open(file_src_path)
        except Exception:
            res = False
        self.assertFalse(res)
        # cleanup
        handler.delete(file_dst_path)
        #-------------------------------------------------------------
        #!!! FTP -> Local
        file_src = "ftp://tartampion@10.1.8.241/file_test.txt"
        file_dst = "file://L:/file.txt"
        file_src_path = "/file_test.txt"
        file_dst_path = "L:/file.txt"
        handler = fs._getHandler(file_src)
        # generate src file
        stream = StringIO.StringIO()
        stream.write("ramdom content")
        handler.put(file_src_path, stream)
        # test running
        self.assertTrue(fs.move(file_src, file_dst))
        # test file integrity
        self.assertEqual("ramdom content", ("").join(open(file_dst_path).readlines()))
        # test deletion of src file
        self.assertFalse(handler.delete(file_src_path))
        # cleanup
        os.remove(file_dst_path)

    def test_delete(self):
        '''
        we create a file then we delete it
        '''
        #!!! Local
        file_src = "file://L:/file.txt"
        file_src_path = "L:/file.txt"
        # creating file
        with open(file_src_path, "wb") as f:
            f.write("ghsdeiuhgsuidhgu")
        # test running
        self.assertTrue(fs.delete(file_src))
        # test deletion -- fix: check the filesystem path, not the URL string,
        # otherwise isfile() is always False and the check passes vacuously
        self.assertFalse(os.path.isfile(file_src_path))
        #!!! FTP
        file_src = "ftp://tartampion@10.1.8.241:8021/file.txt"
        file_src_path = "/file.txt"
        handler = fs._getHandler(file_src)
        # creating file
        stream = StringIO.StringIO()
        stream.write("ramdom content")
        handler.put(file_src_path, stream)
        # test running
        self.assertTrue(fs.delete(file_src))
        # test deletion: deleting again must fail
        self.assertFalse(handler.delete(file_src_path))
if __name__ == "__main__":
    # Force extra-verbose unittest output when run directly.
    sys.argv.append('-vv')
    unittest.main()
| |
import copy
import logging
from pickle import PicklingError
import os
from typing import Sequence
from ray.tune.error import TuneError
from ray.tune.registry import register_trainable, get_trainable_cls
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.sample import Domain
from ray.tune.stopper import CombinedStopper, FunctionStopper, Stopper, \
TimeoutStopper
from ray.tune.utils import detect_checkpoint_function
logger = logging.getLogger(__name__)
def _raise_deprecation_note(deprecated, replacement, soft=False):
"""User notification for deprecated parameter.
Arguments:
deprecated (str): Deprecated parameter.
replacement (str): Replacement parameter to use instead.
soft (bool): Fatal if True.
"""
error_msg = ("`{deprecated}` is deprecated. Please use `{replacement}`. "
"`{deprecated}` will be removed in future versions of "
"Ray.".format(deprecated=deprecated, replacement=replacement))
if soft:
logger.warning(error_msg)
else:
raise DeprecationWarning(error_msg)
def _raise_on_durable(trainable_name, sync_to_driver, upload_dir):
    """Reject sync settings that are incompatible with DurableTrainable.

    A DurableTrainable syncs its own results to ``upload_dir``, so callers
    must pass ``sync_to_driver=False`` and provide a non-empty
    ``upload_dir``; anything else raises ``ValueError``.
    """
    trainable_cls = get_trainable_cls(trainable_name)
    from ray.tune.durable_trainable import DurableTrainable
    if not issubclass(trainable_cls, DurableTrainable):
        return
    if sync_to_driver is not False:
        raise ValueError(
            "EXPERIMENTAL: DurableTrainable will automatically sync "
            "results to the provided upload_dir. "
            "Set `sync_to_driver=False` to avoid data inconsistencies.")
    if not upload_dir:
        raise ValueError(
            "EXPERIMENTAL: DurableTrainable will automatically sync "
            "results to the provided upload_dir. "
            "`upload_dir` must be provided.")
def _validate_log_to_file(log_to_file):
"""Validate ``tune.run``'s ``log_to_file`` parameter. Return
validated relative stdout and stderr filenames."""
if not log_to_file:
stdout_file = stderr_file = None
elif isinstance(log_to_file, bool) and log_to_file:
stdout_file = "stdout"
stderr_file = "stderr"
elif isinstance(log_to_file, str):
stdout_file = stderr_file = log_to_file
elif isinstance(log_to_file, Sequence):
if len(log_to_file) != 2:
raise ValueError(
"If you pass a Sequence to `log_to_file` it has to have "
"a length of 2 (for stdout and stderr, respectively). The "
"Sequence you passed has length {}.".format(len(log_to_file)))
stdout_file, stderr_file = log_to_file
else:
raise ValueError(
"You can pass a boolean, a string, or a Sequence of length 2 to "
"`log_to_file`, but you passed something else ({}).".format(
type(log_to_file)))
return stdout_file, stderr_file
class Experiment:
    """Tracks experiment specifications.

    Implicitly registers the Trainable if needed. The args here take
    the same meaning as the arguments defined `tune.py:run`.

    .. code-block:: python

        experiment_spec = Experiment(
            "my_experiment_name",
            my_func,
            stop={"mean_accuracy": 100},
            config={
                "alpha": tune.grid_search([0.2, 0.4, 0.6]),
                "beta": tune.grid_search([1, 2]),
            },
            resources_per_trial={
                "cpu": 1,
                "gpu": 0
            },
            num_samples=10,
            local_dir="~/ray_results",
            checkpoint_freq=10,
            max_failures=2)
    """

    def __init__(self,
                 name,
                 run,
                 stop=None,
                 time_budget_s=None,
                 config=None,
                 resources_per_trial=None,
                 num_samples=1,
                 local_dir=None,
                 upload_dir=None,
                 trial_name_creator=None,
                 trial_dirname_creator=None,
                 loggers=None,
                 log_to_file=False,
                 sync_to_driver=None,
                 checkpoint_freq=0,
                 checkpoint_at_end=False,
                 sync_on_checkpoint=True,
                 keep_checkpoints_num=None,
                 checkpoint_score_attr=None,
                 export_formats=None,
                 max_failures=0,
                 restore=None):
        config = config or {}
        # Checkpointable *functions* manage their own checkpoints, so the
        # checkpoint_freq / checkpoint_at_end knobs are invalid for them.
        if callable(run) and detect_checkpoint_function(run):
            if checkpoint_at_end:
                raise ValueError("'checkpoint_at_end' cannot be used with a "
                                 "checkpointable function. You can specify "
                                 "and register checkpoints within "
                                 "your trainable function.")
            if checkpoint_freq:
                raise ValueError(
                    "'checkpoint_freq' cannot be used with a "
                    "checkpointable function. You can specify checkpoints "
                    "within your trainable function.")
        # Register the trainable (if not already a registered ID) and fall
        # back to the trainable identifier as the experiment name.
        self._run_identifier = Experiment.register_if_needed(run)
        self.name = name or self._run_identifier
        if upload_dir:
            self.remote_checkpoint_dir = os.path.join(upload_dir, self.name)
        else:
            self.remote_checkpoint_dir = None
        # Resolve `stop` into a dict of stopping criteria and/or a Stopper.
        self._stopper = None
        stopping_criteria = {}
        if not stop:
            pass
        elif isinstance(stop, dict):
            stopping_criteria = stop
        elif callable(stop):
            if FunctionStopper.is_valid_function(stop):
                self._stopper = FunctionStopper(stop)
            elif issubclass(type(stop), Stopper):
                self._stopper = stop
            else:
                raise ValueError("Provided stop object must be either a dict, "
                                 "a function, or a subclass of "
                                 "`ray.tune.Stopper`.")
        else:
            raise ValueError("Invalid stop criteria: {}. Must be a "
                             "callable or dict".format(stop))
        # A wall-clock budget is implemented as an extra TimeoutStopper,
        # combined with any user-supplied stopper.
        if time_budget_s:
            if self._stopper:
                self._stopper = CombinedStopper(self._stopper,
                                                TimeoutStopper(time_budget_s))
            else:
                self._stopper = TimeoutStopper(time_budget_s)
        _raise_on_durable(self._run_identifier, sync_to_driver, upload_dir)
        stdout_file, stderr_file = _validate_log_to_file(log_to_file)
        # Flat spec dict consumed by tune's trial runner.
        spec = {
            "run": self._run_identifier,
            "stop": stopping_criteria,
            "config": config,
            "resources_per_trial": resources_per_trial,
            "num_samples": num_samples,
            "local_dir": os.path.abspath(
                os.path.expanduser(local_dir or DEFAULT_RESULTS_DIR)),
            "upload_dir": upload_dir,
            "remote_checkpoint_dir": self.remote_checkpoint_dir,
            "trial_name_creator": trial_name_creator,
            "trial_dirname_creator": trial_dirname_creator,
            "loggers": loggers,
            "log_to_file": (stdout_file, stderr_file),
            "sync_to_driver": sync_to_driver,
            "checkpoint_freq": checkpoint_freq,
            "checkpoint_at_end": checkpoint_at_end,
            "sync_on_checkpoint": sync_on_checkpoint,
            "keep_checkpoints_num": keep_checkpoints_num,
            "checkpoint_score_attr": checkpoint_score_attr,
            "export_formats": export_formats or [],
            "max_failures": max_failures,
            "restore": os.path.abspath(os.path.expanduser(restore))
            if restore else None
        }
        self.spec = spec

    @classmethod
    def from_json(cls, name, spec):
        """Generates an Experiment object from JSON.

        Args:
            name (str): Name of Experiment.
            spec (dict): JSON configuration of experiment.
        """
        if "run" not in spec:
            raise TuneError("No trainable specified!")
        # Special case the `env` param for RLlib by automatically
        # moving it into the `config` section.
        if "env" in spec:
            spec["config"] = spec.get("config", {})
            spec["config"]["env"] = spec["env"]
            del spec["env"]
        # Deep-copy so the remaining keys can be consumed as kwargs without
        # mutating the caller's dict.
        spec = copy.deepcopy(spec)
        run_value = spec.pop("run")
        try:
            exp = cls(name, run_value, **spec)
        except TypeError:
            raise TuneError("Improper argument from JSON: {}.".format(spec))
        return exp

    @classmethod
    def register_if_needed(cls, run_object):
        """Registers Trainable or Function at runtime.

        Assumes already registered if run_object is a string.
        Also, does not inspect interface of given run_object.

        Arguments:
            run_object (str|function|class): Trainable to run. If string,
                assumes it is an ID and does not modify it. Otherwise,
                returns a string corresponding to the run_object name.

        Returns:
            A string representing the trainable identifier.
        """
        if isinstance(run_object, str):
            return run_object
        elif isinstance(run_object, Domain):
            logger.warning("Not registering trainable. Resolving as variant.")
            return run_object
        elif isinstance(run_object, type) or callable(run_object):
            name = "DEFAULT"
            if hasattr(run_object, "__name__"):
                name = run_object.__name__
            else:
                logger.warning(
                    "No name detected on trainable. Using {}.".format(name))
            try:
                register_trainable(name, run_object)
            except (TypeError, PicklingError) as e:
                # Re-raise with a more actionable message; `from None`
                # suppresses the original chained traceback.
                msg = (
                    f"{str(e)}. The trainable ({str(run_object)}) could not "
                    "be serialized, which is needed for parallel execution. "
                    "To diagnose the issue, try the following:\n\n"
                    "\t- Run `tune.utils.diagnose_serialization(trainable)` "
                    "to check if non-serializable variables are captured "
                    "in scope.\n"
                    "\t- Try reproducing the issue by calling "
                    "`pickle.dumps(trainable)`.\n"
                    "\t- If the error is typing-related, try removing "
                    "the type annotations and try again.\n\n"
                    "If you have any suggestions on how to improve "
                    "this error message, please reach out to the "
                    "Ray developers on github.com/ray-project/ray/issues/")
                raise type(e)(msg) from None
            return name
        else:
            raise TuneError("Improper 'run' - not string nor trainable.")

    @property
    def stopper(self):
        """The resolved Stopper instance, or None if none was configured."""
        return self._stopper

    @property
    def local_dir(self):
        """Absolute local results directory from the spec."""
        return self.spec.get("local_dir")

    @property
    def checkpoint_dir(self):
        """Per-experiment checkpoint directory (None if no local_dir)."""
        if self.local_dir:
            return os.path.join(self.local_dir, self.name)

    @property
    def run_identifier(self):
        """Returns a string representing the trainable identifier."""
        return self._run_identifier
def convert_to_experiment_list(experiments):
    """Produces a list of Experiment objects.

    Converts input from dict, single experiment, or list of
    experiments to list of experiments. If input is None,
    will return an empty list.

    Arguments:
        experiments (Experiment | list | dict): Experiments to run.

    Returns:
        List of experiments.
    """
    # Normalize every accepted input shape into a plain list.
    if experiments is None:
        exp_list = []
    elif isinstance(experiments, Experiment):
        exp_list = [experiments]
    elif type(experiments) is dict:
        exp_list = [
            Experiment.from_json(name, spec)
            for name, spec in experiments.items()
        ]
    else:
        # Assume it is already a list of Experiment objects; validated below.
        exp_list = experiments
    # Validate the normalized list.
    is_valid = (type(exp_list) is list
                and all(isinstance(exp, Experiment) for exp in exp_list))
    if not is_valid:
        raise TuneError("Invalid argument: {}".format(experiments))
    if len(exp_list) > 1:
        logger.info(
            "Running with multiple concurrent experiments. "
            "All experiments will be using the same SearchAlgorithm.")
    return exp_list
| |
# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromite main test runner.
Run the specified tests. If none are specified, we'll scan the
tree looking for tests to run and then only run the semi-fast ones.
You can add a .testignore file to a dir to disable scanning it.
"""
from __future__ import print_function
import errno
import glob
import json
import math
import multiprocessing
import os
import signal
import stat
import subprocess
import sys
import tempfile
from chromite.lib import constants
from chromite.lib import cgroups
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import gs
from chromite.lib import namespaces
from chromite.lib import osutils
from chromite.lib import path_util
from chromite.lib import proctitle
from chromite.lib import timeout_util
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# How long (in minutes) to let a test run before we kill it.
TEST_TIMEOUT = 20
# How long (in minutes) before we send SIGKILL after the timeout above.
TEST_SIG_TIMEOUT = 5
# How long (in seconds) to let tests clean up after CTRL+C is sent.
SIGINT_TIMEOUT = 5
# How long (in seconds) to let all children clean up after CTRL+C is sent.
# This has to be big enough to try and tear down ~72 parallel tests (which is
# how many cores we commonly have in Googler workstations today).
CTRL_C_TIMEOUT = SIGINT_TIMEOUT + 15
# The cache file holds various timing information. This is used later on to
# optimistically sort tests so the slowest ones run first. That way we don't
# wait until all of the fast ones finish before we launch the slow ones.
# NOTE(review): presumably assigned a real path by main() (not visible in this
# chunk) before any test runs -- verify, since RunTest() open()s it directly.
TIMING_CACHE_FILE = None
# Test has to run inside the chroot.
INSIDE = 'inside'
# Test has to run outside the chroot.
OUTSIDE = 'outside'
# Don't run this test (please add a comment as to why).
SKIP = 'skip'
# List all exceptions, with a token describing what's odd here.
SPECIAL_TESTS = {
    # Tests that need to run inside the chroot.
    'cli/cros/lint_unittest': INSIDE,
    'lib/cros_test_lib_unittest': INSIDE,
    'lib/operation_unittest': INSIDE,
    # These require 3rd party modules that are in the chroot.
    'lib/dev_server_wrapper_unittest': INSIDE,
    'lib/xbuddy/build_artifact_unittest': INSIDE,
    'lib/xbuddy/common_util_unittest': INSIDE,
    'lib/xbuddy/downloader_unittest': INSIDE,
    'lib/xbuddy/xbuddy_unittest': INSIDE,
}
SLOW_TESTS = {
    # Tests that require network can be really slow.
    'lib/cros_build_lib_unittest': SKIP,
    'lib/gce_unittest': SKIP,
    'lib/gerrit_unittest': SKIP,
    'lib/patch_unittest': SKIP,
}
def RunTest(test, interp, cmd, tmpfile, finished, total):
    """Run |test| with the |cmd| line and save output to |tmpfile|.

    Args:
        test: The human readable name for this test.
        interp: Which Python version to use.
        cmd: The full command line to run the test.
        tmpfile: File to write test output to.
        finished: Counter to update when this test finishes running.
        total: Total number of tests to run.

    Returns:
        The exit code of the test.
    """
    logging.info('Starting %s %s', interp, test)
    with cros_build_lib.TimedSection() as timer:
        ret = cros_build_lib.run(
            cmd, capture_output=True, check=False,
            stderr=subprocess.STDOUT, debug_level=logging.DEBUG,
            int_timeout=SIGINT_TIMEOUT)
    # Hold the lock while bumping and logging the shared counter so the
    # [i/total] progress numbers stay consistent across parallel workers.
    with finished.get_lock():
        finished.value += 1
        if ret.returncode:
            func = logging.error
            msg = 'Failed'
        else:
            func = logging.info
            msg = 'Finished'
        func('%s [%i/%i] %s %s (%s)', msg, finished.value, total, interp, test,
             timer.delta)
    # Save the timing for this test run for future usage.
    seconds = timer.delta.total_seconds()
    # NOTE(review): TIMING_CACHE_FILE is None at module scope; presumably
    # main() assigns a real path before tests are launched -- verify.
    try:
        cache = json.load(open(TIMING_CACHE_FILE))
    except (IOError, ValueError):
        cache = {}
    if test in cache:
        # NOTE(review): '//' floor-averages old and new timings, dropping
        # fractional seconds -- confirm that integer precision is intended.
        seconds = (cache[test] + seconds) // 2
    cache[test] = seconds
    json.dump(cache, open(TIMING_CACHE_FILE, 'w'))
    # Only keep output for failing tests; successful output is discarded.
    if ret.returncode:
        tmpfile.write(ret.output)
        if not ret.output:
            tmpfile.write('<no output>\n')
    tmpfile.close()
    return ret.returncode
def SortTests(tests, jobs=1, timing_cache_file=None):
    """Interleave the slowest & fastest

    Hopefully we can pipeline the overall process better by queueing the slowest
    tests first while also using half the slots for fast tests. We don't need
    the timing info to be exact, just ballpark.

    Args:
        tests: The list of tests to sort.
        jobs: How many jobs will we run in parallel.
        timing_cache_file: Where to read test timing info.

    Returns:
        The tests ordered for best execution timing (we hope).
    """
    if timing_cache_file is None:
        timing_cache_file = TIMING_CACHE_FILE
    # Usually |tests| will be a generator -- break it down.
    tests = list(tests)
    # If we have enough spare cpus to crunch the jobs, just do so.
    if len(tests) <= jobs:
        return tests
    # Create a dict mapping tests to their timing information using the cache.
    try:
        with cros_build_lib.Open(timing_cache_file) as f:
            cache = json.load(f)
    except (IOError, ValueError):
        cache = {}
    # Sort the cached list of tests from slowest to fastest.
    sorted_tests = [test for (test, _timing) in
                    sorted(cache.items(), key=lambda x: x[1], reverse=True)]
    # Then extract the tests from the cache list that we care about -- remember
    # that the cache could be stale and contain tests that no longer exist, or
    # the user only wants to run a subset of tests.
    ret = []
    for test in sorted_tests:
        if test in tests:
            ret.append(test)
            tests.remove(test)
    # Any tests not in the cache we just throw on the end. No real way to
    # predict their speed ahead of time, and we'll get useful data when they
    # run the test a second time.
    ret += tests
    # Now interleave the fast & slow tests so every other one mixes.
    # On systems with fewer cores, this can help out in two ways:
    # (1) Better utilization of resources when some slow tests are I/O or time
    # bound, so the other cores can spawn/fork fast tests faster (generally).
    # (2) If there is common code that is broken, we get quicker feedback if we
    # churn through the fast tests.
    # Worse case, this interleaving doesn't slow things down overall.
    # |fast| is the back (fastest) portion of |ret|, reversed so the very
    # fastest test comes first; |slow| is the remaining slower front portion.
    fast = ret[:int(math.ceil(len(ret) / 2.0)) - 1:-1]
    slow = ret[:-len(fast)]
    # Weave them together: slow tests at even slots, fast tests at odd slots.
    ret[::2] = slow
    ret[1::2] = fast
    return ret
def BuildTestSets(tests, chroot_available, network, jobs=1,
                  pyver=None):
    """Build the tests to execute.

    Take care of special test handling like whether it needs to be inside or
    outside of the sdk, whether the test should be skipped, etc...

    Args:
        tests: List of tests to execute.
        chroot_available: Whether we can execute tests inside the sdk.
        network: Whether to execute network tests.
        jobs: How many jobs will we run in parallel.
        pyver: Which versions of Python to test against.

    Returns:
        List of tests to execute and their full command line.
    """
    testsets = []

    def PythonWrappers(tests):
        # Expand each test into (test, interpreter) pairs based on |pyver|.
        for test in tests:
            if pyver is None or pyver == 'py2':
                # Skip Python-3-only wrapper scripts for the py2 pass.
                if (os.path.basename(os.path.realpath(test)) not in
                        {'virtualenv_wrapper.py', 'wrapper3.py'}):
                    yield (test, 'python2')
            if pyver is None or pyver == 'py3':
                yield (test, 'python3')

    for (test, interp) in PythonWrappers(SortTests(tests, jobs=jobs)):
        cmd = [interp, test]
        # See if this test requires special consideration.
        status = SPECIAL_TESTS.get(test)
        if status is SKIP:
            logging.info('Skipping %s', test)
            continue
        elif status is INSIDE:
            if not cros_build_lib.IsInsideChroot():
                if not chroot_available:
                    logging.info('Skipping %s: chroot not available', test)
                    continue
                # Re-run the test through cros_sdk so it executes in the chroot.
                cmd = ['cros_sdk', '--', interp,
                       os.path.join('..', '..', 'chromite', test)]
        elif status is OUTSIDE:
            if cros_build_lib.IsInsideChroot():
                logging.info('Skipping %s: must be outside the chroot', test)
                continue
        else:
            # No special status: only run regular, executable files.
            mode = os.stat(test).st_mode
            if stat.S_ISREG(mode):
                if not mode & 0o111:
                    logging.debug('Skipping %s: not executable', test)
                    continue
            else:
                logging.debug('Skipping %s: not a regular file', test)
                continue
        # Build up the final test command.
        cmd.append('--verbose')
        if network:
            cmd.append('--network')
        # Wrap with timeout(1): terminate after TEST_TIMEOUT minutes, then
        # SIGKILL after TEST_SIG_TIMEOUT more minutes.
        cmd = ['timeout', '--preserve-status', '-k', '%sm' % TEST_SIG_TIMEOUT,
               '%sm' % TEST_TIMEOUT] + cmd
        testsets.append((test, interp, cmd, tempfile.TemporaryFile()))
    return testsets
def RunTests(tests, jobs=1, chroot_available=True, network=False,
             dryrun=False, failfast=False, pyver=None):
  """Execute |tests| with |jobs| in parallel (including |network| tests).

  Args:
    tests: The tests to run.
    jobs: How many tests to run in parallel.
    chroot_available: Whether we can run tests inside the sdk.
    network: Whether to run network based tests.
    dryrun: Do everything but execute the test.
    failfast: Stop on first failure
    pyver: Which versions of Python to test against.

  Returns:
    True if all tests pass, else False.
  """
  # Shared integer passed down to RunTest in each child; presumably used for
  # progress accounting across processes -- see RunTest for the contract.
  finished = multiprocessing.Value('i')
  testsets = []
  pids = []
  failed = aborted = False

  def WaitOne():
    # Reap a single child; a non-zero wait status means that test failed.
    (pid, status) = os.wait()
    pids.remove(pid)
    return status

  # Launch all the tests!
  try:
    # Build up the testsets.
    testsets = BuildTestSets(tests, chroot_available, network,
                             jobs=jobs, pyver=pyver)
    # Fork each test and add it to the list.
    for test, interp, cmd, tmpfile in testsets:
      if failed and failfast:
        logging.error('failure detected; stopping new tests')
        break
      # Throttle: never keep more than |jobs| children alive at once.
      if len(pids) >= jobs:
        if WaitOne():
          failed = True
      pid = os.fork()
      if pid == 0:
        # Child process: run exactly one test, then hard-exit.
        proctitle.settitle(test)
        ret = 1
        try:
          if dryrun:
            logging.info('Would have run: %s', cros_build_lib.CmdToStr(cmd))
            ret = 0
          else:
            ret = RunTest(test, interp, cmd, tmpfile, finished, len(testsets))
        except KeyboardInterrupt:
          pass
        except BaseException:
          logging.error('%s failed', test, exc_info=True)
        # We cannot run clean up hooks in the child because it'll break down
        # things like tempdir context managers.
        os._exit(ret)  # pylint: disable=protected-access
      pids.append(pid)
    # Wait for all of them to get cleaned up.
    while pids:
      if WaitOne():
        failed = True
  except KeyboardInterrupt:
    # If the user wants to stop, reap all the pending children.
    logging.warning('CTRL+C received; cleaning up tests')
    aborted = True
    CleanupChildren(pids)
  # Walk through the results.  Any captured output is treated as a failure
  # indicator for that test.
  passed_tests = []
  failed_tests = []
  for test, interp, cmd, tmpfile in testsets:
    tmpfile.seek(0)
    output = tmpfile.read().decode('utf-8', 'replace')
    desc = '[%s] %s' % (interp, test)
    if output:
      failed_tests.append(desc)
      logging.error('### LOG: %s\n%s\n', desc, output.rstrip())
    else:
      passed_tests.append(desc)
  if passed_tests:
    logging.debug('The following %i tests passed:\n %s', len(passed_tests),
                  '\n '.join(sorted(passed_tests)))
  if failed_tests:
    logging.error('The following %i tests failed:\n %s', len(failed_tests),
                  '\n '.join(sorted(failed_tests)))
    return False
  elif aborted or failed:
    return False
  return True
def CleanupChildren(pids):
  """Clean up all the children in |pids|."""
  # Note: SIGINT was already sent due to the CTRL+C via the kernel itself.
  # So this func is just waiting for them to clean up.
  handler = signal.signal(signal.SIGINT, signal.SIG_IGN)

  def _CheckWaitpid(ret):
    # Handed to WaitForSuccess as the check function: drop the reaped pid
    # (if any) from |pids| and report how many children remain (0 == done).
    (pid, _status) = ret
    if pid:
      try:
        pids.remove(pid)
      except ValueError:
        # We might have reaped a grandchild -- be robust.
        pass
    return len(pids)

  def _Waitpid():
    # Non-blocking reap of any child; (0, 0) signals "nothing reapable yet".
    try:
      return os.waitpid(-1, os.WNOHANG)
    except OSError as e:
      if e.errno == errno.ECHILD:
        # All our children went away!
        pids[:] = []
        return (0, 0)
      else:
        raise

  def _RemainingTime(remaining):
    # Progress line on stderr; \r redraws it in place each period.
    print('\rwaiting %s for %i tests to exit ... ' % (remaining, len(pids)),
          file=sys.stderr, end='')

  try:
    timeout_util.WaitForSuccess(_CheckWaitpid, _Waitpid,
                                timeout=CTRL_C_TIMEOUT, period=0.1,
                                side_effect_func=_RemainingTime)
    print('All tests cleaned up!')
    return
  except timeout_util.TimeoutError:
    # Let's kill them hard now.
    print('Hard killing %i tests' % len(pids))
    for pid in pids:
      try:
        os.kill(pid, signal.SIGKILL)
      except OSError as e:
        # ESRCH: the child exited between the timeout and our kill; fine.
        if e.errno != errno.ESRCH:
          raise
  finally:
    # Restore the original SIGINT handler.
    signal.signal(signal.SIGINT, handler)
def FindTests(search_paths=('.',)):
  """Yield the relative path of every *_unittest file under |search_paths|."""
  for base in search_paths:
    for root, subdirs, filenames in os.walk(base):
      # A .testignore marker file prunes this entire subtree.
      if os.path.exists(os.path.join(root, '.testignore')):
        # Delete the dir list in place so os.walk won't descend.
        del subdirs[:]
        continue
      # Never descend into hidden directories.
      subdirs[:] = [d for d in subdirs if not d.startswith('.')]
      rel_root = os.path.relpath(root, base)
      for filename in filenames:
        candidate = os.path.join(rel_root, filename)
        if candidate.endswith('_unittest'):
          yield candidate
def CheckStaleSettings():
  """Check various things to make sure they don't get stale."""
  die = False
  # Every entry in the special/slow lists must still exist on disk.
  for test in SPECIAL_TESTS:
    if os.path.exists(test):
      continue
    die = True
    logging.error('SPECIAL_TESTS is stale: delete old %s', test)
  for test in SLOW_TESTS:
    if os.path.exists(test):
      continue
    die = True
    logging.error('SLOW_TESTS is stale: delete old %s', test)
  # Sanity check wrapper scripts: every bin/ symlink needs a scripts/ source.
  for path in glob.glob('bin/*'):
    if not os.path.islink(path):
      continue
    src = os.path.join('scripts', os.path.basename(path) + '.py')
    if not os.path.exists(src):
      die = True
      logging.error('Stale symlink should be removed: %s', path)
  if die:
    cros_build_lib.Die('Please fix the above problems first')
def ClearPythonCacheFiles():
  """Clear cache files in the chromite repo.

  When switching branches, modules can be deleted or renamed, but the old pyc
  files stick around and confuse Python. This is a bit of a hack, but should
  be good enough for now.
  """
  tracked = cros_build_lib.dbg_run(
      ['git', 'ls-tree', '-r', '-z', '--name-only', 'HEAD'], encoding='utf-8',
      capture_output=True)
  # Every directory containing a tracked file may hold stale caches.
  dirnames = {os.path.dirname(entry) for entry in tracked.stdout.split('\0')}
  for dirname in dirnames:
    for pyc in glob.glob(os.path.join(dirname, '*.pyc')):
      osutils.SafeUnlink(pyc, sudo=True)
    osutils.RmDir(os.path.join(dirname, '__pycache__'), ignore_missing=True,
                  sudo=True)
def ChrootAvailable():
  """See if `cros_sdk` will work at all.

  If we try to run unittests in the buildtools group, we won't be able to
  create one.
  """
  # The chromiumos-overlay project isn't in the buildtools group.
  overlay = os.path.join(constants.SOURCE_ROOT,
                         constants.CHROMIUMOS_OVERLAY_DIR)
  return os.path.exists(overlay)
def _ReExecuteIfNeeded(argv, network):
  """Re-execute as root so we can unshare resources."""
  if os.geteuid() != 0:
    # Not root yet: replace this process with a sudo re-invocation, keeping
    # HOME/PATH so behavior matches the original environment.  os.execvp
    # does not return.
    cmd = ['sudo', '-E', 'HOME=%s' % os.environ['HOME'],
           'PATH=%s' % os.environ['PATH'], '--'] + argv
    os.execvp(cmd[0], cmd)
  else:
    cgroups.Cgroup.InitSystem()
    # Detach from the network namespace too unless network tests were
    # requested.
    namespaces.SimpleUnshare(net=not network, pid=True)
    # We got our namespaces, so switch back to the user to run the tests.
    gid = int(os.environ.pop('SUDO_GID'))
    uid = int(os.environ.pop('SUDO_UID'))
    user = os.environ.pop('SUDO_USER')
    os.initgroups(user, gid)
    os.setresgid(gid, gid, gid)
    os.setresuid(uid, uid, uid)
    os.environ['USER'] = user
def GetParser():
  """Build and return the command line parser for this script."""
  parser = commandline.ArgumentParser(description=__doc__)
  parser.add_argument('-f', '--failfast', default=False, action='store_true',
                      help='Stop on first failure')
  parser.add_argument('-q', '--quick', default=False, action='store_true',
                      help='Only run the really quick tests')
  parser.add_argument('-n', '--dry-run', default=False, action='store_true',
                      dest='dryrun',
                      help='Do everything but actually run the test')
  parser.add_argument('-l', '--list', default=False, action='store_true',
                      help='List all the available tests')
  parser.add_argument('-j', '--jobs', type=int,
                      help='Number of tests to run in parallel at a time')
  parser.add_argument('--network', default=False, action='store_true',
                      help='Run tests that depend on good network connectivity')
  # --py2/--py3 share the |pyver| dest; leaving both unset (None) runs both.
  parser.add_argument('--py2', dest='pyver', action='store_const', const='py2',
                      help='Only run Python 2 unittests.')
  parser.add_argument('--py3', dest='pyver', action='store_const', const='py3',
                      help='Only run Python 3 unittests.')
  parser.add_argument('--clear-pycache', action='store_true',
                      help='Clear .pyc files, then exit without running tests.')
  parser.add_argument('tests', nargs='*', default=None, help='Tests to run')
  return parser
def main(argv):
  """Entry point: parse options, prepare the environment, run the tests."""
  parser = GetParser()
  opts = parser.parse_args(argv)
  opts.Freeze()
  # Process list output quickly as it takes no privileges.
  if opts.list:
    tests = set(opts.tests or FindTests((constants.CHROMITE_DIR,)))
    print('\n'.join(sorted(tests)))
    return
  # Many of our tests require a valid chroot to run. Make sure it's created
  # before we block network access.
  chroot = os.path.join(constants.SOURCE_ROOT, constants.DEFAULT_CHROOT_DIR)
  if (not os.path.exists(chroot) and
      ChrootAvailable() and
      not cros_build_lib.IsInsideChroot()):
    cros_build_lib.run(['cros_sdk', '--create'])
  # This is a cheesy hack to make sure gsutil is populated in the cache before
  # we run tests. This is a partial workaround for crbug.com/468838.
  gs.GSContext.GetDefaultGSUtilBin()
  # Now let's run some tests.
  _ReExecuteIfNeeded([sys.argv[0]] + argv, opts.network)
  # A lot of pieces here expect to be run in the root of the chromite tree.
  # Make them happy.
  os.chdir(constants.CHROMITE_DIR)
  tests = opts.tests or FindTests()
  # Clear python caches now that we're root, in the right dir, but before we
  # run any tests.
  ClearPythonCacheFiles()
  if opts.clear_pycache:
    return
  # Sanity check the environment. https://crbug.com/1015450
  st = os.stat('/')
  if st.st_mode & 0o7777 != 0o755:
    cros_build_lib.Die('The root directory has broken permissions: %o\n'
                       'Fix with: sudo chmod 755 /' % (st.st_mode,))
  if st.st_uid or st.st_gid:
    cros_build_lib.Die('The root directory has broken ownership: %i:%i'
                       ' (should be 0:0)\nFix with: sudo chown 0:0 /' %
                       (st.st_uid, st.st_gid))
  # Sanity check the settings to avoid bitrot.
  CheckStaleSettings()
  # --quick folds SLOW_TESTS into SPECIAL_TESTS; presumably SLOW_TESTS maps
  # those tests to SKIP -- see the module-level definitions to confirm.
  if opts.quick:
    SPECIAL_TESTS.update(SLOW_TESTS)
  global TIMING_CACHE_FILE  # pylint: disable=global-statement
  TIMING_CACHE_FILE = os.path.join(
      path_util.GetCacheDir(), constants.COMMON_CACHE, 'run_tests.cache.json')
  jobs = opts.jobs or multiprocessing.cpu_count()
  with cros_build_lib.ContextManagerStack() as stack:
    # If we're running outside the chroot, try to contain ourselves.
    if cgroups.Cgroup.IsSupported() and not cros_build_lib.IsInsideChroot():
      stack.Add(cgroups.SimpleContainChildren, 'run_tests')
    # Throw all the tests into a custom tempdir so that if we do CTRL+C, we can
    # quickly clean up all the files they might have left behind.
    stack.Add(osutils.TempDir, prefix='chromite.run_tests.', set_global=True,
              sudo_rm=True)
    with cros_build_lib.TimedSection() as timer:
      result = RunTests(
          tests, jobs=jobs, chroot_available=ChrootAvailable(),
          network=opts.network, dryrun=opts.dryrun,
          failfast=opts.failfast, pyver=opts.pyver)
    if result:
      logging.info('All tests succeeded! (%s total)', timer.delta)
    else:
      return 1
  if not opts.network:
    logging.warning('Network tests skipped; use --network to run them')
| |
#!/usr/bin/python
# ---------------------------------------------------------------------------
# File: revision2.py
# Author: Yining Chen
# Modified from IBM's cplex mixed integer programming example mipex1.py
# ---------------------------------------------------------------------------
# Vector x: 2*n*n+7*n total
# 0 to (n*n-1): x(i->j); n*n entries
# (n*n) to (2*n*n-1): f(i->j); n*n entries
# (2*n*n) to (2*n*n-1+n): x extern n entries
# (2*n*n+n) to (2*n*n-1+7*n): f extern (+x, -x, +y, -y, +z, -z) 6*n entries
# ---------------------------------------------------------------------------
# Equations: 2*n*n+3*n total
# x, y, z directions equilibrium * n balls 3*n entries
# f(i)-x(i) if-else clause n*n+n entries
# f(a->b) = f(b->a) n*(n-1)/2 entries
# x(a->b) = x(b->a) n*(n-1)/2 entries
from __future__ import print_function
import sys
import cplex
import math
from cplex.exceptions import CplexError
# constants
m1 = 9999  # kept for compatibility with other revisions of this script
m2 = 8888  # big-M constant linking binary x variables to continuous f variables
verysmall = 0.0
# inputs: coordinates and weight of each of the n balls
n = 4
my_balls_x = [0.0, 0.0, 0.0, 0.0]
my_balls_y = [0.0, 1.0, 0.0, 1.0]
# BUG FIX: this row previously read [1.0, 1,0, 0.0, 0.0] -- the "1,0" typo
# made a five-element list for n=4 balls.  The intended value 1.0 is
# numerically identical to the old effective entries, so results are unchanged.
my_balls_z = [1.0, 1.0, 0.0, 0.0]
my_balls_g = [1.0, 1.0, 1.0, 1.0]
# fill in my_obj (objective coefficients) and my_colnames (variable names)
len_sum = 0.0
my_obj = [0.0 for x in range(2*n*n+7*n)]
my_colnames = ["" for x in range(2*n*n+7*n)]
for i in range(0, n):
    for j in range(0, n):
        # my_obj for each edge (i->j) is -edge_length
        my_obj[i*n+j] = -math.sqrt(
            (my_balls_x[i]-my_balls_x[j])*(my_balls_x[i]-my_balls_x[j])
            + (my_balls_y[i]-my_balls_y[j])*(my_balls_y[i]-my_balls_y[j])
            + (my_balls_z[i]-my_balls_z[j])*(my_balls_z[i]-my_balls_z[j]))
        # summing up all edge_lengths
        len_sum = len_sum - my_obj[i*n+j]
        my_colnames[i*n+j] = "x("+str(i)+","+str(j)+")"
m = n*n - 1
for i in range(n):
    for j in range(0, n):
        m += 1
        my_colnames[m] = "f("+str(i)+","+str(j)+")"
for i in range(n):
    m = m+1
    # my_obj for each external edge is -len_sum-1.0
    my_obj[m] = -len_sum-1.0
    my_colnames[m] = "xex("+str(i)+")"
for i in range(n*6):
    m = m+1
    # BUG FIX: use floor division (//) so the ball index in the name stays an
    # integer under Python 3 as well; under Python 2 i/6 and i//6 are the same.
    my_colnames[m] = "fex("+str(i//6)+","+str(i % 6+1)+")"
# fill in my_ub, my_lb, my_ctype
my_ctype = ""
my_ub = [0.0 for x in range(2*n*n+7*n)]
my_lb = [0.0 for x in range(2*n*n+7*n)]
for i in range(0, n):
    for j in range(0, n):
        # x(i->j) is either 0 or 1 when i!=j
        # x(i->i) has to be 0
        if i != j:
            my_ub[i*n+j] = 1.1
        else:
            my_ub[i*n+j] = 0.1
        my_lb[i*n+j] = -0.1
        my_ctype = my_ctype + "I"
m = n*n - 1
for i in range(0, n*n):
    m = m+1
    # each f is non-negative and has no upper bound
    my_ub[m] = cplex.infinity
    my_lb[m] = 0.0
    my_ctype = my_ctype + "C"
for i in range(0, n):
    m = m+1
    # x_external(i) is either 0 or 1
    my_ub[m] = 1.1
    my_lb[m] = -0.1
    my_ctype = my_ctype + "I"
for i in range(0, n*6):
    m = m+1
    # each f_external is non-negative and has no upper bound
    my_ub[m] = cplex.infinity
    my_lb[m] = 0.0
    my_ctype = my_ctype + "C"
# fill in my_rhs, my_sense, my_rownames
my_sense = ""
my_rhs = [0.0 for x in range(2*n*n+3*n)]
my_rownames = ["r" for x in range(2*n*n+3*n)]
for i in range(2*n*n+3*n):
    my_rownames[i] = "r("+str(i)+")"
for i in range(n):
    # equilibrium in x, y, z directions
    my_rhs[i*3] = 0.0
    my_rhs[i*3+1] = 0.0
    my_rhs[i*3+2] = my_balls_g[i]
    my_sense = my_sense + "EEE"
m = n*3-1
for i in range(n*n+n):
    # when x(i) is 0, f(i) has to be 0
    m = m+1
    my_rhs[m] = verysmall
    my_sense = my_sense + "L"
for i in range(n*(n-1)):
    # Newton's third law
    m = m+1
    my_rhs[m] = 0.0
    my_sense = my_sense + "E"
def populatebyrow(prob):
    """Add all variables and constraints to the CPLEX problem |prob| by row.

    Uses the module-level my_obj/my_lb/my_ub/my_ctype/my_colnames arrays for
    the variables and builds 2*n*n+3*n constraint rows, then prints the whole
    model for inspection before registering the rows with CPLEX.
    """
    prob.objective.set_sense(prob.objective.sense.maximize)
    prob.variables.add(obj=my_obj, lb=my_lb, ub=my_ub, types=my_ctype,
                       names=my_colnames)
    # fill in rows: each entry is [list of var names, list of coefficients]
    rows = [[[] for x in range(2)] for x in range(2*n*n+3*n)]
    # 3*n equilibrium
    for i in range(0, n):
        # rows[i*3]: [[(n + 2) entries],[(n + 2) entries]], equilibrium in x direction
        # rows[i*3+1]: [[(n + 2) entries],[(n + 2) entries]], equilibrium in y direction
        # rows[i*3+2]: [[(n + 2) entries],[(n + 2) entries]], equilibrium in z direction
        rows[i*3][0] = []
        rows[i*3][1] = []
        rows[i*3+1][0] = []
        rows[i*3+1][1] = []
        rows[i*3+2][0] = []
        rows[i*3+2][1] = []
        for j in range(0, n):
            if i != j:
                # Coefficient is the per-axis direction cosine of edge (j->i):
                # -(delta)/my_obj where my_obj holds the negative edge length.
                # NOTE(review): assumes all ball positions are distinct;
                # coincident balls would divide by (negative) zero here.
                rows[i*3][0] += [my_colnames[n*n + j*n+i]]
                rows[i*3][1] += [-(my_balls_x[j]-my_balls_x[i])/my_obj[j*n+i]]
                rows[i*3+1][0] += [my_colnames[n*n + j*n+i]]
                rows[i*3+1][1] += [-(my_balls_y[j]-my_balls_y[i])/my_obj[j*n+i]]
                rows[i*3+2][0] += [my_colnames[n*n + j*n+i]]
                rows[i*3+2][1] += [-(my_balls_z[j]-my_balls_z[i])/my_obj[j*n+i]]
        # add +x, -x
        rows[i*3][0] += [my_colnames[2*n*n+n+i*6], my_colnames[2*n*n+n+i*6+1]]
        rows[i*3][1] += [1.0, -1.0]
        # add +y, -y
        rows[i*3+1][0] += [my_colnames[2*n*n+n+i*6+2], my_colnames[2*n*n+n+i*6+3]]
        rows[i*3+1][1] += [1.0, -1.0]
        # add +z, -z
        rows[i*3+2][0] += [my_colnames[2*n*n+n+i*6+4], my_colnames[2*n*n+n+i*6+5]]
        rows[i*3+2][1] += [1.0, -1.0]
    # when x(i) is 0, f(i) has to be 0 for internal fs (big-M coupling via m2)
    m = n*3-1
    for i in range(0, n):
        for j in range(0, n):
            m += 1
            rows[m][0] = [my_colnames[n*n + i*n+j], my_colnames[i*n+j]]
            rows[m][1] = [1.0, -m2]
    # when x(i) is 0, f(i) has to be 0 for external fs
    for i in range(n):
        m += 1
        rows[m][0] = [my_colnames[2*n*n+n+i*6], my_colnames[2*n*n+n+i*6+1], my_colnames[2*n*n+n+i*6+2], my_colnames[2*n*n+n+i*6+3], my_colnames[2*n*n+n+i*6+4], my_colnames[2*n*n+n+i*6+5], my_colnames[2*n*n+i]]
        rows[m][1] = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -m2]
    # f(a,b)=f(b,a)
    for i in range(0, n-1):
        for j in range(i+1, n):
            m += 1
            rows[m][0] = [my_colnames[n*n + j*n+i], my_colnames[n*n + i*n+j]]
            rows[m][1] = [1.0, -1.0]
    # x(a,b)=x(b,a)
    for i in range(0, n-1):
        for j in range(i+1, n):
            m += 1
            rows[m][0] = [my_colnames[j*n+i], my_colnames[i*n+j]]
            rows[m][1] = [1.0, -1.0]
    print("Constraints Printout:")
    for i in range(2*n*n+7*n):
        print("Column ", i, my_lb[i], "<=", my_colnames[i], "<=", my_ub[i], "weight =", my_obj[i], "type =", my_ctype[i])
    print()
    print("Equations Printout:")
    for i in range(2*n*n+3*n):
        print(i, rows[i], my_sense[i], my_rhs[i])
    print()
    prob.linear_constraints.add(lin_expr=rows, senses=my_sense,
                                rhs=my_rhs, names=my_rownames)
def main():
    """Build the model, solve it with CPLEX, and print the solution."""
    try:
        my_prob = cplex.Cplex()
        # populatebyrow() returns None; |handle| is kept only to mirror the
        # IBM mipex1.py sample this script was derived from.
        handle = populatebyrow(my_prob)
        my_prob.solve()
    except CplexError as exc:
        print(exc)
        return
    print()
    # solution.get_status() returns an integer code
    print("Solution status = ", my_prob.solution.get_status(), ":", end=' ')
    # the following line prints the corresponding string
    print(my_prob.solution.status[my_prob.solution.get_status()])
    print("Solution value = ", my_prob.solution.get_objective_value())
    numcols = my_prob.variables.get_num()
    numrows = my_prob.linear_constraints.get_num()
    slack = my_prob.solution.get_linear_slacks()
    x = my_prob.solution.get_values()
    for j in range(numrows):
        print("Row %d: Slack = %10f" % (j, slack[j]))
    for j in range(numcols):
        print("Column %d %s: Value = %10f" % (j, my_colnames[j], x[j]))


if __name__ == "__main__":
    main()
| |
from collections import (
OrderedDict,
defaultdict,
)
from datetime import datetime
import numpy as np
import pytest
import pytz
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameToDict:
    """Tests for ``DataFrame.to_dict`` across its orient/into options."""

    def test_to_dict_timestamp(self):
        # GH#11247
        # split/records producing np.datetime64 rather than Timestamps
        # on datetime64[ns] dtypes only
        tsmp = Timestamp("20130101")
        test_data = DataFrame({"A": [tsmp, tsmp], "B": [tsmp, tsmp]})
        test_data_mixed = DataFrame({"A": [tsmp, tsmp], "B": [1, 2]})
        expected_records = [{"A": tsmp, "B": tsmp}, {"A": tsmp, "B": tsmp}]
        expected_records_mixed = [{"A": tsmp, "B": 1}, {"A": tsmp, "B": 2}]
        assert test_data.to_dict(orient="records") == expected_records
        assert test_data_mixed.to_dict(orient="records") == expected_records_mixed

        expected_series = {
            "A": Series([tsmp, tsmp], name="A"),
            "B": Series([tsmp, tsmp], name="B"),
        }
        expected_series_mixed = {
            "A": Series([tsmp, tsmp], name="A"),
            "B": Series([1, 2], name="B"),
        }
        tm.assert_dict_equal(test_data.to_dict(orient="series"), expected_series)
        tm.assert_dict_equal(
            test_data_mixed.to_dict(orient="series"), expected_series_mixed
        )

        expected_split = {
            "index": [0, 1],
            "data": [[tsmp, tsmp], [tsmp, tsmp]],
            "columns": ["A", "B"],
        }
        expected_split_mixed = {
            "index": [0, 1],
            "data": [[tsmp, 1], [tsmp, 2]],
            "columns": ["A", "B"],
        }
        tm.assert_dict_equal(test_data.to_dict(orient="split"), expected_split)
        tm.assert_dict_equal(
            test_data_mixed.to_dict(orient="split"), expected_split_mixed
        )

    def test_to_dict_index_not_unique_with_index_orient(self):
        # GH#22801
        # Data loss when indexes are not unique. Raise ValueError.
        df = DataFrame({"a": [1, 2], "b": [0.5, 0.75]}, index=["A", "A"])
        msg = "DataFrame index must be unique for orient='index'"
        with pytest.raises(ValueError, match=msg):
            df.to_dict(orient="index")

    def test_to_dict_invalid_orient(self):
        df = DataFrame({"A": [0, 1]})
        msg = "orient 'xinvalid' not understood"
        with pytest.raises(ValueError, match=msg):
            df.to_dict(orient="xinvalid")

    @pytest.mark.parametrize("orient", ["d", "l", "r", "sp", "s", "i"])
    def test_to_dict_short_orient_warns(self, orient):
        # GH#32515
        df = DataFrame({"A": [0, 1]})
        msg = "Using short name for 'orient' is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            df.to_dict(orient=orient)

    @pytest.mark.parametrize("mapping", [dict, defaultdict(list), OrderedDict])
    def test_to_dict(self, mapping):
        # orient= should only take the listed options
        # see GH#32515
        test_data = {"A": {"1": 1, "2": 2}, "B": {"1": "1", "2": "2", "3": "3"}}

        # GH#16122
        recons_data = DataFrame(test_data).to_dict(into=mapping)
        for k, v in test_data.items():
            for k2, v2 in v.items():
                assert v2 == recons_data[k][k2]

        recons_data = DataFrame(test_data).to_dict("list", mapping)
        for k, v in test_data.items():
            for k2, v2 in v.items():
                # "list" orient positions values by row order, not index label.
                assert v2 == recons_data[k][int(k2) - 1]

        recons_data = DataFrame(test_data).to_dict("series", mapping)
        for k, v in test_data.items():
            for k2, v2 in v.items():
                assert v2 == recons_data[k][k2]

        recons_data = DataFrame(test_data).to_dict("split", mapping)
        expected_split = {
            "columns": ["A", "B"],
            "index": ["1", "2", "3"],
            "data": [[1.0, "1"], [2.0, "2"], [np.nan, "3"]],
        }
        tm.assert_dict_equal(recons_data, expected_split)

        recons_data = DataFrame(test_data).to_dict("records", mapping)
        expected_records = [
            {"A": 1.0, "B": "1"},
            {"A": 2.0, "B": "2"},
            {"A": np.nan, "B": "3"},
        ]
        assert isinstance(recons_data, list)
        assert len(recons_data) == 3
        for left, right in zip(recons_data, expected_records):
            tm.assert_dict_equal(left, right)

        # GH#10844
        recons_data = DataFrame(test_data).to_dict("index")
        for k, v in test_data.items():
            for k2, v2 in v.items():
                assert v2 == recons_data[k2][k]

        df = DataFrame(test_data)
        df["duped"] = df[df.columns[0]]
        recons_data = df.to_dict("index")
        comp_data = test_data.copy()
        comp_data["duped"] = comp_data[df.columns[0]]
        for k, v in comp_data.items():
            for k2, v2 in v.items():
                assert v2 == recons_data[k2][k]

    @pytest.mark.parametrize("mapping", [list, defaultdict, []])
    def test_to_dict_errors(self, mapping):
        # GH#16122
        df = DataFrame(np.random.randn(3, 3))
        msg = "|".join(
            [
                "unsupported type: <class 'list'>",
                r"to_dict\(\) only accepts initialized defaultdicts",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            df.to_dict(into=mapping)

    def test_to_dict_not_unique_warning(self):
        # GH#16927: When converting to a dict, if a column has a non-unique name
        # it will be dropped, throwing a warning.
        df = DataFrame([[1, 2, 3]], columns=["a", "a", "b"])
        with tm.assert_produces_warning(UserWarning):
            df.to_dict()

    # orient - orient argument to to_dict function
    # item_getter - function for extracting value from
    # the resulting dict using column name and index
    @pytest.mark.parametrize(
        "orient,item_getter",
        [
            ("dict", lambda d, col, idx: d[col][idx]),
            ("records", lambda d, col, idx: d[idx][col]),
            ("list", lambda d, col, idx: d[col][idx]),
            ("split", lambda d, col, idx: d["data"][idx][d["columns"].index(col)]),
            ("index", lambda d, col, idx: d[idx][col]),
        ],
    )
    def test_to_dict_box_scalars(self, orient, item_getter):
        # GH#14216, GH#23753
        # make sure that we are boxing properly
        df = DataFrame({"a": [1, 2], "b": [0.1, 0.2]})
        result = df.to_dict(orient=orient)
        assert isinstance(item_getter(result, "a", 0), int)
        assert isinstance(item_getter(result, "b", 0), float)

    def test_to_dict_tz(self):
        # GH#18372 When converting to dict with orient='records' columns of
        # datetime that are tz-aware were not converted to required arrays
        data = [
            (datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),),
            (datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc),),
        ]
        df = DataFrame(list(data), columns=["d"])
        result = df.to_dict(orient="records")
        expected = [
            {"d": Timestamp("2017-11-18 21:53:00.219225+0000", tz=pytz.utc)},
            {"d": Timestamp("2017-11-18 22:06:30.061810+0000", tz=pytz.utc)},
        ]
        tm.assert_dict_equal(result[0], expected[0])
        tm.assert_dict_equal(result[1], expected[1])

    @pytest.mark.parametrize(
        "into, expected",
        [
            (
                dict,
                {
                    0: {"int_col": 1, "float_col": 1.0},
                    1: {"int_col": 2, "float_col": 2.0},
                    2: {"int_col": 3, "float_col": 3.0},
                },
            ),
            (
                OrderedDict,
                OrderedDict(
                    [
                        (0, {"int_col": 1, "float_col": 1.0}),
                        (1, {"int_col": 2, "float_col": 2.0}),
                        (2, {"int_col": 3, "float_col": 3.0}),
                    ]
                ),
            ),
            (
                defaultdict(dict),
                defaultdict(
                    dict,
                    {
                        0: {"int_col": 1, "float_col": 1.0},
                        1: {"int_col": 2, "float_col": 2.0},
                        2: {"int_col": 3, "float_col": 3.0},
                    },
                ),
            ),
        ],
    )
    def test_to_dict_index_dtypes(self, into, expected):
        # GH#18580
        # When using to_dict(orient='index') on a dataframe with int
        # and float columns only the int columns were cast to float
        df = DataFrame({"int_col": [1, 2, 3], "float_col": [1.0, 2.0, 3.0]})
        result = df.to_dict(orient="index", into=into)
        cols = ["int_col", "float_col"]
        result = DataFrame.from_dict(result, orient="index")[cols]
        expected = DataFrame.from_dict(expected, orient="index")[cols]
        tm.assert_frame_equal(result, expected)

    def test_to_dict_numeric_names(self):
        # GH#24940
        df = DataFrame({str(i): [i] for i in range(5)})
        result = set(df.to_dict("records")[0].keys())
        expected = set(df.columns)
        assert result == expected

    def test_to_dict_wide(self):
        # GH#24939
        df = DataFrame({(f"A_{i:d}"): [i] for i in range(256)})
        result = df.to_dict("records")[0]
        expected = {f"A_{i:d}": i for i in range(256)}
        assert result == expected

    @pytest.mark.parametrize(
        "data,dtype",
        (
            ([True, True, False], bool),
            [
                [
                    datetime(2018, 1, 1),
                    datetime(2019, 2, 2),
                    datetime(2020, 3, 3),
                ],
                Timestamp,
            ],
            [[1.0, 2.0, 3.0], float],
            [[1, 2, 3], int],
            [["X", "Y", "Z"], str],
        ),
    )
    def test_to_dict_orient_dtype(self, data, dtype):
        # GH22620 & GH21256
        df = DataFrame({"a": data})
        d = df.to_dict(orient="records")
        assert all(type(record["a"]) is dtype for record in d)

    @pytest.mark.parametrize(
        "data,expected_dtype",
        (
            [np.uint64(2), int],
            [np.int64(-9), int],
            [np.float64(1.1), float],
            [np.bool_(True), bool],
            [np.datetime64("2005-02-25"), Timestamp],
        ),
    )
    def test_to_dict_scalar_constructor_orient_dtype(self, data, expected_dtype):
        # GH22620 & GH21256
        df = DataFrame({"a": data}, index=[0])
        d = df.to_dict(orient="records")
        result = type(d[0]["a"])
        assert result is expected_dtype

    def test_to_dict_mixed_numeric_frame(self):
        # GH 12859
        df = DataFrame({"a": [1.0], "b": [9.0]})
        result = df.reset_index().to_dict("records")
        expected = [{"index": 0, "a": 1.0, "b": 9.0}]
        assert result == expected

    @pytest.mark.parametrize(
        "index",
        [
            None,
            Index(["aa", "bb"]),
            Index(["aa", "bb"], name="cc"),
            MultiIndex.from_tuples([("a", "b"), ("a", "c")]),
            MultiIndex.from_tuples([("a", "b"), ("a", "c")], names=["n1", "n2"]),
        ],
    )
    @pytest.mark.parametrize(
        "columns",
        [
            ["x", "y"],
            Index(["x", "y"]),
            Index(["x", "y"], name="z"),
            MultiIndex.from_tuples([("x", 1), ("y", 2)]),
            MultiIndex.from_tuples([("x", 1), ("y", 2)], names=["z1", "z2"]),
        ],
    )
    def test_to_dict_orient_tight(self, index, columns):
        # Round-trip through orient="tight" must preserve index/column names.
        df = DataFrame.from_records(
            [[1, 3], [2, 4]],
            columns=columns,
            index=index,
        )
        roundtrip = DataFrame.from_dict(df.to_dict(orient="tight"), orient="tight")
        tm.assert_frame_equal(df, roundtrip)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) Mark Kusch <mark.kusch@silpion.de>
DOCUMENTATION = '''
---
module: keystore
short_description: Manage certificates with Java jks keystore
extends_documentation_fragment: files
description:
- The M(keystore) module allows to install and uninstall certificates in a Java keystore with keytool
options:
state:
description:
- Whether a certificate should be C(present) or C(absent)
required: false
default: present
choices: [present, absent]
path:
description:
- Path to the keystore file being managed. Aliases: I(dest), I(name)
required: true
default: None
aliases: ['dest', 'name']
create:
description:
- Whether to create a new keystore if it does not exist.
required: false
default: true
alias:
description:
- Alias name or ID for the certificate inside the keystore.
required: true
default: None
crt:
description:
- Path to a file containing the SSL certificate, mandatory when state=present
required: false
default: None
password:
description:
- Password for the keystore
required: true
default: None
keytool:
description:
- Path to a Java keytool for performing operations (required when keytool not in PATH)
required: false
default: None
copy:
description:
- Whether to copy files to the remote host
required: false
default: True
creates:
description:
- A filename, when it already exists, this step will B(not) be run.
# informational: requirements for nodes
requirements: []
author: Mark Kusch
todo:
- implementation for truststore vs keystore
- whether to install pkcs12 and convert to jks with openssl
notes:
- requires keytool either in $PATH or supplied with keytool= argument
- does not allow to install private keys
'''
EXAMPLES = '''
- keystore: state=present path=/etc/app/cacerts owner=foo group=foo mode=0644 alias=foo crt=/tmp/app.crt
- keystore: state=absent dest=/etc/app/cacerts alias=bar
'''
import sys
import os
class Keystore(object):
    """Wrapper around the Java `keytool` binary for a single keystore file."""

    def __init__(self, module, keytool):
        """Capture module parameters and the resolved keytool path.

        Args:
            module: the AnsibleModule driving this run.
            keytool: path to an executable keytool binary.
        """
        self.module = module
        self.state = module.params['state']
        self.path = os.path.expanduser(module.params['path'])
        self.create = module.boolean(module.params['create'])
        self.alias = module.params['alias']
        # BUG FIX: crt is optional (state=absent never supplies it), but
        # os.path.expanduser(None) raises.  Only expand a real path.
        crt = module.params['crt']
        self.crt = os.path.expanduser(crt) if crt else None
        self.keytool = keytool
        self.password = module.params['password']
        self.copy = module.boolean(module.params['copy'])
        self.creates = module.params['creates']
        self.file_args = module.load_file_common_arguments(module.params)

    def exists(self):
        """Return True if the keystore file exists on disk."""
        return os.path.isfile(self.path)

    def is_crt(self):
        """Return True if |alias| is already present in the keystore."""
        if not self.exists():
            return False
        cmd = [self.keytool,
               '-noprompt',
               '-list',
               '-keystore', self.path,
               '-storepass', self.password,
               '-alias', self.alias]
        (rc, out, err) = self.module.run_command(cmd)
        # keytool exits 0 only when the alias was found.
        if rc == 0:
            return True
        return False

    def crt_add(self):
        """Import self.crt under |alias| via keytool.

        Returns the (rc, stdout, stderr) tuple from run_command, or None if
        the alias is already present (nothing to do).
        """
        if self.is_crt():
            return None
        if not self.crt:
            # fail_json terminates the module run.
            self.module.fail_json(name=self.alias, msg='crt is required when adding certificates')
        cmd = [self.keytool,
               '-noprompt',
               '-keystore', self.path,
               '-trustcacerts',
               '-import',
               '-file', self.crt,
               '-alias', self.alias,
               '-storepass', self.password]
        return self.module.run_command(cmd)

    def crt_del(self):
        """Delete |alias| from the keystore.

        Returns the (rc, stdout, stderr) tuple from run_command, or None if
        the alias is not present.
        """
        if not self.is_crt():
            return None
        cmd = [self.keytool,
               '-noprompt',
               '-keystore', self.path,
               '-storepass', self.password,
               '-alias', self.alias,
               '-delete']
        return self.module.run_command(cmd)

    def set_fs_attributes_if_different(self, changed):
        """Apply owner/group/mode file args; returns the updated changed flag."""
        return self.module.set_fs_attributes_if_different(self.file_args, changed)
def main():
    """Entry point: parse parameters and converge the keystore to |state|."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent'], type='str'),
            path = dict(aliases=['name', 'dest'], required=True, type='str'),
            create = dict(default=True, choices=BOOLEANS),
            alias = dict(required=True, type='str'),
            crt = dict(required=False, default=None, type='str'),
            keytool = dict(required=False, default=None, type='str'),
            # no_log keeps the store password out of logs and output.
            password = dict(required=True, default=None, type='str', no_log=True),
            copy = dict(required=False, choices=BOOLEANS, default=True),
            creates = dict(required=False, default=None, type='str'),
        ),
        add_file_common_args=True,
        supports_check_mode=True
    )
    # Prefer keytool from $PATH; fall back to an explicitly supplied binary.
    keytool = module.get_bin_path('keytool', False)
    if keytool is None and module.params['keytool'] is not None:
        if os.path.isfile(module.params['keytool']) and os.access(module.params['keytool'], os.X_OK):
            keytool = module.params['keytool']
    if keytool is None:
        module.fail_json(msg='cannot execute keytool: no such file or directory')
    keystore = Keystore(module, keytool)
    rc = None
    out = ''
    err = ''
    result = {}
    result['path'] = keystore.path
    result['state'] = keystore.state
    # BUG FIX: only validate the certificate file when one was supplied;
    # state=absent legitimately runs without a crt, and os.path.exists(None)
    # raises TypeError.
    if keystore.crt is not None:
        if not os.path.exists(keystore.crt):
            if keystore.copy:
                module.fail_json(msg="File '%s' failed to transfer" % os.path.basename(keystore.crt))
        if not os.access(keystore.crt, os.R_OK):
            module.fail_json(msg="File '%s' is not readable" % os.path.basename(keystore.crt))
    if keystore.state == 'absent':
        if keystore.is_crt():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = keystore.crt_del()
            if rc != 0:
                module.fail_json(name=keystore.alias, msg=err)
    elif keystore.state == 'present':
        if not keystore.is_crt():
            if not keystore.exists() and not keystore.create:
                module.exit_json(changed=False, msg='Not creating new keystore (use create=yes)')
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = keystore.crt_add()
            if rc != 0:
                module.fail_json(name=keystore.alias, msg=err)
    # NOTE(review): passes the raw rc as the "changed" flag, as the original
    # did -- confirm whether `rc is not None` was the real intent.
    keystore.set_fs_attributes_if_different(rc)
    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True
    if out:
        result['out'] = out
    if err:
        result['err'] = err
    module.exit_json(**result)
# Historic Ansible pattern: the module_utils source is inlined at deploy time,
# and this wildcard import provides AnsibleModule and BOOLEANS used above.
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| |
from __future__ import absolute_import, division, print_function
import re
from functools import partial
from graphviz import Digraph
from .core import istask, get_dependencies, ishashable
from .utils import funcname
def task_label(task):
    """Label for a task on a dot graph.

    Examples
    --------
    >>> from operator import add
    >>> task_label((add, 1, 2))
    'add'
    >>> task_label((add, (add, 1, 2), 3))
    'add(...)'
    """
    func = task[0]
    # Composed callables expose their chain via a `funcs` attribute; label
    # with the first function in the chain.
    if hasattr(func, 'funcs'):
        if len(func.funcs) > 1:
            return '{0}(...)'.format(funcname(func.funcs[0]))
        head = funcname(func.funcs[0])
    else:
        head = funcname(task[0])
    # Append "(...)" whenever any argument is itself a (nested) task.
    nested = any(has_sub_tasks(arg) for arg in task[1:])
    return '{0}(...)'.format(head) if nested else head
def has_sub_tasks(task):
    """Returns True if the task has sub tasks"""
    if istask(task):
        return True
    if isinstance(task, list):
        # A list argument may hide tasks at any depth.
        return any(has_sub_tasks(item) for item in task)
    return False
def name(x):
    """Graphviz node name for ``x``: its hash, falling back to hashing str(x)."""
    try:
        h = hash(x)
    except TypeError:
        # Unhashable values (lists, dicts): hash their string form instead.
        h = hash(str(x))
    return str(h)
_HASHPAT = re.compile('([0-9a-z]{32})')
def label(x, cache=None):
    """Shorten 32-char hex hashes embedded in ``str(x)`` to ``#`` markers.
    >>> label('x')
    'x'
    >>> label(('x', 1))
    "('x', 1)"
    >>> from hashlib import md5
    >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()
    >>> x
    'x-81dc9bdb52d04dc20036dbd8313ed055-hello'
    >>> label(x)
    'x-#-hello'
    """
    text = str(x)
    match = _HASHPAT.search(text)
    if match is None:
        return text
    for token in match.groups():
        if cache is None:
            short = '#'
        else:
            # Number hashes consistently across calls sharing one cache.
            idx = cache.get(token, len(cache))
            short = '#{0}'.format(idx)
            # cache will be overwritten destructively
            cache[token] = idx
        text = text.replace(token, short)
    return text
def to_graphviz(dsk, data_attributes=None, function_attributes=None, **kwargs):
    """Convert a dask graph dict into a ``graphviz.Digraph``.

    Data keys are drawn as boxes and task functions as circles; edges run
    from dependencies into functions and from functions to their outputs
    (bottom-to-top layout).  ``data_attributes`` / ``function_attributes``
    map graph keys to extra graphviz node attributes; remaining ``kwargs``
    become graph-level attributes.
    """
    if data_attributes is None:
        data_attributes = {}
    if function_attributes is None:
        function_attributes = {}
    attributes = {'rankdir': 'BT'}
    attributes.update(kwargs)
    g = Digraph(graph_attr=attributes)
    # `seen` prevents duplicate node definitions; `cache` keeps hash
    # abbreviations consistent across all labels in this graph.
    seen = set()
    cache = {}
    for k, v in dsk.items():
        k_name = name(k)
        if k_name not in seen:
            seen.add(k_name)
            g.node(k_name, label=label(k, cache=cache), shape='box',
                   **data_attributes.get(k, {}))
        if istask(v):
            # A distinct circle node represents the function of task `k`.
            func_name = name((k, 'function'))
            if func_name not in seen:
                seen.add(func_name)
                g.node(func_name, label=task_label(v), shape='circle',
                       **function_attributes.get(k, {}))
            g.edge(func_name, k_name)
            for dep in get_dependencies(dsk, k):
                dep_name = name(dep)
                if dep_name not in seen:
                    seen.add(dep_name)
                    g.node(dep_name, label=label(dep, cache=cache), shape='box',
                           **data_attributes.get(dep, {}))
                g.edge(dep_name, func_name)
        elif ishashable(v) and v in dsk:
            # Alias: key `k` points directly at another key; plain data edge.
            g.edge(name(v), k_name)
    return g
IPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])
IPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])
def _get_display_cls(format):
"""
Get the appropriate IPython display class for `format`.
Returns `IPython.display.SVG` if format=='svg', otherwise
`IPython.display.Image`.
If IPython is not importable, return dummy function that swallows its
arguments and returns None.
"""
dummy = lambda *args, **kwargs: None
try:
import IPython.display as display
except ImportError:
# Can't return a display object if no IPython.
return dummy
if format in IPYTHON_NO_DISPLAY_FORMATS:
# IPython can't display this format natively, so just return None.
return dummy
elif format in IPYTHON_IMAGE_FORMATS:
# Partially apply `format` so that `Image` and `SVG` supply a uniform
# interface to the caller.
return partial(display.Image, format=format)
elif format == 'svg':
return display.SVG
else:
raise ValueError("Unknown format '%s' passed to `dot_graph`" % format)
def dot_graph(dsk, filename='mydask', format=None, **kwargs):
    """
    Render a task graph using dot.
    If `filename` is not None, write a file to disk with that name in the
    format specified by `format`. `filename` should not include an extension.
    Parameters
    ----------
    dsk : dict
        The graph to display.
    filename : str or None, optional
        The name (without an extension) of the file to write to disk. If
        `filename` is None, no file will be written, and we communicate with
        dot using only pipes. Default is 'mydask'.
    format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
        Format in which to write output file. Default is 'png'.
    **kwargs
        Additional keyword arguments to forward to `to_graphviz`.
    Returns
    -------
    result : None or IPython.display.Image or IPython.display.SVG (See below.)
    Notes
    -----
    If IPython is installed, we return an IPython.display object in the
    requested format. If IPython is not installed, we just return None.
    We always return None if format is 'pdf' or 'dot', because IPython can't
    display these formats natively. Passing these formats with filename=None
    will not produce any useful output.
    See Also
    --------
    dask.dot.to_graphviz
    """
    g = to_graphviz(dsk, **kwargs)
    fmts = ['.png', '.pdf', '.dot', '.svg', '.jpeg', '.jpg']
    if format is None and any(filename.lower().endswith(fmt) for fmt in fmts):
        format = filename.lower().split('.')[-1]
        # Strip only the final extension: 'my.dask.png' -> 'my.dask'.
        # (`rsplit('.')` with no maxsplit split on EVERY dot and kept just
        # the first segment, mangling dotted basenames.)
        filename = filename.rsplit('.', 1)[0]
    if format is None:
        format = 'png'
    data = g.pipe(format=format)
    display_cls = _get_display_cls(format)
    if not filename:
        return display_cls(data=data)
    full_filename = '.'.join([filename, format])
    with open(full_filename, 'wb') as f:
        f.write(data)
    return _get_display_cls(format)(filename=full_filename)
| |
##
# Special thanks to @krey for the python3 support
##
import numpy as np
import colorsys
import colorlover as cl
import operator
import copy
from collections import deque
from six import string_types
from IPython.display import HTML, display
from .utils import inverseDict
from .auth import get_config_file
class CufflinksError(Exception):
    """Error raised by cufflinks color utilities (e.g. unknown color name)."""
    pass
def to_rgba(color, alpha):
    """
    Converts from hex|rgb to rgba
    Parameters:
    -----------
        color : string
            Color representation on hex or rgb
        alpha : float
            Value from 0 to 1.0 that represents the
            alpha value.
    Example:
        to_rgba('#E1E5ED',0.6)
        to_rgba('#f03',0.7)
        to_rgba('rgb(23,23,23)',.5)
    """
    if type(color) == tuple:
        color, alpha = color
    color = color.lower()
    if 'rgba' in color:
        # NOTE(review): eval of a color string — inputs are assumed trusted.
        # Renamed from `cl`, which shadowed the module-level colorlover import.
        channels = list(eval(color.replace('rgba', '')))
        # Compare against None (not truthiness) so alpha=0 (fully
        # transparent) is honored instead of silently ignored.
        if alpha is not None:
            channels[3] = alpha
        return 'rgba' + str(tuple(channels))
    elif 'rgb' in color:
        r, g, b = eval(color.replace('rgb', ''))
        return 'rgba' + str((r, g, b, alpha))
    else:
        return to_rgba(hex_to_rgb(color), alpha)
def hex_to_rgb(color):
    """
    Converts from hex to rgb
    Parameters:
    -----------
        color : string
            Color representation on hex or rgb
    Example:
        hex_to_rgb('#E1E5ED')
        hex_to_rgb('#f03')
    """
    # Normalize first so shorthand (#f03) and named colors expand correctly.
    hex_digits = normalize(color)[1:]
    r = int(hex_digits[0:2], base=16)
    g = int(hex_digits[2:4], base=16)
    b = int(hex_digits[4:6], base=16)
    return 'rgb' + str((r, g, b))
def normalize(color):
    """
    Returns an hex color
    Parameters:
    -----------
        color : string
            Color representation in rgba|rgb|hex
    Example:
        normalize('#f03')
    """
    if type(color) == tuple:
        color = to_rgba(*color)
    if 'rgba' in color:
        return rgb_to_hex(rgba_to_rgb(color))
    elif 'rgb' in color:
        return rgb_to_hex(color)
    elif '#' in color:
        if len(color) == 7:
            return color
        else:
            # Expand 3-digit shorthand: '#f03' -> '#ff0033'
            color = color[1:]
            return '#' + ''.join([x * 2 for x in list(color)])
    else:
        try:
            # Named color: resolve through the cnames table.
            return normalize(cnames[color.lower()])
        except KeyError:
            # Previously a bare `except:` which masked unrelated errors
            # (AttributeError, NameError, ...) as "not a valid color".
            raise CufflinksError('Not a valid color: ' + color)
def rgb_to_hex(color):
    """
    Converts from rgb to hex
    Parameters:
    -----------
        color : string
            Color representation on hex or rgb
    Example:
        rgb_to_hex('rgb(23,25,24)')
    """
    # NOTE(review): eval of a color string — inputs are assumed trusted.
    channels = eval(color.replace('rgb', ''))
    digits = ['{0:02x}'.format(value).upper() for value in channels]
    return '#' + ''.join(digits)
def rgba_to_rgb(color, bg='rgb(255,255,255)'):
    """
    Converts from rgba to rgb
    Parameters:
    -----------
        color : string
            Color representation in rgba
        bg : string
            Color representation in rgb
    Example:
        rgba_to_rgb('rgba(23,25,24,.4)')
    """
    def parse_channels(text):
        # Grab the parenthesized tuple part of an rgb/rgba string.
        return eval(text[text.find('('):])
    fg = parse_channels(color)
    background = parse_channels(hex_to_rgb(normalize(bg)))
    alpha = fg[3]
    # Standard alpha compositing of fg over bg, truncated to ints.
    blended = [int((1 - alpha) * background[i] + alpha * fg[i]) for i in range(3)]
    return 'rgb' + str(tuple(blended))
def hex_to_hsv(color):
    """
    Converts from hex to hsv
    Parameters:
    -----------
        color : string
            Color representation on color
    Example:
        hex_to_hsv('#ff9933')
    """
    hex_digits = normalize(color)[1:]
    # Scale each channel into [0, 1] as colorsys expects.
    rgb = tuple(int(hex_digits[i:i + 2], base=16) / 255.0 for i in (0, 2, 4))
    return colorsys.rgb_to_hsv(*rgb)
def color_range(color, N=20):
    """
    Generates a scale of colours from a base colour
    Parameters:
    -----------
        color : string
            Color representation in hex
        N : int
            number of colours to generate
    Example:
        color_range('#ff9933',20)
    """
    base = normalize(color)
    h, s, v = hex_to_hsv(base)
    # Sweep saturation and value independently while holding hue fixed.
    hsv_tuples = [(h, sat, v) for sat in np.arange(0, 1, 2.0 / N)]
    hsv_tuples.extend((h, s, val) for val in np.arange(0, 1, 2.0 / N))
    hex_out = []
    for hsv in hsv_tuples:
        channels = [int(c * 255) for c in colorsys.hsv_to_rgb(*hsv)]
        hex_out.append("#" + "".join('{0:02x}'.format(c) for c in channels))
    # Make sure the original color itself is part of the scale.
    if base not in hex_out:
        hex_out.append(base)
    hex_out.sort()
    return hex_out
def color_table(color, N=1, sort=False, sort_values=False, inline=False, as_html=False):
    """
    Generates a colour table
    Parameters:
    -----------
        color : string | list | dict
            Color representation in rgba|rgb|hex
            If a list of colors is passed then these
            are displayed in a table
        N : int
            number of colours to generate
            When color is not a list then it generaes
            a range of N colors
        sort : bool
            if True then items are sorted
        sort_values : bool
            if True then items are sorted by color values.
            Only applies if color is a dictionary
        inline : bool
            if True it returns single line color blocks
        as_html : bool
            if True it returns the HTML code
    Example:
        color_table('#ff9933')
        color_table(cufflinks.cnames)
        color_table(['pink','salmon','yellow'])
    Note:
        This function only works in iPython Notebook
    """
    # Build `rgb_tup`: either a plain list of hex colors, or (name, hex)
    # tuples when a dict is given.  `c_` marks the base color (if any) so
    # it can be highlighted with a border below.
    if isinstance(color, list):
        c_ = ''
        rgb_tup = [normalize(c) for c in color]
        if sort:
            rgb_tup.sort()
    elif isinstance(color, dict):
        c_ = ''
        items = [(k, normalize(v), hex_to_hsv(normalize(v)))
                 for k, v in list(color.items())]
        if sort_values:
            items = sorted(items, key=operator.itemgetter(2))
        elif sort:
            items = sorted(items, key=operator.itemgetter(0))
        rgb_tup = [(k, v) for k, v, _ in items]
    else:
        c_ = normalize(color)
        if N > 1:
            rgb_tup = np.array(color_range(c_, N))[::-1]
        else:
            rgb_tup = [c_]
    def _color(c):
        # Pick a readable text color/shadow against the swatch background
        # (white text on dark swatches, black on light ones).
        if hex_to_hsv(c)[2] < .5:
            color = "#ffffff"
            shadow = '0 1px 0 #000'
        else:
            color = "#000000"
            shadow = '0 1px 0 rgba(255,255,255,0.6)'
        if c == c_:
            border = " border: 1px solid #ffffff;"
        else:
            border = ''
        return color, shadow, border
    s = '<ul style="list-style-type: none;">' if not inline else ''
    for c in rgb_tup:
        if isinstance(c, tuple):
            k, c = c
            k += ' : '
        else:
            k = ''
        if inline:
            s += '<div style="background-color:{0};height:20px;width:20px;display:inline-block;"></div>'.format(
                c)
        else:
            color, shadow, border = _color(c)
            s += """<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;">
                    <span style=" text-shadow:""" + shadow + """; color:""" + color + """;">""" + k + c.upper() + """</span>
                    </li>"""
    s += '</ul>' if not inline else ''
    if as_html:
        return s
    return display(HTML(s))
def colorgen(colors=None, n=None, scale=None, theme=None):
    """
    Returns a generator with a list of colors
    and gradients of those colors
    Parameters:
    -----------
        colors : list(colors)
            List of colors to use
    Example:
        colorgen()
        colorgen(['blue','red','pink'])
        colorgen(['#f03','rgb(23,25,25)'])
    """
    from .themes import THEMES
    step = .1
    # No explicit colors: resolve a scale from the argument, the theme,
    # or finally the user's config file.
    if not colors:
        if not scale:
            if not theme:
                scale = get_config_file()['colorscale']
            else:
                scale = THEMES[theme]['colorscale']
        colors = get_scales(scale)
    dq = deque(colors)
    if len(dq) == 0:
        # Fall back to a known-good scale rather than yielding nothing.
        dq = deque(get_scales('ggplot'))
    if n:
        # Shrink the alpha step so roughly n colors get produced.
        # NOTE(review): `len(dq) * 8` looks inconsistent with the `0.8`
        # factor on the other side — confirm intent before changing.
        step = len(dq) * 0.8 / n if len(dq) * 8 < n else .1
    # Yield each base color at decreasing opacity, rotating the deque so
    # successive sweeps start from a different color.
    for i in np.arange(.2, 1, step):
        for y in dq:
            yield to_rgba(y, 1 - i + .2)
        dq.rotate(1)
# NEW STUFF
# Color Names
# ---------------------------------
cnames = {'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#3780bf',
'bluegray': '#565656',
'bluepurple': '#6432AB',
'blueviolet': '#8A2BE2',
'brick': '#E24A33',
'brightblue': '#0000FF',
'brightred': '#FF0000',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'charcoal': '#151516',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkgrey': '#A9A9A9',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkslategrey': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'grassgreen': '#32ab60',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'grey': '#808080',
'grey01': '#0A0A0A',
'grey02': '#151516',
'grey03': '#1A1A1C',
'grey04': '#1E1E21',
'grey05': '#252529',
'grey06': '#36363C',
'grey07': '#3C3C42',
'grey08': '#434343',
'grey09': '#666570',
'grey10': '#666666',
'grey11': '#8C8C8C',
'grey12': '#C2C2C2',
'grey13': '#E2E2E2',
'grey14': '#E5E5E5',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'java': '#17BECF',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightpink2': '#fccde5',
'lightpurple': '#bc80bd',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow':'#FAFAD2',
'lightgray': '#D3D3D3',
'lightgreen': '#90EE90',
'lightgrey': '#D3D3D3',
'lightivory': '#F6F6F6',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#B0C4DE',
'lightteal': '#8dd3c7',
'lightyellow': '#FFFFE0',
'lightblue2': '#80b1d3',
'lightviolet': '#8476CA',
'lime': '#00FF00',
'lime2': '#8EBA42',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine':'#66CDAA',
'mediumblue': '#0000CD',
'mediumgray': '#656565',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen':'#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'mustard': '#FBC15E',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#ff9933',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleolive': '#b3de69',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'pearl': '#D9D9D9',
'pearl02': '#F5F6F9',
'pearl03': '#E1E5ED',
'pearl04': '#9499A3',
'pearl05': '#6F7B8B',
'pearl06': '#4D5663',
'peru': '#CD853F',
'pink': '#ff0088',
'pinksalmon': '#FFB5B8',
'plum': '#DDA0DD',
'polar': '#ACAFB5',
'polarblue': '#0080F0',
'polarbluelight': '#46A0F0',
'polarcyan': '#ADFCFC',
'polardark': '#484848',
'polardiv': '#D5D8DB',
'polardust': '#F2F3F7',
'polargrey': '#505050',
'polargreen': '#309054',
'polarorange': '#EE7600',
'polarpurple': '#6262DE',
'polarred': '#D94255',
'powderblue': '#B0E0E6',
'purple': '#800080',
'red': '#db4052',
'rose': '#FFC0CB',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#fb8072',
'sandybrown': '#FAA460',
'seaborn': '#EAE7E4',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'slategrey': '#708090',
'smurf': '#3E6FB0',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#ffff33',
'yellowgreen': '#9ACD32',
"henanigans_bg": "#242424",
"henanigans_blue1": "#5F95DE",
"henanigans_blue2": "#93B6E6",
"henanigans_cyan1": "#7EC4CF",
"henanigans_cyan2": "#B6ECF3",
"henanigans_dark1": "#040404",
"henanigans_dark2": "#141414",
"henanigans_dialog1": "#444459",
"henanigans_dialog2": "#5D5D7A",
"henanigans_green1": "#8BD155",
"henanigans_green2": "#A0D17B",
"henanigans_grey1": "#343434",
"henanigans_grey2": "#444444",
"henanigans_light1": "#A4A4A4",
"henanigans_light2": "#F4F4F4",
"henanigans_orange1": "#EB9E58",
"henanigans_orange2": "#EBB483",
"henanigans_purple1": "#C98FDE",
"henanigans_purple2": "#AC92DE",
"henanigans_red1": "#F77E70",
"henanigans_red2": "#DE958E",
"henanigans_yellow1": "#E8EA7E",
"henanigans_yellow2": "#E9EABE"
}
# Custom Color Scales
# ---------------------------------
_custom_scales = {
'qual': {
# dflt only exists to keep backward compatibility after issue 91
'dflt': ['orange', 'blue', 'grassgreen', 'purple', 'red', 'teal', 'yellow', 'olive', 'salmon', 'lightblue2'],
'original': ['orange', 'blue', 'grassgreen', 'purple', 'red', 'teal', 'yellow', 'olive', 'salmon', 'lightblue2'],
'ggplot': ['brick', 'smurf', 'lightviolet', 'mediumgray', 'mustard', 'lime2', 'pinksalmon'],
'polar': ['polarblue', 'polarorange', 'polargreen', 'polarpurple', 'polarred', 'polarcyan', 'polarbluelight'],
'plotly' : ['rgb(31, 119, 180)', 'rgb(255, 127, 14)', 'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
'rgb(148, 103, 189)', 'rgb(140, 86, 75)', 'rgb(227, 119, 194)', 'rgb(127, 127, 127)', 'rgb(188, 189, 34)', 'rgb(23, 190, 207)'],
'henanigans': ['henanigans_cyan2', 'henanigans_red2', 'henanigans_green2', 'henanigans_blue2', 'henanigans_orange2',
'henanigans_purple2', 'henanigans_yellow2', 'henanigans_light2', 'henanigans_cyan1', 'henanigans_red1',
'henanigans_green1', 'henanigans_blue1']
},
'div': {
},
'seq': {
}
}
# ---------------------------------------------------------------
# The below functions are based in colorlover by Jack Parmer
# https://github.com/jackparmer/colorlover/
# ---------------------------------------------------------------
_scales = None
_scales_names = None
def interp(colors, N):
    """Interpolate `colors` to `N` colors via colorlover, returning hex strings.

    colorlover rejects some values of N; on failure we retry with N+1 until
    it succeeds.  NOTE(review): the bare except + unbounded retry recurses
    forever if colorlover keeps raising for every N — confirm acceptable.
    """
    def _interp(colors, N):
        try:
            return cl.interp(colors, N)
        except:
            return _interp(colors, N + 1)
    c = _interp(colors, N)
    return list(map(rgb_to_hex, cl.to_rgb(c)))
def scales(scale=None):
    """
    Displays a color scale (HTML)
    Parameters:
    -----------
        scale : str
            Color scale name
            If no scale name is provided then all scales are returned
            (max number for each scale)
            If scale='all' then all scale combinations available
            will be returned
    Example:
        scales('accent')
        scales('all')
        scales()
    """
    if scale:
        if scale == 'all':
            display(HTML(cl.to_html(_scales)))
        else:
            display(HTML(cl.to_html(get_scales(scale))))
    else:
        # No name given: render every known scale, sorted by name.
        s = ''
        keys = list(_scales_names.keys())
        keys.sort()
        for k in keys:
            scale = get_scales(k)
            s += '<div style="display:inline-block;padding:10px;"><div>{0}</div>{1}</div>'.format(
                k, cl.to_html(scale))
        display(HTML(s))
# Scales Dictionary
# ---------------------------------
def reset_scales():
    """Rebuild the module-level scale tables `_scales` and `_scales_names`
    from colorlover's built-in scales plus `_custom_scales`."""
    global _scales
    global _scales_names
    # Deep copy: cl.scales is a nested dict, and the previous shallow
    # .copy() meant the custom-scale insertions below mutated colorlover's
    # own global data.
    scale_cpy = copy.deepcopy(cl.scales)
    # Add custom scales
    for k, v in list(_custom_scales.items()):
        if v:
            for k_, v_ in list(v.items()):
                length_key = str(len(v_))
                if length_key not in scale_cpy:
                    scale_cpy[length_key] = {}
                # Also guard the type bucket: the original indexed
                # scale_cpy[length_key][k] without creating it first,
                # raising KeyError for lengths colorlover doesn't ship.
                if k not in scale_cpy[length_key]:
                    scale_cpy[length_key][k] = {}
                scale_cpy[length_key][k][k_] = [
                    hex_to_rgb(normalize(_)) for _ in v_]
    # Dictionary by Type > Name > N
    _scales = {}
    for k, v in list(scale_cpy.items()):
        for k_, v_ in list(v.items()):
            if k_ not in _scales:
                _scales[k_] = {}
            for k__, v__ in list(v_.items()):
                if k__ not in _scales[k_]:
                    _scales[k_][k__] = {}
                _scales[k_][k__][k] = v__
    # Dictionary by Name > N
    _scales_names = {}
    for k, v in list(scale_cpy.items()):
        for k_, v_ in list(v.items()):
            for k__, v__ in list(v_.items()):
                k__ = k__.lower()
                if k__ not in _scales_names:
                    _scales_names[k__] = {}
                _scales_names[k__][k] = v__
def get_scales(scale=None, n=None):
    """
    Returns a color scale
    Parameters:
    -----------
        scale : str
            Color scale name
            If the color name is preceded by a minus (-)
            then the scale is inversed
        n : int
            Number of colors
            If n < number of colors available for a given scale then
            the minimum number will be returned
            If n > number of colors available for a given scale then
            the maximum number will be returned
    Example:
        get_scales('accent',8)
        get_scales('pastel1')
    """
    if not scale:
        # No name: return the largest variant of every known scale.
        result = {}
        for scale_name, variants in list(_scales_names.items()):
            if isinstance(variants, dict):
                sizes = list(map(int, list(variants.keys())))
                result[scale_name] = variants[str(max(sizes))]
            else:
                result[scale_name] = variants
        return result
    is_reverse = scale[0] == '-'
    if is_reverse:
        scale = scale[1:]
    # Deep copy so the reverse() below never mutates the shared tables.
    variants = copy.deepcopy(_scales_names[scale.lower()])
    sizes = list(map(int, list(variants.keys())))
    cs = None
    if n:
        if n in sizes:
            cs = variants[str(n)]
        elif n < min(sizes):
            cs = variants[str(min(sizes))]
    if cs is None:
        cs = variants[str(max(sizes))]
    if is_reverse:
        cs.reverse()
    return cs
def get_colorscale(scale):
    """
    Returns a color scale to be used for a plotly figure
    Parameters:
    -----------
        scale : str or list
            Color scale name
            If the color name is preceded by a minus (-)
            then the scale is inversed.
            Also accepts a list of colors (rgb,rgba,hex)
    Example:
        get_colorscale('accent')
        get_colorscale(['rgb(127,201,127)','rgb(190,174,212)','rgb(253,192,134)'])
    """
    # isinstance replaces `type(scale) in string_types`, which rejected
    # str subclasses and is the non-idiomatic form of the same check.
    if isinstance(scale, str):
        scale = get_scales(scale)
    elif not isinstance(scale, list):
        raise Exception(
            "scale needs to be either a scale name or list of colors")
    # Spread the colors evenly over [0, 1] as (position, color) pairs.
    cs = [[1.0 * c / (len(scale) - 1), scale[c]] for c in range(len(scale))]
    cs.sort()
    return cs
reset_scales()
| |
import sublime, sublime_plugin
import importlib
import re
import os
SETTINGS_FILE = "HighlightBuildErrors.sublime-settings"
REGION_KEY_PREFIX = "build_errors_color"
REGION_FLAGS = {
"none": sublime.HIDDEN,
"fill": 0,
"outline": sublime.DRAW_NO_FILL,
"solid_underline": sublime.DRAW_NO_FILL|sublime.DRAW_NO_OUTLINE|sublime.DRAW_SOLID_UNDERLINE,
"stippled_underline": sublime.DRAW_NO_FILL|sublime.DRAW_NO_OUTLINE|sublime.DRAW_STIPPLED_UNDERLINE,
"squiggly_underline": sublime.DRAW_NO_FILL|sublime.DRAW_NO_OUTLINE|sublime.DRAW_SQUIGGLY_UNDERLINE
}
try:
defaultExec = importlib.import_module("Better Build System").BetterBuidSystem
except:
defaultExec = importlib.import_module("Default.exec")
try:
ansiEscape = importlib.import_module("ANSIescape").ansi
except:
pass
g_errors = {}
g_show_errors = True
g_color_configs = []
def plugin_loaded():
    # Called by Sublime Text once the API is ready: load settings and
    # re-read the color config whenever the "colors" setting changes.
    settings = sublime.load_settings(SETTINGS_FILE)
    settings.add_on_change("colors", load_config)
    load_config()
def load_config():
    """Read the plugin's color configuration and pre-compile any per-color
    message regexes into `compiled_regex` entries."""
    global g_color_configs, g_default_color
    settings = sublime.load_settings(SETTINGS_FILE)
    g_color_configs = settings.get("colors", [{"color": "sublimelinter.mark.error"}])
    for cfg in g_color_configs:
        if "regex" in cfg:
            cfg["compiled_regex"] = re.compile(cfg["regex"])
def normalize_path(file_name):
    """Absolute, case-normalized path so comparisons are reliable on
    case-insensitive filesystems."""
    absolute = os.path.abspath(file_name)
    return os.path.normcase(absolute)
def update_errors_in_view(view):
    # (Re)draw highlight regions in `view` for all parsed errors that
    # belong to its file, one region set per configured color.
    global g_color_configs, g_default_color
    file_name = view.file_name()
    if file_name is None:
        # Unsaved buffer: errors are matched by path, nothing to do.
        return
    file_name = normalize_path(file_name)
    for idx, config in enumerate(g_color_configs):
        region_key = REGION_KEY_PREFIX + str(idx)
        scope = config["scope"] if "scope" in config else "invalid"
        icon = config["icon"] if "icon" in config else ""
        # With an explicit scope default to filled regions, otherwise hidden.
        default_display = "fill" if "scope" in config else "none"
        display = config["display"] if "display" in config else default_display
        if g_show_errors:
            regions = [e.get_region(view) for e in g_errors if e.file_name == file_name and e.color_index == idx]
            view.add_regions(region_key, regions, scope, icon, REGION_FLAGS[display])
        else:
            view.erase_regions(region_key)
def update_all_views(window):
    # Re-apply error highlighting to every view in the window.
    for view in window.views():
        update_errors_in_view(view)
def remove_errors_in_view(view):
    # Erase every color's highlight regions from `view`, then drop the
    # global error collection (shared across views).
    global g_color_configs
    for idx, val in enumerate(g_color_configs):
        view.erase_regions(REGION_KEY_PREFIX + str(idx))
    g_errors.clear()
class ViewEventListener(sublime_plugin.EventListener):
    # Keeps highlights in sync with editor events.
    def on_load_async(self, view):
        update_errors_in_view(view)
    def on_activated_async(self, view):
        update_errors_in_view(view)
    def on_modified_async(self, view):
        if not view.is_dirty():
            # Then most likely just reloaded or saved!
            update_errors_in_view(view)
    def on_window_command(self, window, command, args):
        # A new build invalidates previous errors: clear all highlights.
        if command == "build":
            for view in window.views():
                remove_errors_in_view(view)
def get_filename(matchObject):
    """Normalized path from capture group 1 (keeping only the last captured
    line, in case a bad regex captured several lines)."""
    captured = matchObject.group(1).splitlines()[-1]
    return normalize_path(captured)
def get_line(matchObject):
    """Line number from capture group 2, or None when absent or non-numeric."""
    groups = matchObject.groups()
    if len(groups) < 3:
        return None
    try:
        return int(groups[1])
    except ValueError:
        return None
def get_column(matchObject):
    """Column number from capture group 3, or None (the column group is
    optional; the last group is always the message)."""
    groups = matchObject.groups()
    if len(groups) < 4 or groups[2] is None:
        return None
    try:
        return int(groups[2])
    except ValueError:
        return None
def get_message(matchObject):
    """The message text, or None when fewer than 3 groups were captured."""
    groups = matchObject.groups()
    if len(groups) < 3:
        return None
    # column is optional, the last one is always the message
    return groups[-1]
class ErrorLine:
    """One parsed build error: location (file/line/column), message, and the
    index of the color config whose regex matched the message."""
    def __init__(self, matchObject):
        global g_color_configs
        # only keep last line (i've seen a bad regex that capture several lines)
        self.file_name = get_filename(matchObject)
        self.line = get_line(matchObject)
        self.column = get_column(matchObject)
        self.message = get_message(matchObject)
        # Preserved early-out: with no message, color_index stays unset
        # (callers are expected to filter such entries out first).
        if self.message is None:
            return
        # First color config whose regex matches the message wins; configs
        # without a compiled regex terminate the search.
        self.color_index = 0
        for config in g_color_configs:
            if "compiled_regex" not in config:
                break
            if config["compiled_regex"].search(self.message):
                break
            self.color_index = self.color_index + 1
    def get_region(self, view):
        """Region to highlight: the word at (line, column) when the point
        starts/ends a word, otherwise the full line; None without a line."""
        if self.line is None:
            return None
        if self.column is None:
            point = view.text_point(self.line - 1, 0)
            return view.full_line(point)
        point = view.text_point(self.line - 1, self.column - 1)
        point_class = view.classify(point)
        if point_class & (sublime.CLASS_WORD_START | sublime.CLASS_WORD_END):
            return view.word(point)
        else:
            return view.full_line(point)
class ErrorParser:
    """Compiles a build system's `file_regex` and turns build output into
    ErrorLine objects."""
    def __init__(self, pattern):
        self.regex = re.compile(pattern, re.MULTILINE)
        # A usable regex captures filename, line, [column,] message.
        self.bad_regex = not (3 <= self.regex.groups <= 4)
        if self.bad_regex:
            print("Highlight Build Errors plugin warning: invalid configuration\nThe regular expression must capture filename,line,[column,]message\nPlease fix the 'file_regex' in build system configuration.")
    def parse(self, text):
        if self.bad_regex:
            return []
        return [ErrorLine(match) for match in self.regex.finditer(text)]
def doHighlighting(self):
    # Shared implementation for the build-command subclasses below:
    # parse the finished build's output panel and refresh all highlights.
    # (`self` is the command instance; called as doHighlighting(self).)
    output = self.output_view.substr(sublime.Region(0, self.output_view.size()))
    error_pattern = self.output_view.settings().get("result_file_regex")
    error_parser = ErrorParser(error_pattern)
    global g_errors
    g_errors = error_parser.parse(output)
    update_all_views(self.window)
class ExecCommand(defaultExec.ExecCommand):
    # Hook the standard build command so errors get highlighted when done.
    def finish(self, proc):
        super(ExecCommand, self).finish(proc)
        doHighlighting(self)
try:
    # Same hook for the ANSIescape plugin's build command, if installed.
    class AnsiColorBuildCommand(ansiEscape.AnsiColorBuildCommand):
        def finish(self, proc):
            super(AnsiColorBuildCommand, self).finish(proc)
            doHighlighting(self)
except:
    pass
class HideBuildErrorsCommand(sublime_plugin.WindowCommand):
    # Command palette entry: temporarily hide all error highlights.
    def is_enabled(self):
        return g_show_errors
    def run(self):
        global g_show_errors
        g_show_errors = False
        update_all_views(self.window)
class ShowBuildErrorsCommand(sublime_plugin.WindowCommand):
    # Command palette entry: re-show previously hidden error highlights.
    def is_enabled(self):
        return not g_show_errors
    def run(self):
        global g_show_errors
        g_show_errors = True
        update_all_views(self.window)
| |
"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate an import library for its dll
# - create a def-file for python??.dll
# - create an import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it didn't work reliably in some
# tested configurations. And because other Windows compilers also
# need their symbols specified, this is no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# * llvm-mingw with Clang 11 works
# (lld supports -shared)
import os
import sys
import copy
import shlex
import warnings
from subprocess import check_output
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import (DistutilsExecError, CCompilerError,
CompileError, UnknownFileError)
from distutils.version import LooseVersion, suppress_known_deprecation
def get_msvcr():
    """Include the appropriate MSVC runtime library if Python was built
    with MSVC 7.0 or later.
    """
    msc_pos = sys.version.find('MSC v.')
    if msc_pos == -1:
        # Not an MSC build (e.g. GCC/Clang): nothing to add (returns None,
        # matching the original's implicit fall-through).
        return None
    msc_ver = sys.version[msc_pos+6:msc_pos+10]
    # Exact versions MSVC 7.0 .. 12.0 map directly to a runtime library.
    runtime_by_version = {
        '1300': ['msvcr70'],    # MSVC 7.0
        '1310': ['msvcr71'],    # MSVC 7.1
        '1400': ['msvcr80'],    # VS2005 / MSVC 8.0
        '1500': ['msvcr90'],    # VS2008 / MSVC 9.0
        '1600': ['msvcr100'],   # VS2010 / MSVC 10.0
        '1700': ['msvcr110'],   # VS2012 / MSVC 11.0
        '1800': ['msvcr120'],   # VS2013 / MSVC 12.0
    }
    if msc_ver in runtime_by_version:
        return runtime_by_version[msc_ver]
    if 1900 <= int(msc_ver) < 2000:
        # VS2015 / MSVC 14.0
        return ['ucrt', 'vcruntime140']
    raise ValueError("Unknown MS Compiler version %s " % msc_ver)
class CygwinCCompiler(UnixCCompiler):
    """ Handles the Cygwin port of the GNU C compiler to Windows.
    """
    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    # static archives are named "libfoo.a"; shared outputs keep their own name
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"
    def __init__(self, verbose=0, dry_run=0, force=0):
        # Sanity-check pyconfig.h for GCC compatibility first; a mismatch only
        # warns, since compilation may still succeed.
        super().__init__(verbose, dry_run, force)
        status, details = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)
        # Honor CC/CXX environment overrides, defaulting to the GNU tools.
        self.cc = os.environ.get('CC', 'gcc')
        self.cxx = os.environ.get('CXX', 'g++')
        # The C compiler driver is also used to link DLLs.
        self.linker_dll = self.cc
        shared_option = "-shared"
        self.set_executables(compiler='%s -mcygwin -O -Wall' % self.cc,
                             compiler_so='%s -mcygwin -mdll -O -Wall' % self.cc,
                             compiler_cxx='%s -mcygwin -O -Wall' % self.cxx,
                             linker_exe='%s -mcygwin' % self.cc,
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))
        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or later.
        self.dll_libraries = get_msvcr()
    @property
    def gcc_version(self):
        # Older numpy depended on this existing to check for ancient
        # gcc versions. This doesn't make much sense with clang etc so
        # just hardcode to something recent.
        # https://github.com/numpy/numpy/pull/20333
        warnings.warn(
            "gcc_version attribute of CygwinCCompiler is deprecated. "
            "Instead of returning actual gcc version a fixed value 11.2.0 is returned.",
            DeprecationWarning,
            stacklevel=2,
        )
        with suppress_known_deprecation():
            return LooseVersion("11.2.0")
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compiles the source by spawning GCC and windres if needed.

        Resource files (.rc/.res) go through windres; everything else goes
        through the configured C compiler.  DistutilsExecError from either
        tool is re-raised as CompileError.
        """
        if ext == '.rc' or ext == '.res':
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError as msg:
                raise CompileError(msg)
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError as msg:
                raise CompileError(msg)
    def link(self, target_desc, objects, output_filename, output_dir=None,
             libraries=None, library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """Link the objects.

        Export symbols are handled by writing a .def file next to the object
        files and passing it to the linker instead of forwarding
        export_symbols to UnixCCompiler.link().
        """
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])
        # Additional libraries
        libraries.extend(self.dll_libraries)
        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)
            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))
            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)
            # next add options for def-file and to creating import libraries
            # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
            #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
            # for gcc/ld the def-file is specified as any object files
            objects.append(def_file)
        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let ld strip the output file
        # (On my machine: 10KiB < stripped_file < ??100KiB
        #   unstripped_file = stripped_file + XXX KiB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")
        UnixCCompiler.link(self, target_desc, objects, output_filename,
                           output_dir, libraries, library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug, extra_preargs, extra_postargs, build_temp,
                           target_lang)
    # -- Miscellaneous methods -----------------------------------------
    def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
        """Adds supports for rc and res files.

        Unlike the base implementation, .rc/.res keep their extension in the
        object name (e.g. foo.rc.o) so both can coexist for one base name.
        """
        if output_dir is None:
            output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            base, ext = os.path.splitext(os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError("unknown file type '%s' (from '%s')" % \
                      (ext, src_name))
            if strip_dir:
                base = os.path.basename (base)
            if ext in ('.res', '.rc'):
                # these need to be compiled to object files
                obj_names.append (os.path.join(output_dir,
                                              base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join(output_dir,
                                              base + self.obj_extension))
        return obj_names
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(CygwinCCompiler):
    """ Handles the Mingw32 port of the GNU C compiler to Windows.
    """
    compiler_type = 'mingw32'

    def __init__(self, verbose=0, dry_run=0, force=0):
        """Configure the MinGW toolchain.

        Raises CCompilerError when the configured ``cc`` turns out to be a
        Cygwin compiler, which cannot be used for --compiler=mingw32.
        """
        super().__init__ (verbose, dry_run, force)

        shared_option = "-shared"

        if is_cygwincc(self.cc):
            raise CCompilerError(
                'Cygwin gcc cannot be used with --compiler=mingw32')

        # Unlike the Cygwin flavour there is no -mcygwin flag here: the
        # produced binaries target native Windows.
        self.set_executables(compiler='%s -O -Wall' % self.cc,
                             compiler_so='%s -mdll -O -Wall' % self.cc,
                             compiler_cxx='%s -O -Wall' % self.cxx,
                             linker_exe='%s' % self.cc,
                             linker_so='%s %s'
                                        % (self.linker_dll, shared_option))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')

        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or later.  The original code first reset
        # dll_libraries to [] and immediately overwrote it; that dead
        # assignment is gone.  ``or []`` guards against get_msvcr()
        # returning None on a non-MSVC build, which would otherwise crash
        # link() when it extends the libraries list.
        self.dll_libraries = get_msvcr() or []
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using an unmodified
# version.
# Status constants returned (as the first tuple element) by check_config_h().
CONFIG_H_OK = "ok"                  # pyconfig.h looks GCC-compatible
CONFIG_H_NOTOK = "not ok"           # pyconfig.h does not mention __GNUC__
CONFIG_H_UNCERTAIN = "uncertain"    # pyconfig.h could not be read
def check_config_h():
    """Check if the current Python installation appears amenable to building
    extensions with GCC.

    Returns a tuple (status, details), where 'status' is one of the following
    constants:

    - CONFIG_H_OK: all is well, go ahead and compile
    - CONFIG_H_NOTOK: doesn't look good
    - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h

    'details' is a human-readable string explaining the situation.

    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """
    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...
    from distutils import sysconfig

    # if sys.version contains GCC then python was compiled with GCC, and the
    # pyconfig.h file should be OK
    if "GCC" in sys.version:
        return CONFIG_H_OK, "sys.version mentions 'GCC'"

    # Clang would also work
    if "Clang" in sys.version:
        return CONFIG_H_OK, "sys.version mentions 'Clang'"

    # let's see if __GNUC__ is mentioned in pyconfig.h
    fn = sysconfig.get_config_h_filename()
    try:
        # 'with' guarantees the handle is closed even if read() raises,
        # replacing the original manual try/finally close() bookkeeping.
        with open(fn) as config_h:
            if "__GNUC__" in config_h.read():
                return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
            return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
    except OSError as exc:
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
def is_cygwincc(cc):
    '''Try to determine if the compiler that would be used is from cygwin.'''
    # Ask the compiler for its target triplet; Cygwin toolchains report a
    # machine string ending in "cygwin" (e.g. x86_64-pc-cygwin).
    cmd = shlex.split(cc) + ['-dumpmachine']
    triplet = check_output(cmd).strip()
    return triplet.endswith(b'cygwin')
# Deprecated placeholder: the old get_versions() helper was removed, but the
# name is kept bound (to None) so third-party code that monkeypatches it does
# not fail on attribute access.
get_versions = None
"""
A stand-in for the previous get_versions() function to prevent failures
when monkeypatched. See pypa/setuptools#2969.
"""
| |
from ToontownGlobals import *
import math
import TTLocalizer
# --- Battle camera presets -------------------------------------------------
# Positions/HPRs are Point3/Vec3 values; FOVs are in degrees.
BattleCamFaceOffFov = 30.0
BattleCamFaceOffPos = Point3(0, -10, 4)
BattleCamDefaultPos1 = Point3(0, -16, 3)
BattleCamDefaultHpr1 = Vec3(0, 0, 0)
BattleCamDefaultPos2 = Point3(0, -8.6, 16.5)
BattleCamDefaultHpr2 = Vec3(0, -61, 0)
BattleCamDefaultFov = 80.0
BattleCamMenuFov = 65.0
BattleCamJoinPos = Point3(0, -12, 13)
BattleCamJoinHpr = Vec3(0, -45, 0)
# Debug switch: non-zero skips battle movies (name suggests; used elsewhere).
SkipMovie = 0
# Base hit points for a toon.
BaseHp = 15
# Localized display names for the gag tracks (playable and NPC-only).
Tracks = TTLocalizer.BattleGlobalTracks
NPCTracks = TTLocalizer.BattleGlobalNPCTracks
# Per-track UI tint as (r, g, b) floats in [0, 1]; order matches the
# *_TRACK indices defined below.
TrackColors = ((211 / 255.0, 148 / 255.0, 255 / 255.0),
 (249 / 255.0, 255 / 255.0, 93 / 255.0),
 (79 / 255.0, 190 / 255.0, 76 / 255.0),
 (93 / 255.0, 108 / 255.0, 239 / 255.0),
 (255 / 255.0, 145 / 255.0, 66 / 255.0),
 (255 / 255.0, 65 / 255.0, 199 / 255.0),
 (255 / 255.0, 255 / 255.0, 255 / 255.0),
 (67 / 255.0, 243 / 255.0, 255 / 255.0))
# Gag track indices (0-7 are real tracks a toon can train).
HEAL_TRACK = 0
TRAP_TRACK = 1
LURE_TRACK = 2
SOUND_TRACK = 3
THROW_TRACK = 4
SQUIRT_TRACK = 5
ZAP_TRACK = 6
DROP_TRACK = 7
# Extra codes (8+) for NPC-only battle effects, not real gag tracks.
NPC_RESTOCK_GAGS = 8
NPC_TOONS_HIT = 9
NPC_COGS_MISS = 10
# Valid index ranges for tracks and gag levels.
MIN_TRACK_INDEX = 0
MAX_TRACK_INDEX = 7
MIN_LEVEL_INDEX = 0
MAX_LEVEL_INDEX = 6
MAX_UNPAID_LEVEL_INDEX = 4
LAST_REGULAR_GAG_LEVEL = 5
UBER_GAG_LEVEL_INDEX = 6
NUM_GAG_TRACKS = 8
# Animated street props grant a bonus to the mapped gag track.
PropTypeToTrackBonus = {AnimPropTypes.Hydrant: SQUIRT_TRACK,
 AnimPropTypes.Mailbox: THROW_TRACK,
 AnimPropTypes.Trashcan: HEAL_TRACK}
# Experience points needed to unlock the gag at the indexed position
# (Levels[track][level]); rows follow the *_TRACK order above.
Levels = [[0, 20, 200, 800, 2000, 6000, 10000], # Toon-Up
          [0, 20, 100, 800, 2000, 6000, 10000], # Trap
          [0, 20, 100, 800, 2000, 6000, 10000], # Lure
          [0, 40, 200, 1000, 2500, 7500, 10000], # Sound
          [0, 10, 50, 400, 2000, 6000, 10000], # Throw
          [0, 10, 50, 400, 2000, 6000, 10000], # Squirt
          [0, 20, 100, 500, 2000, 6000, 10000], # Zap
          [0, 20, 100, 500, 2000, 6000, 10000]] # Drop
# Regular skill tops out at regMaxSkill; the uber (level-7) gag adds
# UberSkill more, giving MaxSkill overall.
regMaxSkill = 10000
UberSkill = 500
MaxSkill = UberSkill + regMaxSkill
# Highest skill an unpaid account may earn per track: one point short of a
# gag unlock (Throw/Squirt are allowed further than the other tracks).
UnpaidMaxSkills = [Levels[0][1] - 1,
 Levels[1][1] - 1,
 Levels[2][1] - 1,
 Levels[3][1] - 1,
 Levels[4][4] - 1,
 Levels[5][4] - 1,
 Levels[6][1] - 1,
 Levels[7][1] - 1]
# Experience cap value; the exact rule for applying it lives in the callers.
ExperienceCap = 300
def gagIsPaidOnly(track, level):
    """Return True when the gag at (track, level) needs a paid account.

    A gag is paid-only when the experience required to earn it exceeds the
    unpaid skill cap for that track.
    """
    return UnpaidMaxSkills[track] < Levels[track][level]
def gagIsVelvetRoped(track, level):
    """Return True when the gag at (track, level) is gated ("velvet roped").

    Level-0 gags are never gated.  For tracks 4 and 5 (Throw/Squirt) only
    gags above level 3 are gated; every other track is gated past level 0.
    """
    if level <= 0:
        return False
    if track in (4, 5):
        return level > 3
    return True
# Highest accuracy value a toon can reach.
MaxToonAcc = 95
StartingLevel = 0
# CarryLimits[track][row][gagLevel]: how many copies of each gag may be
# carried; row index advances as the toon trains further in the track.
CarryLimits = (
    ( # Toon-Up
        (10, 0, 0, 0, 0, 0, 0),
        (10, 5, 0, 0, 0, 0, 0),
        (15, 10, 5, 0, 0, 0, 0),
        (20, 15, 10, 5, 0, 0, 0),
        (25, 20, 15, 10, 3, 0, 0),
        (30, 25, 20, 15, 7, 3, 0),
        (30, 25, 20, 15, 7, 3, 1)
    ),
    ( # Trap
        (5, 0, 0, 0, 0, 0, 0),
        (7, 3, 0, 0, 0, 0, 0),
        (10, 7, 3, 0, 0, 0, 0),
        (15, 10, 7, 3, 0, 0, 0),
        (15, 15, 10, 5, 3, 0, 0),
        (20, 15, 15, 10, 5, 2, 0),
        (20, 15, 15, 10, 5, 2, 1)
    ),
    ( # Lure
        (10, 0, 0, 0, 0, 0, 0),
        (10, 5, 0, 0, 0, 0, 0),
        (15, 10, 5, 0, 0, 0, 0),
        (20, 15, 10, 5, 0, 0, 0),
        (25, 20, 15, 10, 3, 0, 0),
        (30, 25, 20, 15, 7, 3, 0),
        (30, 25, 20, 15, 7, 3, 1)
    ),
    ( # Sound
        (10, 0, 0, 0, 0, 0, 0),
        (10, 5, 0, 0, 0, 0, 0),
        (15, 10, 5, 0, 0, 0, 0),
        (20, 15, 10, 5, 0, 0, 0),
        (25, 20, 15, 10, 3, 0, 0),
        (30, 25, 20, 15, 7, 3, 0),
        (30, 25, 20, 15, 7, 3, 1)
    ),
    ( # Throw
        (10, 0, 0, 0, 0, 0, 0),
        (10, 5, 0, 0, 0, 0, 0),
        (15, 10, 5, 0, 0, 0, 0),
        (20, 15, 10, 5, 0, 0, 0),
        (25, 20, 15, 10, 3, 0, 0),
        (30, 25, 20, 15, 7, 3, 0),
        (30, 25, 20, 15, 7, 3, 1)
    ),
    ( # Squirt
        (10, 0, 0, 0, 0, 0, 0),
        (10, 5, 0, 0, 0, 0, 0),
        (15, 10, 5, 0, 0, 0, 0),
        (20, 15, 10, 5, 0, 0, 0),
        (25, 20, 15, 10, 3, 0, 0),
        (30, 25, 20, 15, 7, 3, 0),
        (30, 25, 20, 15, 7, 3, 1)
    ),
    ( # Zap
        (10, 0, 0, 0, 0, 0, 0),
        (10, 5, 0, 0, 0, 0, 0),
        (15, 10, 5, 0, 0, 0, 0),
        (20, 15, 10, 5, 0, 0, 0),
        (25, 20, 15, 10, 3, 0, 0),
        (30, 25, 20, 15, 7, 3, 0),
        (30, 25, 20, 15, 7, 3, 1)
    ),
    ( # Drop
        (10, 0, 0, 0, 0, 0, 0),
        (10, 5, 0, 0, 0, 0, 0),
        (15, 10, 5, 0, 0, 0, 0),
        (20, 15, 10, 5, 0, 0, 0),
        (25, 20, 15, 10, 3, 0, 0),
        (30, 25, 20, 15, 7, 3, 0),
        (30, 25, 20, 15, 7, 3, 1)
    )
)
# NOTE(review): three integer pairs; which tier applies is decided by the
# consumers of MaxProps -- confirm there before relying on a meaning.
MaxProps = ((15, 40), (30, 60), (75, 80))
# Bit flags (OR-combined) describing special cogs in a battle layout;
# exact semantics are enforced where the flags are read.
DLF_SKELECOG = 1
DLF_FOREMAN = 2
DLF_VP = 4
DLF_CFO = 8
DLF_SUPERVISOR = 16
DLF_VIRTUAL = 32
DLF_REVIVES = 64
# Names of the throwable pie variants, indexed by pie type.
pieNames = ['tart',
 'fruitpie-slice',
 'creampie-slice',
 'fruitpie',
 'creampie',
 'birthday-cake',
 'wedding-cake',
 'lawbook']
# AvProps[track][level]: prop names used for each gag, rows in *_TRACK order.
# NOTE(review): the Zap row duplicates Squirt's props -- confirm whether
# dedicated Zap props exist.
AvProps = (('feather',
 'bullhorn',
 'lipstick',
 'bamboocane',
 'pixiedust',
 'baton',
 'baton'),
 ('banana',
 'rake',
 'marbles',
 'quicksand',
 'trapdoor',
 'tnt',
 'traintrack'),
 ('1dollar',
 'smmagnet',
 '5dollar',
 'bigmagnet',
 '10dollar',
 'hypnogogs',
 'hypnogogs'),
 ('bikehorn',
 'whistle',
 'bugle',
 'aoogah',
 'elephant',
 'foghorn',
 'singing'),
 ('cupcake',
 'fruitpieslice',
 'creampieslice',
 'fruitpie',
 'creampie',
 'cake',
 'cake'),
 ('flower',
 'waterglass',
 'waterballoon',
 'bottle',
 'firehose',
 'stormcloud',
 'stormcloud'),
 ('flower',
 'waterglass',
 'waterballoon',
 'bottle',
 'firehose',
 'stormcloud',
 'stormcloud'),
 ('flowerpot',
 'sandbag',
 'anvil',
 'weight',
 'safe',
 'piano',
 'piano'))
# AvPropsNew[track][level]: inventory icon names for each gag.
AvPropsNew = (('inventory_feather',
 'inventory_megaphone',
 'inventory_lipstick',
 'inventory_bamboo_cane',
 'inventory_pixiedust',
 'inventory_juggling_cubes',
 'inventory_ladder'),
 ('inventory_bannana_peel',
 'inventory_rake',
 'inventory_marbles',
 'inventory_quicksand_icon',
 'inventory_trapdoor',
 'inventory_tnt',
 'inventory_traintracks'),
 ('inventory_1dollarbill',
 'inventory_small_magnet',
 'inventory_5dollarbill',
 'inventory_big_magnet',
 'inventory_10dollarbill',
 'inventory_hypno_goggles',
 'inventory_screen'),
 ('inventory_bikehorn',
 'inventory_whistle',
 'inventory_bugle',
 'inventory_aoogah',
 'inventory_elephant',
 'inventory_fog_horn',
 'inventory_opera_singer'),
 ('inventory_tart',
 'inventory_fruit_pie_slice',
 'inventory_cream_pie_slice',
 'inventory_fruitpie',
 'inventory_creampie',
 'inventory_cake',
 'inventory_wedding'),
 ('inventory_squirt_flower',
 'inventory_glass_of_water',
 'inventory_water_gun',
 'inventory_seltzer_bottle',
 'inventory_firehose',
 'inventory_storm_cloud',
 'inventory_geyser'),
 ('inventory_joybuzzer',
 'inventory_carpet',
 'inventory_balloon',
 'inventory_battery',
 'inventory_tazer',
 'inventory_tesla',
 'inventory_lightning'),
 ('inventory_flower_pot',
 'inventory_sandbag',
 'inventory_anvil',
 'inventory_weight',
 'inventory_safe_box',
 'inventory_piano',
 'inventory_ship'))
# Localized gag name strings (plain, singular, plural).
AvPropStrings = TTLocalizer.BattleGlobalAvPropStrings
AvPropStringsSingular = TTLocalizer.BattleGlobalAvPropStringsSingular
AvPropStringsPlural = TTLocalizer.BattleGlobalAvPropStringsPlural
# AvPropAccuracy[track][level]: base accuracy percentage per gag; Trap is
# all zeros (its hit logic is handled elsewhere).
AvPropAccuracy = (
    (70, 70, 70, 70, 70, 70, 100),
    (0, 0, 0, 0, 0, 0, 0),
    (50, 50, 60, 60, 70, 70, 90),
    (95, 95, 95, 95, 95, 95, 95),
    (75, 75, 75, 75, 75, 75, 75),
    (95, 95, 95, 95, 95, 95, 95),
    (75, 75, 75, 75, 75, 75, 75),
    (50, 50, 50, 50, 50, 50, 50)
)
# Lure-related accuracy per gag level (per the name; applied in battle calcs).
AvLureBonusAccuracy = (60,
 60,
 70,
 70,
 80,
 80,
 100)
AvTrackAccStrings = TTLocalizer.BattleGlobalAvTrackAccStrings
# AvPropDamage[track][level] = ((minDamage, maxDamage), (minExp, maxExp)):
# damage scales from minDamage to maxDamage as track experience moves
# through [minExp, maxExp] -- see getAvPropDamage().  Lure is all zeros
# because its "damage" is the number of rounds lured (NumRoundsLured).
AvPropDamage = ((((8, 10), (Levels[0][0], Levels[0][1])),
 ((15, 18), (Levels[0][1], Levels[0][2])),
 ((25, 30), (Levels[0][2], Levels[0][3])),
 ((40, 45), (Levels[0][3], Levels[0][4])),
 ((60, 70), (Levels[0][4], Levels[0][5])),
 ((90, 120), (Levels[0][5], Levels[0][6])),
 ((210, 210), (Levels[0][6], MaxSkill))),
 (((10, 12), (Levels[1][0], Levels[1][1])),
 ((18, 20), (Levels[1][1], Levels[1][2])),
 ((30, 35), (Levels[1][2], Levels[1][3])),
 ((45, 50), (Levels[1][3], Levels[1][4])),
 ((60, 70), (Levels[1][4], Levels[1][5])),
 ((90, 180), (Levels[1][5], Levels[1][6])),
 ((195, 195), (Levels[1][6], MaxSkill))),
 (((0, 0), (0, 0)),
 ((0, 0), (0, 0)),
 ((0, 0), (0, 0)),
 ((0, 0), (0, 0)),
 ((0, 0), (0, 0)),
 ((0, 0), (0, 0)),
 ((0, 0), (0, 0))),
 (((3, 4), (Levels[3][0], Levels[3][1])),
 ((5, 7), (Levels[3][1], Levels[3][2])),
 ((9, 11), (Levels[3][2], Levels[3][3])),
 ((14, 16), (Levels[3][3], Levels[3][4])),
 ((19, 21), (Levels[3][4], Levels[3][5])),
 ((25, 50), (Levels[3][5], Levels[3][6])),
 ((90, 90), (Levels[3][6], MaxSkill))),
 (((4, 6), (Levels[4][0], Levels[4][1])),
 ((8, 10), (Levels[4][1], Levels[4][2])),
 ((14, 17), (Levels[4][2], Levels[4][3])),
 ((24, 27), (Levels[4][3], Levels[4][4])),
 ((36, 40), (Levels[4][4], Levels[4][5])),
 ((48, 100), (Levels[4][5], Levels[4][6])),
 ((120, 120), (Levels[4][6], MaxSkill))),
 (((3, 4), (Levels[5][0], Levels[5][1])),
 ((6, 8), (Levels[5][1], Levels[5][2])),
 ((10, 12), (Levels[5][2], Levels[5][3])),
 ((18, 21), (Levels[5][3], Levels[5][4])),
 ((27, 30), (Levels[5][4], Levels[5][5])),
 ((36, 80), (Levels[5][5], Levels[5][6])),
 ((105, 105), (Levels[5][6], MaxSkill))),
 ( # Zap
  ((3, 4), (Levels[6][0], Levels[6][1])),
  ((5, 7), (Levels[6][1], Levels[6][2])),
  ((9, 11), (Levels[6][2], Levels[6][3])),
  ((16, 18), (Levels[6][3], Levels[6][4])),
  ((24, 30), (Levels[6][4], Levels[6][5])),
  ((35, 65), (Levels[6][5], Levels[6][6])),
  ((95, 95), (Levels[6][6], MaxSkill))
 ),
 (((10, 10), (Levels[7][0], Levels[7][1])),
 ((18, 18), (Levels[7][1], Levels[7][2])),
 ((30, 30), (Levels[7][2], Levels[7][3])),
 ((45, 45), (Levels[7][3], Levels[7][4])),
 ((60, 60), (Levels[7][4], Levels[7][5])),
 ((85, 170), (Levels[7][5], Levels[7][6])),
 ((180, 180), (Levels[7][6], MaxSkill))))
# Whether a gag hits a single cog or the whole group.
ATK_SINGLE_TARGET = 0
ATK_GROUP_TARGET = 1
# Rows of per-level target categories; AvPropTarget maps a track to a row.
AvPropTargetCat = ((ATK_SINGLE_TARGET,
 ATK_GROUP_TARGET,
 ATK_SINGLE_TARGET,
 ATK_GROUP_TARGET,
 ATK_SINGLE_TARGET,
 ATK_GROUP_TARGET,
 ATK_GROUP_TARGET),
 (ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET),
 (ATK_GROUP_TARGET,
 ATK_GROUP_TARGET,
 ATK_GROUP_TARGET,
 ATK_GROUP_TARGET,
 ATK_GROUP_TARGET,
 ATK_GROUP_TARGET,
 ATK_GROUP_TARGET),
 (ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET,
 ATK_SINGLE_TARGET,
 ATK_GROUP_TARGET))
# AvPropTarget[track] -> row index into AvPropTargetCat (see isGroup()).
AvPropTarget = (0,
 3,
 0,
 2,
 3,
 3,
 3,
 3)
# Rounds a cog stays lured, indexed by lure gag level.
NumRoundsLured = [2,
 2,
 3,
 3,
 4,
 4,
 15]
# Percent chances per gag level; the exact insta-kill rule is applied by
# the battle calculator that reads this.
InstaKillChance = [2, 5, 8, 12, 15, 20, 65]
def getAvPropDamage(attackTrack, attackLevel, exp, organicBonus = False, propBonus = False, propAndOrganicBonusStack = False):
    """Return the damage a gag deals at the toon's current experience.

    For Lure the result is the number of rounds lured instead of damage.
    Damage interpolates between the level's (min, max) damage pair as the
    (capped) experience moves through the level's experience window.
    Organic/prop bonuses each add getDamageBonus(); when they stack, both
    bonuses are computed from the pre-bonus damage.
    """
    if attackTrack == LURE_TRACK:
        return NumRoundsLured[attackLevel]
    (loDmg, hiDmg), (loExp, hiExp) = AvPropDamage[attackTrack][attackLevel]
    cappedExp = min(exp, hiExp)
    expPerPoint = float(hiExp - loExp + 1) / float(hiDmg - loDmg + 1)
    damage = math.floor((cappedExp - loExp) / expPerPoint) + loDmg
    if damage <= 0:
        # Never deal less than the level's minimum.
        damage = loDmg
    if propAndOrganicBonusStack:
        baseDamage = damage
        if organicBonus:
            damage += getDamageBonus(baseDamage)
        if propBonus:
            damage += getDamageBonus(baseDamage)
    elif organicBonus or propBonus:
        damage += getDamageBonus(damage)
    return damage
def getDamageBonus(normal):
    """Return the bonus damage for a base value: 10%, at least 1 if positive."""
    tenth = int(normal * 0.1)
    if normal > 0:
        return max(tenth, 1)
    return tenth
def isGroup(track, level):
    """Return the target category (single vs. group) for a gag."""
    categoryRow = AvPropTarget[track]
    return AvPropTargetCat[categoryRow][level]
def getCreditMultiplier(floorIndex):
    """Return the skill-credit multiplier for a 0-based building floor."""
    return 1 + 0.5 * floorIndex
def getFactoryCreditMultiplier(factoryId):
    """Return the skill-credit multiplier for a factory.

    The id is accepted for interface compatibility; every factory pays 2x.
    """
    return 2.0
def getFactoryMeritMultiplier(factoryId):
    """Return the merit multiplier for a factory.

    The id is accepted for interface compatibility; every factory pays 4x.
    """
    return 4.0
def getMintCreditMultiplier(mintId):
    """Return the skill-credit multiplier for a Cashbot mint interior."""
    multipliers = {
        CashbotMintIntA: 2.0,
        CashbotMintIntB: 2.5,
        CashbotMintIntC: 3.0,
    }
    # Unknown ids fall back to no bonus.
    return multipliers.get(mintId, 1.0)
def getStageCreditMultiplier(floor):
    """Return the skill-credit multiplier for a stage floor.

    Stages scale exactly like building floors (getCreditMultiplier's
    formula, inlined here).
    """
    return 1 + floor * 0.5
def getCountryClubCreditMultiplier(countryClubId):
    """Return the skill-credit multiplier for a country club interior."""
    multipliers = {
        BossbotCountryClubIntA: 2.0,
        BossbotCountryClubIntB: 2.5,
        BossbotCountryClubIntC: 3.0,
    }
    # Unknown ids fall back to no bonus.
    return multipliers.get(countryClubId, 1.0)
def getBossBattleCreditMultiplier(battleNumber):
    """Return the skill-credit multiplier for the Nth boss battle (0-based)."""
    return battleNumber + 1
def getInvasionMultiplier():
    """Return the experience multiplier in effect during a cog invasion."""
    return 2.0
def getMoreXpHolidayMultiplier():
    """Return the experience multiplier for the more-XP holiday event."""
    return 2.0
def encodeUber(trackList):
    """Pack per-track uber-gag counters into a bit field.

    Bit i of the result is set exactly when trackList[i] > 0.  Replaces the
    index-based loop with enumerate and pow(2, i) with a bit shift.
    """
    bitField = 0
    for trackIndex, count in enumerate(trackList):
        if count > 0:
            bitField |= 1 << trackIndex
    return bitField
def decodeUber(flagMask):
    """Unpack a bit field into a per-track flag list (LSB first).

    Inverse of encodeUber(): the list ends at the highest set bit (no
    trailing zeros), and 0 decodes to the empty list.  The original
    hand-rolled power-of-two loop only handled bits 0..16 and silently
    mis-decoded wider masks; walking the bits directly handles any width.
    Non-positive masks decode to the empty list (the old code raised
    IndexError for negatives; callers pre-filter via getUberFlagSafe).
    """
    if flagMask <= 0:
        return []
    trackList = []
    remaining = flagMask
    while remaining:
        trackList.append(remaining & 1)
        remaining >>= 1
    return trackList
def getUberFlag(flagMask, index):
    """Return the uber flag at ``index`` inside ``flagMask`` (0 if absent)."""
    flags = decodeUber(flagMask)
    return flags[index] if index < len(flags) else 0
def getUberFlagSafe(flagMask, index):
    """Like getUberFlag(), but returns -1 for unknown or negative masks."""
    # 'unknown' appears to be a not-yet-loaded sentinel -- confirm with callers.
    if flagMask == 'unknown' or flagMask < 0:
        return -1
    return getUberFlag(flagMask, index)
| |
"""Tests for the Google Assistant traits."""
from unittest.mock import patch, Mock
import logging
import pytest
from homeassistant.components import (
binary_sensor,
camera,
cover,
fan,
input_boolean,
light,
lock,
media_player,
scene,
script,
sensor,
switch,
vacuum,
group,
alarm_control_panel,
)
from homeassistant.components.climate import const as climate
from homeassistant.components.google_assistant import trait, helpers, const, error
from homeassistant.const import (
STATE_ON,
STATE_OFF,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
ATTR_ENTITY_ID,
SERVICE_TURN_ON,
SERVICE_TURN_OFF,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
ATTR_DEVICE_CLASS,
ATTR_ASSUMED_STATE,
STATE_UNKNOWN,
)
from homeassistant.core import State, DOMAIN as HA_DOMAIN, EVENT_CALL_SERVICE
from homeassistant.util import color
from tests.common import async_mock_service, mock_coro
from . import BASIC_CONFIG, MockConfig
# Shared fixtures for every trait test below.
_LOGGER = logging.getLogger(__name__)
# Fixed request id so assertions stay deterministic.
REQ_ID = "ff36a3cc-ec34-11e6-b1a0-64510650abcf"
BASIC_DATA = helpers.RequestData(BASIC_CONFIG, "test-agent", REQ_ID, None)
# Config variant that requires a PIN for secure devices.
PIN_CONFIG = MockConfig(secure_devices_pin="1234")
PIN_DATA = helpers.RequestData(PIN_CONFIG, "test-agent", REQ_ID, None)
async def test_brightness_light(hass):
    """Test brightness trait support for light domain."""
    assert helpers.get_google_type(light.DOMAIN, None) is not None
    assert trait.BrightnessTrait.supported(light.DOMAIN, light.SUPPORT_BRIGHTNESS, None)
    trt = trait.BrightnessTrait(
        hass,
        State("light.bla", light.STATE_ON, {light.ATTR_BRIGHTNESS: 243}),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {}
    # 243/255 maps to 95 on Google's 0-100 brightness scale.
    assert trt.query_attributes() == {"brightness": 95}
    # Capture both the mocked service call and the bus event it fires.
    events = []
    hass.bus.async_listen(EVENT_CALL_SERVICE, events.append)
    calls = async_mock_service(hass, light.DOMAIN, light.SERVICE_TURN_ON)
    await trt.execute(
        trait.COMMAND_BRIGHTNESS_ABSOLUTE, BASIC_DATA, {"brightness": 50}, {}
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "light.bla", light.ATTR_BRIGHTNESS_PCT: 50}
    assert len(events) == 1
    assert events[0].data == {
        "domain": "light",
        "service": "turn_on",
        "service_data": {"brightness_pct": 50, "entity_id": "light.bla"},
    }
async def test_camera_stream(hass):
    """Test camera stream trait support for camera domain."""
    # The trait joins relative stream paths onto the configured base URL.
    hass.config.api = Mock(base_url="http://1.1.1.1:8123")
    assert helpers.get_google_type(camera.DOMAIN, None) is not None
    assert trait.CameraStreamTrait.supported(camera.DOMAIN, camera.SUPPORT_STREAM, None)
    trt = trait.CameraStreamTrait(
        hass, State("camera.bla", camera.STATE_IDLE, {}), BASIC_CONFIG
    )
    assert trt.sync_attributes() == {
        "cameraStreamSupportedProtocols": ["hls"],
        "cameraStreamNeedAuthToken": False,
        "cameraStreamNeedDrmEncryption": False,
    }
    # No stream requested yet, so there is nothing to report.
    assert trt.query_attributes() == {}
    with patch(
        "homeassistant.components.camera.async_request_stream",
        return_value=mock_coro("/api/streams/bla"),
    ):
        await trt.execute(trait.COMMAND_GET_CAMERA_STREAM, BASIC_DATA, {}, {})
    # After executing, the returned path is exposed as an absolute URL.
    assert trt.query_attributes() == {
        "cameraStreamAccessUrl": "http://1.1.1.1:8123/api/streams/bla"
    }
async def test_onoff_group(hass):
    """Test OnOff trait support for group domain."""
    assert helpers.get_google_type(group.DOMAIN, None) is not None
    assert trait.OnOffTrait.supported(group.DOMAIN, 0, None)

    entity_id = "group.bla"
    on_trait = trait.OnOffTrait(hass, State(entity_id, STATE_ON), BASIC_CONFIG)
    assert on_trait.sync_attributes() == {}
    assert on_trait.query_attributes() == {"on": True}

    off_trait = trait.OnOffTrait(hass, State(entity_id, STATE_OFF), BASIC_CONFIG)
    assert off_trait.query_attributes() == {"on": False}

    # Groups are toggled via the core homeassistant domain services.
    turn_on_calls = async_mock_service(hass, HA_DOMAIN, SERVICE_TURN_ON)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": True}, {})
    assert len(turn_on_calls) == 1
    assert turn_on_calls[0].data == {ATTR_ENTITY_ID: entity_id}

    turn_off_calls = async_mock_service(hass, HA_DOMAIN, SERVICE_TURN_OFF)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": False}, {})
    assert len(turn_off_calls) == 1
    assert turn_off_calls[0].data == {ATTR_ENTITY_ID: entity_id}
async def test_onoff_input_boolean(hass):
    """Test OnOff trait support for input_boolean domain."""
    assert helpers.get_google_type(input_boolean.DOMAIN, None) is not None
    assert trait.OnOffTrait.supported(input_boolean.DOMAIN, 0, None)

    entity_id = "input_boolean.bla"
    on_trait = trait.OnOffTrait(hass, State(entity_id, STATE_ON), BASIC_CONFIG)
    assert on_trait.sync_attributes() == {}
    assert on_trait.query_attributes() == {"on": True}

    off_trait = trait.OnOffTrait(hass, State(entity_id, STATE_OFF), BASIC_CONFIG)
    assert off_trait.query_attributes() == {"on": False}

    turn_on_calls = async_mock_service(hass, input_boolean.DOMAIN, SERVICE_TURN_ON)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": True}, {})
    assert len(turn_on_calls) == 1
    assert turn_on_calls[0].data == {ATTR_ENTITY_ID: entity_id}

    turn_off_calls = async_mock_service(hass, input_boolean.DOMAIN, SERVICE_TURN_OFF)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": False}, {})
    assert len(turn_off_calls) == 1
    assert turn_off_calls[0].data == {ATTR_ENTITY_ID: entity_id}
async def test_onoff_switch(hass):
    """Test OnOff trait support for switch domain."""
    assert helpers.get_google_type(switch.DOMAIN, None) is not None
    assert trait.OnOffTrait.supported(switch.DOMAIN, 0, None)

    entity_id = "switch.bla"
    on_trait = trait.OnOffTrait(hass, State(entity_id, STATE_ON), BASIC_CONFIG)
    assert on_trait.sync_attributes() == {}
    assert on_trait.query_attributes() == {"on": True}

    off_trait = trait.OnOffTrait(hass, State(entity_id, STATE_OFF), BASIC_CONFIG)
    assert off_trait.query_attributes() == {"on": False}

    turn_on_calls = async_mock_service(hass, switch.DOMAIN, SERVICE_TURN_ON)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": True}, {})
    assert len(turn_on_calls) == 1
    assert turn_on_calls[0].data == {ATTR_ENTITY_ID: entity_id}

    turn_off_calls = async_mock_service(hass, switch.DOMAIN, SERVICE_TURN_OFF)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": False}, {})
    assert len(turn_off_calls) == 1
    assert turn_off_calls[0].data == {ATTR_ENTITY_ID: entity_id}
async def test_onoff_fan(hass):
    """Test OnOff trait support for fan domain."""
    assert helpers.get_google_type(fan.DOMAIN, None) is not None
    assert trait.OnOffTrait.supported(fan.DOMAIN, 0, None)

    entity_id = "fan.bla"
    on_trait = trait.OnOffTrait(hass, State(entity_id, STATE_ON), BASIC_CONFIG)
    assert on_trait.sync_attributes() == {}
    assert on_trait.query_attributes() == {"on": True}

    off_trait = trait.OnOffTrait(hass, State(entity_id, STATE_OFF), BASIC_CONFIG)
    assert off_trait.query_attributes() == {"on": False}

    turn_on_calls = async_mock_service(hass, fan.DOMAIN, SERVICE_TURN_ON)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": True}, {})
    assert len(turn_on_calls) == 1
    assert turn_on_calls[0].data == {ATTR_ENTITY_ID: entity_id}

    turn_off_calls = async_mock_service(hass, fan.DOMAIN, SERVICE_TURN_OFF)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": False}, {})
    assert len(turn_off_calls) == 1
    assert turn_off_calls[0].data == {ATTR_ENTITY_ID: entity_id}
async def test_onoff_light(hass):
    """Test OnOff trait support for light domain."""
    assert helpers.get_google_type(light.DOMAIN, None) is not None
    assert trait.OnOffTrait.supported(light.DOMAIN, 0, None)

    entity_id = "light.bla"
    on_trait = trait.OnOffTrait(hass, State(entity_id, STATE_ON), BASIC_CONFIG)
    assert on_trait.sync_attributes() == {}
    assert on_trait.query_attributes() == {"on": True}

    off_trait = trait.OnOffTrait(hass, State(entity_id, STATE_OFF), BASIC_CONFIG)
    assert off_trait.query_attributes() == {"on": False}

    turn_on_calls = async_mock_service(hass, light.DOMAIN, SERVICE_TURN_ON)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": True}, {})
    assert len(turn_on_calls) == 1
    assert turn_on_calls[0].data == {ATTR_ENTITY_ID: entity_id}

    turn_off_calls = async_mock_service(hass, light.DOMAIN, SERVICE_TURN_OFF)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": False}, {})
    assert len(turn_off_calls) == 1
    assert turn_off_calls[0].data == {ATTR_ENTITY_ID: entity_id}
async def test_onoff_media_player(hass):
    """Test OnOff trait support for media_player domain."""
    assert helpers.get_google_type(media_player.DOMAIN, None) is not None
    assert trait.OnOffTrait.supported(media_player.DOMAIN, 0, None)

    entity_id = "media_player.bla"
    on_trait = trait.OnOffTrait(hass, State(entity_id, STATE_ON), BASIC_CONFIG)
    assert on_trait.sync_attributes() == {}
    assert on_trait.query_attributes() == {"on": True}

    off_trait = trait.OnOffTrait(hass, State(entity_id, STATE_OFF), BASIC_CONFIG)
    assert off_trait.query_attributes() == {"on": False}

    turn_on_calls = async_mock_service(hass, media_player.DOMAIN, SERVICE_TURN_ON)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": True}, {})
    assert len(turn_on_calls) == 1
    assert turn_on_calls[0].data == {ATTR_ENTITY_ID: entity_id}

    turn_off_calls = async_mock_service(hass, media_player.DOMAIN, SERVICE_TURN_OFF)
    await on_trait.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": False}, {})
    assert len(turn_off_calls) == 1
    assert turn_off_calls[0].data == {ATTR_ENTITY_ID: entity_id}
async def test_dock_vacuum(hass):
    """Test dock trait support for vacuum domain."""
    assert helpers.get_google_type(vacuum.DOMAIN, None) is not None
    assert trait.DockTrait.supported(vacuum.DOMAIN, 0, None)

    dock_trait = trait.DockTrait(
        hass, State("vacuum.bla", vacuum.STATE_IDLE), BASIC_CONFIG
    )
    assert dock_trait.sync_attributes() == {}
    assert dock_trait.query_attributes() == {"isDocked": False}

    # Docking maps onto the return-to-base service.
    return_calls = async_mock_service(hass, vacuum.DOMAIN, vacuum.SERVICE_RETURN_TO_BASE)
    await dock_trait.execute(trait.COMMAND_DOCK, BASIC_DATA, {}, {})
    assert len(return_calls) == 1
    assert return_calls[0].data == {ATTR_ENTITY_ID: "vacuum.bla"}
async def test_startstop_vacuum(hass):
    """Test startStop trait support for vacuum domain."""
    assert helpers.get_google_type(vacuum.DOMAIN, None) is not None
    assert trait.StartStopTrait.supported(vacuum.DOMAIN, 0, None)
    trt = trait.StartStopTrait(
        hass,
        State(
            "vacuum.bla",
            vacuum.STATE_PAUSED,
            {ATTR_SUPPORTED_FEATURES: vacuum.SUPPORT_PAUSE},
        ),
        BASIC_CONFIG,
    )
    # SUPPORT_PAUSE makes the device report as pausable; a paused vacuum is
    # neither running nor stopped.
    assert trt.sync_attributes() == {"pausable": True}
    assert trt.query_attributes() == {"isRunning": False, "isPaused": True}
    start_calls = async_mock_service(hass, vacuum.DOMAIN, vacuum.SERVICE_START)
    await trt.execute(trait.COMMAND_STARTSTOP, BASIC_DATA, {"start": True}, {})
    assert len(start_calls) == 1
    assert start_calls[0].data == {ATTR_ENTITY_ID: "vacuum.bla"}
    stop_calls = async_mock_service(hass, vacuum.DOMAIN, vacuum.SERVICE_STOP)
    await trt.execute(trait.COMMAND_STARTSTOP, BASIC_DATA, {"start": False}, {})
    assert len(stop_calls) == 1
    assert stop_calls[0].data == {ATTR_ENTITY_ID: "vacuum.bla"}
    pause_calls = async_mock_service(hass, vacuum.DOMAIN, vacuum.SERVICE_PAUSE)
    await trt.execute(trait.COMMAND_PAUSEUNPAUSE, BASIC_DATA, {"pause": True}, {})
    assert len(pause_calls) == 1
    assert pause_calls[0].data == {ATTR_ENTITY_ID: "vacuum.bla"}
    # Unpausing maps back onto the start service.
    unpause_calls = async_mock_service(hass, vacuum.DOMAIN, vacuum.SERVICE_START)
    await trt.execute(trait.COMMAND_PAUSEUNPAUSE, BASIC_DATA, {"pause": False}, {})
    assert len(unpause_calls) == 1
    assert unpause_calls[0].data == {ATTR_ENTITY_ID: "vacuum.bla"}
async def test_color_setting_color_light(hass):
    """Test ColorSpectrum trait support for light domain."""
    assert helpers.get_google_type(light.DOMAIN, None) is not None
    assert not trait.ColorSettingTrait.supported(light.DOMAIN, 0, None)
    assert trait.ColorSettingTrait.supported(light.DOMAIN, light.SUPPORT_COLOR, None)
    trt = trait.ColorSettingTrait(
        hass,
        State(
            "light.bla",
            STATE_ON,
            {
                light.ATTR_HS_COLOR: (20, 94),
                light.ATTR_BRIGHTNESS: 200,
                ATTR_SUPPORTED_FEATURES: light.SUPPORT_COLOR,
            },
        ),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {"colorModel": "hsv"}
    # HA saturation (0-100) maps to Google's 0-1 scale; value is
    # brightness / 255.
    assert trt.query_attributes() == {
        "color": {"spectrumHsv": {"hue": 20, "saturation": 0.94, "value": 200 / 255}}
    }
    assert trt.can_execute(
        trait.COMMAND_COLOR_ABSOLUTE, {"color": {"spectrumRGB": 16715792}}
    )
    calls = async_mock_service(hass, light.DOMAIN, SERVICE_TURN_ON)
    # 1052927 == 0x1010FF, which converts to an HS pair of (240, 93.725).
    await trt.execute(
        trait.COMMAND_COLOR_ABSOLUTE,
        BASIC_DATA,
        {"color": {"spectrumRGB": 1052927}},
        {},
    )
    assert len(calls) == 1
    assert calls[0].data == {
        ATTR_ENTITY_ID: "light.bla",
        light.ATTR_HS_COLOR: (240, 93.725),
    }
    # HSV commands carry brightness as well as hue/saturation.
    await trt.execute(
        trait.COMMAND_COLOR_ABSOLUTE,
        BASIC_DATA,
        {"color": {"spectrumHSV": {"hue": 100, "saturation": 0.50, "value": 0.20}}},
        {},
    )
    assert len(calls) == 2
    assert calls[1].data == {
        ATTR_ENTITY_ID: "light.bla",
        light.ATTR_HS_COLOR: [100, 50],
        light.ATTR_BRIGHTNESS: 0.2 * 255,
    }
async def test_color_setting_temperature_light(hass):
    """Test ColorTemperature trait support for light domain."""
    assert helpers.get_google_type(light.DOMAIN, None) is not None
    assert not trait.ColorSettingTrait.supported(light.DOMAIN, 0, None)
    assert trait.ColorSettingTrait.supported(
        light.DOMAIN, light.SUPPORT_COLOR_TEMP, None
    )
    trt = trait.ColorSettingTrait(
        hass,
        State(
            "light.bla",
            STATE_ON,
            {
                light.ATTR_MIN_MIREDS: 200,
                light.ATTR_COLOR_TEMP: 300,
                light.ATTR_MAX_MIREDS: 500,
                ATTR_SUPPORTED_FEATURES: light.SUPPORT_COLOR_TEMP,
            },
        ),
        BASIC_CONFIG,
    )
    # Mireds are converted to Kelvin (K = 1e6 / mired), so min/max swap:
    # 200 mireds -> 5000 K (max), 500 mireds -> 2000 K (min).
    assert trt.sync_attributes() == {
        "colorTemperatureRange": {"temperatureMinK": 2000, "temperatureMaxK": 5000}
    }
    assert trt.query_attributes() == {"color": {"temperatureK": 3333}}
    assert trt.can_execute(
        trait.COMMAND_COLOR_ABSOLUTE, {"color": {"temperature": 400}}
    )
    calls = async_mock_service(hass, light.DOMAIN, SERVICE_TURN_ON)
    # Temperatures outside the supported Kelvin range must be rejected.
    with pytest.raises(helpers.SmartHomeError) as err:
        await trt.execute(
            trait.COMMAND_COLOR_ABSOLUTE,
            BASIC_DATA,
            {"color": {"temperature": 5555}},
            {},
        )
    assert err.value.code == const.ERR_VALUE_OUT_OF_RANGE
    await trt.execute(
        trait.COMMAND_COLOR_ABSOLUTE, BASIC_DATA, {"color": {"temperature": 2857}}, {}
    )
    assert len(calls) == 1
    assert calls[0].data == {
        ATTR_ENTITY_ID: "light.bla",
        light.ATTR_COLOR_TEMP: color.color_temperature_kelvin_to_mired(2857),
    }
async def test_color_light_temperature_light_bad_temp(hass):
    """Test ColorTemperature trait support for light domain."""
    assert helpers.get_google_type(light.DOMAIN, None) is not None
    assert not trait.ColorSettingTrait.supported(light.DOMAIN, 0, None)
    assert trait.ColorSettingTrait.supported(
        light.DOMAIN, light.SUPPORT_COLOR_TEMP, None
    )
    trt = trait.ColorSettingTrait(
        hass,
        State(
            "light.bla",
            STATE_ON,
            {
                light.ATTR_MIN_MIREDS: 200,
                # A color temperature of 0 mireds is invalid (division by
                # zero when converting to Kelvin) and must be ignored.
                light.ATTR_COLOR_TEMP: 0,
                light.ATTR_MAX_MIREDS: 500,
            },
        ),
        BASIC_CONFIG,
    )
    # Invalid temperature results in no reported color attributes.
    assert trt.query_attributes() == {}
async def test_scene_scene(hass):
    """Test Scene trait support for scene domain."""
    assert helpers.get_google_type(scene.DOMAIN, None) is not None
    assert trait.SceneTrait.supported(scene.DOMAIN, 0, None)
    trt = trait.SceneTrait(hass, State("scene.bla", scene.STATE), BASIC_CONFIG)
    # Scenes expose no sync/query attributes; they are activate-only.
    assert trt.sync_attributes() == {}
    assert trt.query_attributes() == {}
    assert trt.can_execute(trait.COMMAND_ACTIVATE_SCENE, {})
    calls = async_mock_service(hass, scene.DOMAIN, SERVICE_TURN_ON)
    await trt.execute(trait.COMMAND_ACTIVATE_SCENE, BASIC_DATA, {}, {})
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "scene.bla"}
async def test_scene_script(hass):
    """Test Scene trait support for script domain."""
    assert helpers.get_google_type(script.DOMAIN, None) is not None
    assert trait.SceneTrait.supported(script.DOMAIN, 0, None)
    trt = trait.SceneTrait(hass, State("script.bla", STATE_OFF), BASIC_CONFIG)
    assert trt.sync_attributes() == {}
    assert trt.query_attributes() == {}
    assert trt.can_execute(trait.COMMAND_ACTIVATE_SCENE, {})
    calls = async_mock_service(hass, script.DOMAIN, SERVICE_TURN_ON)
    await trt.execute(trait.COMMAND_ACTIVATE_SCENE, BASIC_DATA, {}, {})
    # We don't wait till script execution is done.
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "script.bla"}
async def test_temperature_setting_climate_onoff(hass):
    """Test TemperatureSetting trait support for climate domain - range."""
    assert helpers.get_google_type(climate.DOMAIN, None) is not None
    assert trait.TemperatureSettingTrait.supported(climate.DOMAIN, 0, None)
    hass.config.units.temperature_unit = TEMP_FAHRENHEIT
    trt = trait.TemperatureSettingTrait(
        hass,
        State(
            "climate.bla",
            climate.HVAC_MODE_AUTO,
            {
                ATTR_SUPPORTED_FEATURES: climate.SUPPORT_TARGET_TEMPERATURE_RANGE,
                climate.ATTR_HVAC_MODES: [
                    climate.HVAC_MODE_OFF,
                    climate.HVAC_MODE_COOL,
                    climate.HVAC_MODE_HEAT,
                    climate.HVAC_MODE_HEAT_COOL,
                ],
                # None min/max temps exercise the "no limits known" path.
                climate.ATTR_MIN_TEMP: None,
                climate.ATTR_MAX_TEMP: None,
            },
        ),
        BASIC_CONFIG,
    )
    # Because HVAC_MODE_OFF is listed, Google also gets a synthetic "on" mode.
    assert trt.sync_attributes() == {
        "availableThermostatModes": "off,cool,heat,heatcool,on",
        "thermostatTemperatureUnit": "F",
    }
    assert trt.can_execute(trait.COMMAND_THERMOSTAT_SET_MODE, {})
    # Mode "on" maps to the generic turn_on service ...
    calls = async_mock_service(hass, climate.DOMAIN, SERVICE_TURN_ON)
    await trt.execute(
        trait.COMMAND_THERMOSTAT_SET_MODE, BASIC_DATA, {"thermostatMode": "on"}, {}
    )
    assert len(calls) == 1
    # ... and mode "off" to turn_off.
    calls = async_mock_service(hass, climate.DOMAIN, SERVICE_TURN_OFF)
    await trt.execute(
        trait.COMMAND_THERMOSTAT_SET_MODE, BASIC_DATA, {"thermostatMode": "off"}, {}
    )
    assert len(calls) == 1
async def test_temperature_setting_climate_range(hass):
    """Test TemperatureSetting trait support for climate domain - range."""
    assert helpers.get_google_type(climate.DOMAIN, None) is not None
    assert trait.TemperatureSettingTrait.supported(climate.DOMAIN, 0, None)
    hass.config.units.temperature_unit = TEMP_FAHRENHEIT
    trt = trait.TemperatureSettingTrait(
        hass,
        State(
            "climate.bla",
            climate.HVAC_MODE_AUTO,
            {
                climate.ATTR_CURRENT_TEMPERATURE: 70,
                climate.ATTR_CURRENT_HUMIDITY: 25,
                ATTR_SUPPORTED_FEATURES: climate.SUPPORT_TARGET_TEMPERATURE_RANGE,
                climate.ATTR_HVAC_MODES: [
                    STATE_OFF,
                    climate.HVAC_MODE_COOL,
                    climate.HVAC_MODE_HEAT,
                    climate.HVAC_MODE_AUTO,
                ],
                climate.ATTR_TARGET_TEMP_HIGH: 75,
                climate.ATTR_TARGET_TEMP_LOW: 65,
                climate.ATTR_MIN_TEMP: 50,
                climate.ATTR_MAX_TEMP: 80,
            },
        ),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {
        "availableThermostatModes": "off,cool,heat,auto,on",
        "thermostatTemperatureUnit": "F",
    }
    # Query attributes are reported in Celsius even though HA is configured
    # for Fahrenheit: 70F -> 21.1C, 65F -> 18.3C, 75F -> 23.9C.
    assert trt.query_attributes() == {
        "thermostatMode": "auto",
        "thermostatTemperatureAmbient": 21.1,
        "thermostatHumidityAmbient": 25,
        "thermostatTemperatureSetpointLow": 18.3,
        "thermostatTemperatureSetpointHigh": 23.9,
    }
    assert trt.can_execute(trait.COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE, {})
    assert trt.can_execute(trait.COMMAND_THERMOSTAT_SET_MODE, {})
    calls = async_mock_service(hass, climate.DOMAIN, climate.SERVICE_SET_TEMPERATURE)
    # Setpoints arrive in Celsius and are converted back to Fahrenheit.
    await trt.execute(
        trait.COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
        BASIC_DATA,
        {
            "thermostatTemperatureSetpointHigh": 25,
            "thermostatTemperatureSetpointLow": 20,
        },
        {},
    )
    assert len(calls) == 1
    assert calls[0].data == {
        ATTR_ENTITY_ID: "climate.bla",
        climate.ATTR_TARGET_TEMP_HIGH: 77,
        climate.ATTR_TARGET_TEMP_LOW: 68,
    }
    calls = async_mock_service(hass, climate.DOMAIN, climate.SERVICE_SET_HVAC_MODE)
    await trt.execute(
        trait.COMMAND_THERMOSTAT_SET_MODE, BASIC_DATA, {"thermostatMode": "cool"}, {}
    )
    assert len(calls) == 1
    assert calls[0].data == {
        ATTR_ENTITY_ID: "climate.bla",
        climate.ATTR_HVAC_MODE: climate.HVAC_MODE_COOL,
    }
    # A setpoint below the device minimum must be rejected.
    with pytest.raises(helpers.SmartHomeError) as err:
        await trt.execute(
            trait.COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
            BASIC_DATA,
            {"thermostatTemperatureSetpoint": -100},
            {},
        )
    assert err.value.code == const.ERR_VALUE_OUT_OF_RANGE
    # Restore the default unit so later tests are unaffected.
    hass.config.units.temperature_unit = TEMP_CELSIUS
async def test_temperature_setting_climate_setpoint(hass):
    """Test TemperatureSetting trait support for climate domain - setpoint."""
    assert helpers.get_google_type(climate.DOMAIN, None) is not None
    assert trait.TemperatureSettingTrait.supported(climate.DOMAIN, 0, None)
    hass.config.units.temperature_unit = TEMP_CELSIUS
    trt = trait.TemperatureSettingTrait(
        hass,
        State(
            "climate.bla",
            climate.HVAC_MODE_COOL,
            {
                climate.ATTR_HVAC_MODES: [STATE_OFF, climate.HVAC_MODE_COOL],
                climate.ATTR_MIN_TEMP: 10,
                climate.ATTR_MAX_TEMP: 30,
                ATTR_TEMPERATURE: 18,
                climate.ATTR_CURRENT_TEMPERATURE: 20,
            },
        ),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {
        "availableThermostatModes": "off,cool,on",
        "thermostatTemperatureUnit": "C",
    }
    assert trt.query_attributes() == {
        "thermostatMode": "cool",
        "thermostatTemperatureAmbient": 20,
        "thermostatTemperatureSetpoint": 18,
    }
    assert trt.can_execute(trait.COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT, {})
    assert trt.can_execute(trait.COMMAND_THERMOSTAT_SET_MODE, {})
    calls = async_mock_service(hass, climate.DOMAIN, climate.SERVICE_SET_TEMPERATURE)
    # Out-of-range setpoint (-100 < min 10) must raise and not call the service.
    with pytest.raises(helpers.SmartHomeError):
        await trt.execute(
            trait.COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
            BASIC_DATA,
            {"thermostatTemperatureSetpoint": -100},
            {},
        )
    await trt.execute(
        trait.COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
        BASIC_DATA,
        {"thermostatTemperatureSetpoint": 19},
        {},
    )
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "climate.bla", ATTR_TEMPERATURE: 19}
async def test_temperature_setting_climate_setpoint_auto(hass):
    """
    Test TemperatureSetting trait support for climate domain.
    Setpoint in auto mode.
    """
    hass.config.units.temperature_unit = TEMP_CELSIUS
    trt = trait.TemperatureSettingTrait(
        hass,
        State(
            "climate.bla",
            climate.HVAC_MODE_HEAT_COOL,
            {
                climate.ATTR_HVAC_MODES: [
                    climate.HVAC_MODE_OFF,
                    climate.HVAC_MODE_HEAT_COOL,
                ],
                climate.ATTR_MIN_TEMP: 10,
                climate.ATTR_MAX_TEMP: 30,
                ATTR_TEMPERATURE: 18,
                climate.ATTR_CURRENT_TEMPERATURE: 20,
            },
        ),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {
        "availableThermostatModes": "off,heatcool,on",
        "thermostatTemperatureUnit": "C",
    }
    # In heatcool mode a single setpoint is reported as both the low and
    # high end of the range.
    assert trt.query_attributes() == {
        "thermostatMode": "heatcool",
        "thermostatTemperatureAmbient": 20,
        "thermostatTemperatureSetpointHigh": 18,
        "thermostatTemperatureSetpointLow": 18,
    }
    assert trt.can_execute(trait.COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT, {})
    assert trt.can_execute(trait.COMMAND_THERMOSTAT_SET_MODE, {})
    calls = async_mock_service(hass, climate.DOMAIN, climate.SERVICE_SET_TEMPERATURE)
    await trt.execute(
        trait.COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
        BASIC_DATA,
        {"thermostatTemperatureSetpoint": 19},
        {},
    )
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "climate.bla", ATTR_TEMPERATURE: 19}
async def test_lock_unlock_lock(hass):
    """Test LockUnlock trait locking support for lock domain."""
    assert helpers.get_google_type(lock.DOMAIN, None) is not None
    assert trait.LockUnlockTrait.supported(lock.DOMAIN, lock.SUPPORT_OPEN, None)
    assert trait.LockUnlockTrait.might_2fa(lock.DOMAIN, lock.SUPPORT_OPEN, None)
    trt = trait.LockUnlockTrait(
        hass, State("lock.front_door", lock.STATE_LOCKED), PIN_CONFIG
    )
    assert trt.sync_attributes() == {}
    assert trt.query_attributes() == {"isLocked": True}
    assert trt.can_execute(trait.COMMAND_LOCKUNLOCK, {"lock": True})
    calls = async_mock_service(hass, lock.DOMAIN, lock.SERVICE_LOCK)
    # Locking does not require a PIN challenge, only unlocking does.
    await trt.execute(trait.COMMAND_LOCKUNLOCK, PIN_DATA, {"lock": True}, {})
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "lock.front_door"}
async def test_lock_unlock_unlock(hass):
    """Test LockUnlock trait unlocking support for lock domain."""
    assert helpers.get_google_type(lock.DOMAIN, None) is not None
    assert trait.LockUnlockTrait.supported(lock.DOMAIN, lock.SUPPORT_OPEN, None)
    trt = trait.LockUnlockTrait(
        hass, State("lock.front_door", lock.STATE_LOCKED), PIN_CONFIG
    )
    assert trt.sync_attributes() == {}
    assert trt.query_attributes() == {"isLocked": True}
    assert trt.can_execute(trait.COMMAND_LOCKUNLOCK, {"lock": False})
    calls = async_mock_service(hass, lock.DOMAIN, lock.SERVICE_UNLOCK)
    # No challenge data
    with pytest.raises(error.ChallengeNeeded) as err:
        await trt.execute(trait.COMMAND_LOCKUNLOCK, PIN_DATA, {"lock": False}, {})
    assert len(calls) == 0
    assert err.value.code == const.ERR_CHALLENGE_NEEDED
    assert err.value.challenge_type == const.CHALLENGE_PIN_NEEDED
    # invalid pin
    with pytest.raises(error.ChallengeNeeded) as err:
        await trt.execute(
            trait.COMMAND_LOCKUNLOCK, PIN_DATA, {"lock": False}, {"pin": 9999}
        )
    assert len(calls) == 0
    assert err.value.code == const.ERR_CHALLENGE_NEEDED
    assert err.value.challenge_type == const.CHALLENGE_FAILED_PIN_NEEDED
    # Correct pin: service is finally called.
    await trt.execute(
        trait.COMMAND_LOCKUNLOCK, PIN_DATA, {"lock": False}, {"pin": "1234"}
    )
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "lock.front_door"}
    # Test without pin
    trt = trait.LockUnlockTrait(
        hass, State("lock.front_door", lock.STATE_LOCKED), BASIC_CONFIG
    )
    with pytest.raises(error.SmartHomeError) as err:
        await trt.execute(trait.COMMAND_LOCKUNLOCK, BASIC_DATA, {"lock": False}, {})
    # Still only the one successful call from above.
    assert len(calls) == 1
    assert err.value.code == const.ERR_CHALLENGE_NOT_SETUP
    # Test with 2FA override
    with patch(
        "homeassistant.components.google_assistant.helpers"
        ".AbstractConfig.should_2fa",
        return_value=False,
    ):
        await trt.execute(trait.COMMAND_LOCKUNLOCK, BASIC_DATA, {"lock": False}, {})
    assert len(calls) == 2
async def test_arm_disarm_arm_away(hass):
    """Test ArmDisarm trait Arming support for alarm_control_panel domain."""
    assert helpers.get_google_type(alarm_control_panel.DOMAIN, None) is not None
    assert trait.ArmDisArmTrait.supported(alarm_control_panel.DOMAIN, 0, None)
    assert trait.ArmDisArmTrait.might_2fa(alarm_control_panel.DOMAIN, 0, None)
    trt = trait.ArmDisArmTrait(
        hass,
        State(
            "alarm_control_panel.alarm",
            STATE_ALARM_ARMED_AWAY,
            {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: True},
        ),
        PIN_CONFIG,
    )
    # Sync exposes every supported arm level with its English synonyms.
    assert trt.sync_attributes() == {
        "availableArmLevels": {
            "levels": [
                {
                    "level_name": "armed_home",
                    "level_values": [
                        {"level_synonym": ["armed home", "home"], "lang": "en"}
                    ],
                },
                {
                    "level_name": "armed_away",
                    "level_values": [
                        {"level_synonym": ["armed away", "away"], "lang": "en"}
                    ],
                },
                {
                    "level_name": "armed_night",
                    "level_values": [
                        {"level_synonym": ["armed night", "night"], "lang": "en"}
                    ],
                },
                {
                    "level_name": "armed_custom_bypass",
                    "level_values": [
                        {
                            "level_synonym": ["armed custom bypass", "custom"],
                            "lang": "en",
                        }
                    ],
                },
                {
                    "level_name": "triggered",
                    "level_values": [{"level_synonym": ["triggered"], "lang": "en"}],
                },
            ],
            "ordered": False,
        }
    }
    assert trt.query_attributes() == {
        "isArmed": True,
        "currentArmLevel": STATE_ALARM_ARMED_AWAY,
    }
    assert trt.can_execute(
        trait.COMMAND_ARMDISARM, {"arm": True, "armLevel": STATE_ALARM_ARMED_AWAY}
    )
    calls = async_mock_service(
        hass, alarm_control_panel.DOMAIN, alarm_control_panel.SERVICE_ALARM_ARM_AWAY
    )
    # Test with no secure_pin configured
    with pytest.raises(error.SmartHomeError) as err:
        trt = trait.ArmDisArmTrait(
            hass,
            State(
                "alarm_control_panel.alarm",
                STATE_ALARM_DISARMED,
                {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: True},
            ),
            BASIC_CONFIG,
        )
        await trt.execute(
            trait.COMMAND_ARMDISARM,
            BASIC_DATA,
            {"arm": True, "armLevel": STATE_ALARM_ARMED_AWAY},
            {},
        )
    assert len(calls) == 0
    assert err.value.code == const.ERR_CHALLENGE_NOT_SETUP
    trt = trait.ArmDisArmTrait(
        hass,
        State(
            "alarm_control_panel.alarm",
            STATE_ALARM_DISARMED,
            {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: True},
        ),
        PIN_CONFIG,
    )
    # No challenge data
    with pytest.raises(error.ChallengeNeeded) as err:
        await trt.execute(
            trait.COMMAND_ARMDISARM,
            PIN_DATA,
            {"arm": True, "armLevel": STATE_ALARM_ARMED_AWAY},
            {},
        )
    assert len(calls) == 0
    assert err.value.code == const.ERR_CHALLENGE_NEEDED
    assert err.value.challenge_type == const.CHALLENGE_PIN_NEEDED
    # invalid pin
    with pytest.raises(error.ChallengeNeeded) as err:
        await trt.execute(
            trait.COMMAND_ARMDISARM,
            PIN_DATA,
            {"arm": True, "armLevel": STATE_ALARM_ARMED_AWAY},
            {"pin": 9999},
        )
    assert len(calls) == 0
    assert err.value.code == const.ERR_CHALLENGE_NEEDED
    assert err.value.challenge_type == const.CHALLENGE_FAILED_PIN_NEEDED
    # correct pin
    await trt.execute(
        trait.COMMAND_ARMDISARM,
        PIN_DATA,
        {"arm": True, "armLevel": STATE_ALARM_ARMED_AWAY},
        {"pin": "1234"},
    )
    assert len(calls) == 1
    # Test already armed
    with pytest.raises(error.SmartHomeError) as err:
        trt = trait.ArmDisArmTrait(
            hass,
            State(
                "alarm_control_panel.alarm",
                STATE_ALARM_ARMED_AWAY,
                {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: True},
            ),
            PIN_CONFIG,
        )
        await trt.execute(
            trait.COMMAND_ARMDISARM,
            PIN_DATA,
            {"arm": True, "armLevel": STATE_ALARM_ARMED_AWAY},
            {},
        )
    assert len(calls) == 1
    assert err.value.code == const.ERR_ALREADY_ARMED
    # Test with code_arm_required False
    trt = trait.ArmDisArmTrait(
        hass,
        State(
            "alarm_control_panel.alarm",
            STATE_ALARM_DISARMED,
            {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: False},
        ),
        PIN_CONFIG,
    )
    # No pin needed when the panel itself does not require a code to arm.
    await trt.execute(
        trait.COMMAND_ARMDISARM,
        PIN_DATA,
        {"arm": True, "armLevel": STATE_ALARM_ARMED_AWAY},
        {},
    )
    assert len(calls) == 2
async def test_arm_disarm_disarm(hass):
    """Test ArmDisarm trait Disarming support for alarm_control_panel domain."""
    assert helpers.get_google_type(alarm_control_panel.DOMAIN, None) is not None
    assert trait.ArmDisArmTrait.supported(alarm_control_panel.DOMAIN, 0, None)
    assert trait.ArmDisArmTrait.might_2fa(alarm_control_panel.DOMAIN, 0, None)
    trt = trait.ArmDisArmTrait(
        hass,
        State(
            "alarm_control_panel.alarm",
            STATE_ALARM_DISARMED,
            {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: True},
        ),
        PIN_CONFIG,
    )
    assert trt.sync_attributes() == {
        "availableArmLevels": {
            "levels": [
                {
                    "level_name": "armed_home",
                    "level_values": [
                        {"level_synonym": ["armed home", "home"], "lang": "en"}
                    ],
                },
                {
                    "level_name": "armed_away",
                    "level_values": [
                        {"level_synonym": ["armed away", "away"], "lang": "en"}
                    ],
                },
                {
                    "level_name": "armed_night",
                    "level_values": [
                        {"level_synonym": ["armed night", "night"], "lang": "en"}
                    ],
                },
                {
                    "level_name": "armed_custom_bypass",
                    "level_values": [
                        {
                            "level_synonym": ["armed custom bypass", "custom"],
                            "lang": "en",
                        }
                    ],
                },
                {
                    "level_name": "triggered",
                    "level_values": [{"level_synonym": ["triggered"], "lang": "en"}],
                },
            ],
            "ordered": False,
        }
    }
    assert trt.query_attributes() == {"isArmed": False}
    assert trt.can_execute(trait.COMMAND_ARMDISARM, {"arm": False})
    calls = async_mock_service(
        hass, alarm_control_panel.DOMAIN, alarm_control_panel.SERVICE_ALARM_DISARM
    )
    # Test without secure_pin configured
    with pytest.raises(error.SmartHomeError) as err:
        trt = trait.ArmDisArmTrait(
            hass,
            State(
                "alarm_control_panel.alarm",
                STATE_ALARM_ARMED_AWAY,
                {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: True},
            ),
            BASIC_CONFIG,
        )
        await trt.execute(trait.COMMAND_ARMDISARM, BASIC_DATA, {"arm": False}, {})
    assert len(calls) == 0
    assert err.value.code == const.ERR_CHALLENGE_NOT_SETUP
    trt = trait.ArmDisArmTrait(
        hass,
        State(
            "alarm_control_panel.alarm",
            STATE_ALARM_ARMED_AWAY,
            {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: True},
        ),
        PIN_CONFIG,
    )
    # No challenge data
    with pytest.raises(error.ChallengeNeeded) as err:
        await trt.execute(trait.COMMAND_ARMDISARM, PIN_DATA, {"arm": False}, {})
    assert len(calls) == 0
    assert err.value.code == const.ERR_CHALLENGE_NEEDED
    assert err.value.challenge_type == const.CHALLENGE_PIN_NEEDED
    # invalid pin
    with pytest.raises(error.ChallengeNeeded) as err:
        await trt.execute(
            trait.COMMAND_ARMDISARM, PIN_DATA, {"arm": False}, {"pin": 9999}
        )
    assert len(calls) == 0
    assert err.value.code == const.ERR_CHALLENGE_NEEDED
    assert err.value.challenge_type == const.CHALLENGE_FAILED_PIN_NEEDED
    # correct pin
    await trt.execute(
        trait.COMMAND_ARMDISARM, PIN_DATA, {"arm": False}, {"pin": "1234"}
    )
    assert len(calls) == 1
    # Test already disarmed
    with pytest.raises(error.SmartHomeError) as err:
        trt = trait.ArmDisArmTrait(
            hass,
            State(
                "alarm_control_panel.alarm",
                STATE_ALARM_DISARMED,
                {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: True},
            ),
            PIN_CONFIG,
        )
        await trt.execute(trait.COMMAND_ARMDISARM, PIN_DATA, {"arm": False}, {})
    assert len(calls) == 1
    assert err.value.code == const.ERR_ALREADY_DISARMED
    # Cancel arming after already armed will require pin
    with pytest.raises(error.SmartHomeError) as err:
        trt = trait.ArmDisArmTrait(
            hass,
            State(
                "alarm_control_panel.alarm",
                STATE_ALARM_ARMED_AWAY,
                {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: False},
            ),
            PIN_CONFIG,
        )
        await trt.execute(
            trait.COMMAND_ARMDISARM, PIN_DATA, {"arm": True, "cancel": True}, {}
        )
    assert len(calls) == 1
    assert err.value.code == const.ERR_CHALLENGE_NEEDED
    assert err.value.challenge_type == const.CHALLENGE_PIN_NEEDED
    # Cancel arming while pending to arm doesn't require pin
    trt = trait.ArmDisArmTrait(
        hass,
        State(
            "alarm_control_panel.alarm",
            STATE_ALARM_PENDING,
            {alarm_control_panel.ATTR_CODE_ARM_REQUIRED: False},
        ),
        PIN_CONFIG,
    )
    await trt.execute(
        trait.COMMAND_ARMDISARM, PIN_DATA, {"arm": True, "cancel": True}, {}
    )
    assert len(calls) == 2
async def test_fan_speed(hass):
    """Test FanSpeed trait speed control support for fan domain."""
    assert helpers.get_google_type(fan.DOMAIN, None) is not None
    assert trait.FanSpeedTrait.supported(fan.DOMAIN, fan.SUPPORT_SET_SPEED, None)
    trt = trait.FanSpeedTrait(
        hass,
        State(
            "fan.living_room_fan",
            fan.SPEED_HIGH,
            attributes={
                "speed_list": [
                    fan.SPEED_OFF,
                    fan.SPEED_LOW,
                    fan.SPEED_MEDIUM,
                    fan.SPEED_HIGH,
                ],
                "speed": "low",
            },
        ),
        BASIC_CONFIG,
    )
    # Each HA speed maps to a Google speed entry with English synonyms;
    # the list is ordered from slowest to fastest.
    assert trt.sync_attributes() == {
        "availableFanSpeeds": {
            "ordered": True,
            "speeds": [
                {
                    "speed_name": "off",
                    "speed_values": [{"speed_synonym": ["stop", "off"], "lang": "en"}],
                },
                {
                    "speed_name": "low",
                    "speed_values": [
                        {
                            "speed_synonym": ["slow", "low", "slowest", "lowest"],
                            "lang": "en",
                        }
                    ],
                },
                {
                    "speed_name": "medium",
                    "speed_values": [
                        {"speed_synonym": ["medium", "mid", "middle"], "lang": "en"}
                    ],
                },
                {
                    "speed_name": "high",
                    "speed_values": [
                        {
                            "speed_synonym": [
                                "high",
                                "max",
                                "fast",
                                "highest",
                                "fastest",
                                "maximum",
                            ],
                            "lang": "en",
                        }
                    ],
                },
            ],
        },
        "reversible": False,
    }
    # Current speed comes from the "speed" attribute, not the entity state.
    assert trt.query_attributes() == {
        "currentFanSpeedSetting": "low",
        "on": True,
        "online": True,
    }
    assert trt.can_execute(trait.COMMAND_FANSPEED, params={"fanSpeed": "medium"})
    calls = async_mock_service(hass, fan.DOMAIN, fan.SERVICE_SET_SPEED)
    await trt.execute(trait.COMMAND_FANSPEED, BASIC_DATA, {"fanSpeed": "medium"}, {})
    assert len(calls) == 1
    assert calls[0].data == {"entity_id": "fan.living_room_fan", "speed": "medium"}
async def test_modes(hass):
    """Test Mode trait."""
    assert helpers.get_google_type(media_player.DOMAIN, None) is not None
    assert trait.ModesTrait.supported(
        media_player.DOMAIN, media_player.SUPPORT_SELECT_SOURCE, None
    )
    trt = trait.ModesTrait(
        hass,
        State(
            "media_player.living_room",
            media_player.STATE_PLAYING,
            attributes={
                media_player.ATTR_INPUT_SOURCE_LIST: [
                    "media",
                    "game",
                    "chromecast",
                    "plex",
                ],
                media_player.ATTR_INPUT_SOURCE: "game",
            },
        ),
        BASIC_CONFIG,
    )
    attribs = trt.sync_attributes()
    # NOTE(review): only three of the four sources appear here — "plex" is
    # apparently not included in the synced settings; confirm against the
    # ModesTrait implementation before relying on this.
    assert attribs == {
        "availableModes": [
            {
                "name": "input source",
                "name_values": [{"name_synonym": ["input source"], "lang": "en"}],
                "settings": [
                    {
                        "setting_name": "media",
                        "setting_values": [
                            {"setting_synonym": ["media", "media mode"], "lang": "en"}
                        ],
                    },
                    {
                        "setting_name": "game",
                        "setting_values": [
                            {"setting_synonym": ["game", "game mode"], "lang": "en"}
                        ],
                    },
                    {
                        "setting_name": "chromecast",
                        "setting_values": [
                            {"setting_synonym": ["chromecast"], "lang": "en"}
                        ],
                    },
                ],
                "ordered": False,
            }
        ]
    }
    assert trt.query_attributes() == {
        "currentModeSettings": {"source": "game"},
        "on": True,
        "online": True,
    }
    assert trt.can_execute(
        trait.COMMAND_MODES,
        params={
            "updateModeSettings": {
                trt.HA_TO_GOOGLE.get(media_player.ATTR_INPUT_SOURCE): "media"
            }
        },
    )
    calls = async_mock_service(
        hass, media_player.DOMAIN, media_player.SERVICE_SELECT_SOURCE
    )
    await trt.execute(
        trait.COMMAND_MODES,
        BASIC_DATA,
        {
            "updateModeSettings": {
                trt.HA_TO_GOOGLE.get(media_player.ATTR_INPUT_SOURCE): "media"
            }
        },
        {},
    )
    assert len(calls) == 1
    assert calls[0].data == {"entity_id": "media_player.living_room", "source": "media"}
async def test_openclose_cover(hass):
    """Test OpenClose trait support for cover domain."""
    assert helpers.get_google_type(cover.DOMAIN, None) is not None
    assert trait.OpenCloseTrait.supported(
        cover.DOMAIN, cover.SUPPORT_SET_POSITION, None
    )
    trt = trait.OpenCloseTrait(
        hass,
        State(
            "cover.bla",
            cover.STATE_OPEN,
            {
                cover.ATTR_CURRENT_POSITION: 75,
                ATTR_SUPPORTED_FEATURES: cover.SUPPORT_SET_POSITION,
            },
        ),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {}
    # openPercent mirrors the cover's current position directly.
    assert trt.query_attributes() == {"openPercent": 75}
    calls = async_mock_service(hass, cover.DOMAIN, cover.SERVICE_SET_COVER_POSITION)
    await trt.execute(trait.COMMAND_OPENCLOSE, BASIC_DATA, {"openPercent": 50}, {})
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "cover.bla", cover.ATTR_POSITION: 50}
async def test_openclose_cover_unknown_state(hass):
    """Test OpenClose trait support for cover domain with unknown state."""
    assert helpers.get_google_type(cover.DOMAIN, None) is not None
    assert trait.OpenCloseTrait.supported(
        cover.DOMAIN, cover.SUPPORT_SET_POSITION, None
    )
    # No state
    trt = trait.OpenCloseTrait(
        hass, State("cover.bla", STATE_UNKNOWN, {}), BASIC_CONFIG
    )
    assert trt.sync_attributes() == {}
    # With an unknown state the position cannot be reported.
    with pytest.raises(helpers.SmartHomeError):
        trt.query_attributes()
    calls = async_mock_service(hass, cover.DOMAIN, cover.SERVICE_OPEN_COVER)
    await trt.execute(trait.COMMAND_OPENCLOSE, BASIC_DATA, {"openPercent": 100}, {})
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "cover.bla"}
    # After a successful execute the trait assumes the commanded position.
    assert trt.query_attributes() == {"openPercent": 100}
async def test_openclose_cover_assumed_state(hass):
    """Test OpenClose trait support for cover domain."""
    assert helpers.get_google_type(cover.DOMAIN, None) is not None
    assert trait.OpenCloseTrait.supported(
        cover.DOMAIN, cover.SUPPORT_SET_POSITION, None
    )
    trt = trait.OpenCloseTrait(
        hass,
        State(
            "cover.bla",
            cover.STATE_OPEN,
            {
                ATTR_ASSUMED_STATE: True,
                ATTR_SUPPORTED_FEATURES: cover.SUPPORT_SET_POSITION,
            },
        ),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {}
    # Assumed state with no known position cannot be queried.
    with pytest.raises(helpers.SmartHomeError):
        trt.query_attributes()
    calls = async_mock_service(hass, cover.DOMAIN, cover.SERVICE_SET_COVER_POSITION)
    await trt.execute(trait.COMMAND_OPENCLOSE, BASIC_DATA, {"openPercent": 40}, {})
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "cover.bla", cover.ATTR_POSITION: 40}
    # After a successful execute the trait reports the commanded position.
    assert trt.query_attributes() == {"openPercent": 40}
async def test_openclose_cover_no_position(hass):
    """Test OpenClose trait support for cover domain."""
    assert helpers.get_google_type(cover.DOMAIN, None) is not None
    assert trait.OpenCloseTrait.supported(
        cover.DOMAIN, cover.SUPPORT_SET_POSITION, None
    )
    trt = trait.OpenCloseTrait(
        hass, State("cover.bla", cover.STATE_OPEN, {}), BASIC_CONFIG
    )
    assert trt.sync_attributes() == {}
    # Without a position attribute, an open cover reports fully open.
    assert trt.query_attributes() == {"openPercent": 100}
    # openPercent 0 maps to the plain close_cover service (no position data).
    calls = async_mock_service(hass, cover.DOMAIN, cover.SERVICE_CLOSE_COVER)
    await trt.execute(trait.COMMAND_OPENCLOSE, BASIC_DATA, {"openPercent": 0}, {})
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "cover.bla"}
@pytest.mark.parametrize(
    "device_class", (cover.DEVICE_CLASS_DOOR, cover.DEVICE_CLASS_GARAGE)
)
async def test_openclose_cover_secure(hass, device_class):
    """Test OpenClose trait support for cover domain."""
    assert helpers.get_google_type(cover.DOMAIN, device_class) is not None
    assert trait.OpenCloseTrait.supported(
        cover.DOMAIN, cover.SUPPORT_SET_POSITION, device_class
    )
    # Door/garage covers may require 2FA before opening.
    assert trait.OpenCloseTrait.might_2fa(
        cover.DOMAIN, cover.SUPPORT_SET_POSITION, device_class
    )
    trt = trait.OpenCloseTrait(
        hass,
        State(
            "cover.bla",
            cover.STATE_OPEN,
            {
                ATTR_DEVICE_CLASS: device_class,
                ATTR_SUPPORTED_FEATURES: cover.SUPPORT_SET_POSITION,
                cover.ATTR_CURRENT_POSITION: 75,
            },
        ),
        PIN_CONFIG,
    )
    assert trt.sync_attributes() == {}
    assert trt.query_attributes() == {"openPercent": 75}
    calls = async_mock_service(hass, cover.DOMAIN, cover.SERVICE_SET_COVER_POSITION)
    # No challenge data
    with pytest.raises(error.ChallengeNeeded) as err:
        await trt.execute(trait.COMMAND_OPENCLOSE, PIN_DATA, {"openPercent": 50}, {})
    assert len(calls) == 0
    assert err.value.code == const.ERR_CHALLENGE_NEEDED
    assert err.value.challenge_type == const.CHALLENGE_PIN_NEEDED
    # invalid pin
    with pytest.raises(error.ChallengeNeeded) as err:
        await trt.execute(
            trait.COMMAND_OPENCLOSE, PIN_DATA, {"openPercent": 50}, {"pin": "9999"}
        )
    assert len(calls) == 0
    assert err.value.code == const.ERR_CHALLENGE_NEEDED
    assert err.value.challenge_type == const.CHALLENGE_FAILED_PIN_NEEDED
    # Correct pin: the cover position service is called.
    await trt.execute(
        trait.COMMAND_OPENCLOSE, PIN_DATA, {"openPercent": 50}, {"pin": "1234"}
    )
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "cover.bla", cover.ATTR_POSITION: 50}
    # no challenge on close
    calls = async_mock_service(hass, cover.DOMAIN, cover.SERVICE_CLOSE_COVER)
    await trt.execute(trait.COMMAND_OPENCLOSE, PIN_DATA, {"openPercent": 0}, {})
    assert len(calls) == 1
    assert calls[0].data == {ATTR_ENTITY_ID: "cover.bla"}
@pytest.mark.parametrize(
    "device_class",
    (
        binary_sensor.DEVICE_CLASS_DOOR,
        binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
        binary_sensor.DEVICE_CLASS_LOCK,
        binary_sensor.DEVICE_CLASS_OPENING,
        binary_sensor.DEVICE_CLASS_WINDOW,
    ),
)
async def test_openclose_binary_sensor(hass, device_class):
    """Test OpenClose trait support for binary_sensor domain."""
    assert helpers.get_google_type(binary_sensor.DOMAIN, device_class) is not None
    assert trait.OpenCloseTrait.supported(binary_sensor.DOMAIN, 0, device_class)
    # Binary sensors are read-only: ON reports fully open ...
    trt = trait.OpenCloseTrait(
        hass,
        State("binary_sensor.test", STATE_ON, {ATTR_DEVICE_CLASS: device_class}),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {"queryOnlyOpenClose": True}
    assert trt.query_attributes() == {"openPercent": 100}
    # ... and OFF reports fully closed.
    trt = trait.OpenCloseTrait(
        hass,
        State("binary_sensor.test", STATE_OFF, {ATTR_DEVICE_CLASS: device_class}),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {"queryOnlyOpenClose": True}
    assert trt.query_attributes() == {"openPercent": 0}
async def test_volume_media_player(hass):
    """Test volume trait support for media player domain."""
    assert helpers.get_google_type(media_player.DOMAIN, None) is not None
    assert trait.VolumeTrait.supported(
        media_player.DOMAIN,
        media_player.SUPPORT_VOLUME_SET | media_player.SUPPORT_VOLUME_MUTE,
        None,
    )
    trt = trait.VolumeTrait(
        hass,
        State(
            "media_player.bla",
            media_player.STATE_PLAYING,
            {
                media_player.ATTR_MEDIA_VOLUME_LEVEL: 0.3,
                media_player.ATTR_MEDIA_VOLUME_MUTED: False,
            },
        ),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {}
    # HA volume level (0.0-1.0) is reported to Google as a 0-100 integer.
    assert trt.query_attributes() == {"currentVolume": 30, "isMuted": False}
    calls = async_mock_service(
        hass, media_player.DOMAIN, media_player.SERVICE_VOLUME_SET
    )
    # Google volumeLevel 60 maps back to HA volume level 0.6.
    await trt.execute(trait.COMMAND_SET_VOLUME, BASIC_DATA, {"volumeLevel": 60}, {})
    assert len(calls) == 1
    assert calls[0].data == {
        ATTR_ENTITY_ID: "media_player.bla",
        media_player.ATTR_MEDIA_VOLUME_LEVEL: 0.6,
    }
async def test_volume_media_player_relative(hass):
    """Test volume trait support for media player domain."""
    trt = trait.VolumeTrait(
        hass,
        State(
            "media_player.bla",
            media_player.STATE_PLAYING,
            {
                media_player.ATTR_MEDIA_VOLUME_LEVEL: 0.3,
                media_player.ATTR_MEDIA_VOLUME_MUTED: False,
            },
        ),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {}
    assert trt.query_attributes() == {"currentVolume": 30, "isMuted": False}
    calls = async_mock_service(
        hass, media_player.DOMAIN, media_player.SERVICE_VOLUME_SET
    )
    # Relative change of +20 on top of current 30% yields 50% (0.5).
    await trt.execute(
        trait.COMMAND_VOLUME_RELATIVE,
        BASIC_DATA,
        {"volumeRelativeLevel": 20, "relativeSteps": 2},
        {},
    )
    assert len(calls) == 1
    assert calls[0].data == {
        ATTR_ENTITY_ID: "media_player.bla",
        media_player.ATTR_MEDIA_VOLUME_LEVEL: 0.5,
    }
async def test_temperature_setting_sensor(hass):
    """Test TemperatureSetting trait support for temperature sensor."""
    assert (
        helpers.get_google_type(sensor.DOMAIN, sensor.DEVICE_CLASS_TEMPERATURE)
        is not None
    )
    # Only temperature-class sensors are supported, not e.g. humidity.
    assert not trait.TemperatureSettingTrait.supported(
        sensor.DOMAIN, 0, sensor.DEVICE_CLASS_HUMIDITY
    )
    assert trait.TemperatureSettingTrait.supported(
        sensor.DOMAIN, 0, sensor.DEVICE_CLASS_TEMPERATURE
    )
    hass.config.units.temperature_unit = TEMP_FAHRENHEIT
    trt = trait.TemperatureSettingTrait(
        hass,
        State(
            "sensor.test", "70", {ATTR_DEVICE_CLASS: sensor.DEVICE_CLASS_TEMPERATURE}
        ),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {
        "queryOnlyTemperatureSetting": True,
        "thermostatTemperatureUnit": "F",
    }
    # 70F converts to 21.1C for the reported ambient temperature.
    assert trt.query_attributes() == {"thermostatTemperatureAmbient": 21.1}
    # Restore the default unit so later tests are unaffected.
    hass.config.units.temperature_unit = TEMP_CELSIUS
async def test_humidity_setting_sensor(hass):
    """Test HumiditySetting trait support for humidity sensor."""
    assert (
        helpers.get_google_type(sensor.DOMAIN, sensor.DEVICE_CLASS_HUMIDITY) is not None
    )
    # Only humidity-class sensors are supported, not e.g. temperature.
    assert not trait.HumiditySettingTrait.supported(
        sensor.DOMAIN, 0, sensor.DEVICE_CLASS_TEMPERATURE
    )
    assert trait.HumiditySettingTrait.supported(
        sensor.DOMAIN, 0, sensor.DEVICE_CLASS_HUMIDITY
    )
    trt = trait.HumiditySettingTrait(
        hass,
        State("sensor.test", "70", {ATTR_DEVICE_CLASS: sensor.DEVICE_CLASS_HUMIDITY}),
        BASIC_CONFIG,
    )
    assert trt.sync_attributes() == {"queryOnlyHumiditySetting": True}
    assert trt.query_attributes() == {"humidityAmbientPercent": 70}
    # Sensors are query-only; any execute command is rejected.
    with pytest.raises(helpers.SmartHomeError) as err:
        await trt.execute(trait.COMMAND_ONOFF, BASIC_DATA, {"on": False}, {})
    assert err.value.code == const.ERR_NOT_SUPPORTED
| |
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
"""
Module to handle the work related to the many AWS Lambda Triggers.
"""
import gzip
import json
import base64
from io import BytesIO
import opentracing as ot
from ...log import logger
STR_LAMBDA_TRIGGER = 'lambda.trigger'
def get_context(tracer, event):
    """
    Extract the distributed tracing context (if any) from the Lambda trigger event.

    For HTTP-style triggers (API Gateway v1/v2, Application Load Balancer) the
    context is carried in the event's ``headers``; for everything else the
    event itself is used as the carrier.
    """
    # TODO: Search for more types of trigger context
    http_style_trigger = (
        is_api_gateway_proxy_trigger(event)
        or is_api_gateway_v2_proxy_trigger(event)
        or is_application_load_balancer_trigger(event)
    )
    carrier = event.get('headers', {}) if http_style_trigger else event
    return tracer.extract(ot.Format.HTTP_HEADERS, carrier, disable_w3c_trace_context=True)
def is_api_gateway_proxy_trigger(event):
    """Return True if ``event`` has the shape of an API Gateway (REST) proxy event."""
    return all(key in event for key in ("resource", "path", "httpMethod"))
def is_api_gateway_v2_proxy_trigger(event):
    """Return True if ``event`` has the shape of an API Gateway v2.0 (HTTP API) proxy event."""
    if any(key not in event for key in ("version", "requestContext")):
        return False
    if event["version"] != "2.0":
        return False
    request_context = event["requestContext"]
    return all(key in request_context for key in ("apiId", "stage", "http"))
def is_application_load_balancer_trigger(event):
    """Return True if ``event`` was delivered by an Application Load Balancer."""
    return 'requestContext' in event and 'elb' in event['requestContext']
def is_cloudwatch_trigger(event):
    """Return True if ``event`` is a CloudWatch Events scheduled event."""
    return (
        "source" in event
        and 'detail-type' in event
        and event["source"] == 'aws.events'
        and event['detail-type'] == 'Scheduled Event'
    )
def is_cloudwatch_logs_trigger(event):
    """Return True if ``event`` carries a CloudWatch Logs payload."""
    # `event` may not be a mapping at all; probe for `get` before using it.
    # Any value other than the literal False (including None) counts as present.
    return hasattr(event, 'get') and event.get("awslogs", False) is not False
def is_s3_trigger(event):
    """
    Return True if ``event`` is an AWS S3 notification.

    @param event: the lambda handler event (dict)
    @return: bool

    The record's event source is read with ``dict.get`` because not every
    Records-style trigger carries a lowercase ``eventSource`` key (SNS, for
    example, uses ``EventSource``); direct indexing raised ``KeyError`` for
    such events.
    """
    if "Records" in event:
        records = event["Records"]
        if len(records) > 0 and records[0].get("eventSource") == 'aws:s3':
            return True
    return False
def is_sqs_trigger(event):
    """
    Return True if ``event`` is an AWS SQS message batch.

    @param event: the lambda handler event (dict)
    @return: bool

    The record's event source is read with ``dict.get`` because not every
    Records-style trigger carries a lowercase ``eventSource`` key (SNS, for
    example, uses ``EventSource``); direct indexing raised ``KeyError`` for
    such events.
    """
    if "Records" in event:
        records = event["Records"]
        if len(records) > 0 and records[0].get("eventSource") == 'aws:sqs':
            return True
    return False
def read_http_query_params(event):
    """
    Used to parse the Lambda QueryString formats.

    @param event: lambda event dict
    @return: String in the form of "a=b&c=d"
    """
    try:
        # Exact-type check (not isinstance) to mirror how events arrive.
        if event is None or type(event) is not dict:
            return ""

        # Prefer the multi-value form; fall back to the single-value form.
        for source_key in ('multiValueQueryStringParameters', 'queryStringParameters'):
            candidate = event.get(source_key, None)
            if type(candidate) is dict:
                return "&".join("%s=%s" % (name, value) for name, value in candidate.items())
        return ""
    except Exception:
        logger.debug("read_http_query_params: ", exc_info=True)
        return ""
def capture_extra_headers(event, span, extra_headers):
"""
Capture the headers specified in `extra_headers` from `event` and log them
as a tag in the span.
@param event: the lambda event
@param span: the lambda entry span
@param extra_headers: a list of http headers to capture
@return: None
"""
try:
event_headers = event.get("headers", None)
if event_headers is not None:
for custom_header in extra_headers:
for key in event_headers:
if key.lower() == custom_header.lower():
span.set_tag("http.header.%s" % custom_header, event_headers[key])
except Exception:
logger.debug("capture_extra_headers: ", exc_info=True)
def enrich_lambda_span(agent, span, event, context):
    """
    Extract the required information about this Lambda run (and the trigger) and store the data
    on `span`.
    @param agent: the AWSLambdaAgent in use
    @param span: the Lambda entry span
    @param event: the lambda handler event
    @param context: the lambda handler context
    @return: None

    Trigger detection is mutually exclusive: the first matching predicate in
    the chain below wins.  Any failure is logged at debug level and swallowed
    so that tagging never breaks the traced Lambda itself.
    """
    try:
        # Always-present Lambda metadata, independent of the trigger type.
        span.set_tag('lambda.arn', agent.collector.get_fq_arn())
        span.set_tag('lambda.name', context.function_name)
        span.set_tag('lambda.version', context.function_version)
        # Everything below inspects dict keys; bail out on non-dict payloads.
        if event is None or type(event) is not dict:
            logger.debug("enrich_lambda_span: bad event %s", type(event))
            return
        if is_api_gateway_proxy_trigger(event):
            logger.debug("Detected as API Gateway Proxy Trigger")
            span.set_tag(STR_LAMBDA_TRIGGER, 'aws:api.gateway')
            span.set_tag('http.method', event["httpMethod"])
            span.set_tag('http.url', event["path"])
            span.set_tag('http.path_tpl', event["resource"])
            span.set_tag('http.params', read_http_query_params(event))
            if agent.options.extra_http_headers is not None:
                capture_extra_headers(event, span, agent.options.extra_http_headers)
        elif is_api_gateway_v2_proxy_trigger(event):
            logger.debug("Detected as API Gateway v2.0 Proxy Trigger")
            reqCtx = event["requestContext"]
            # trim optional HTTP method prefix (routeKey is e.g. "GET /path" or "/path")
            route_path = event["routeKey"].split(" ", 2)[-1]
            span.set_tag(STR_LAMBDA_TRIGGER, 'aws:api.gateway')
            span.set_tag('http.method', reqCtx["http"]["method"])
            span.set_tag('http.url', reqCtx["http"]["path"])
            span.set_tag('http.path_tpl', route_path)
            span.set_tag('http.params', read_http_query_params(event))
            if agent.options.extra_http_headers is not None:
                capture_extra_headers(event, span, agent.options.extra_http_headers)
        elif is_application_load_balancer_trigger(event):
            logger.debug("Detected as Application Load Balancer Trigger")
            span.set_tag(STR_LAMBDA_TRIGGER, 'aws:application.load.balancer')
            span.set_tag('http.method', event["httpMethod"])
            span.set_tag('http.url', event["path"])
            span.set_tag('http.params', read_http_query_params(event))
            if agent.options.extra_http_headers is not None:
                capture_extra_headers(event, span, agent.options.extra_http_headers)
        elif is_cloudwatch_trigger(event):
            logger.debug("Detected as Cloudwatch Trigger")
            span.set_tag(STR_LAMBDA_TRIGGER, 'aws:cloudwatch.events')
            # NOTE(review): this is the only tag here with a 'data.' prefix;
            # sibling tags use bare 'lambda.cw.events.*' — confirm intentional.
            span.set_tag('data.lambda.cw.events.id', event['id'])
            # Report at most 3 resources, each truncated to 200 characters;
            # 'lambda.cw.events.more' flags that some were omitted.
            resources = event['resources']
            resource_count = len(event['resources'])
            if resource_count > 3:
                resources = event['resources'][:3]
                span.set_tag('lambda.cw.events.more', True)
            else:
                span.set_tag('lambda.cw.events.more', False)
            report = []
            for item in resources:
                if len(item) > 200:
                    item = item[:200]
                report.append(item)
            span.set_tag('lambda.cw.events.resources', report)
        elif is_cloudwatch_logs_trigger(event):
            logger.debug("Detected as Cloudwatch Logs Trigger")
            span.set_tag(STR_LAMBDA_TRIGGER, 'aws:cloudwatch.logs')
            try:
                # The logs payload is base64-encoded, gzip-compressed JSON.
                if 'awslogs' in event and 'data' in event['awslogs']:
                    data = event['awslogs']['data']
                    decoded_data = base64.b64decode(data)
                    decompressed_data = gzip.GzipFile(fileobj=BytesIO(decoded_data)).read()
                    log_data = json.loads(decompressed_data.decode('utf-8'))
                    span.set_tag('lambda.cw.logs.group', log_data.get('logGroup', None))
                    span.set_tag('lambda.cw.logs.stream', log_data.get('logStream', None))
                    # Report at most 3 log events, messages truncated to 200 chars.
                    if len(log_data['logEvents']) > 3:
                        span.set_tag('lambda.cw.logs.more', True)
                        events = log_data['logEvents'][:3]
                    else:
                        events = log_data['logEvents']
                    event_data = []
                    for item in events:
                        msg = item.get('message', None)
                        if len(msg) > 200:
                            msg = msg[:200]
                        event_data.append(msg)
                    span.set_tag('lambda.cw.logs.events', event_data)
            except Exception as e:
                # Malformed payloads are reported on the span instead of raising.
                span.set_tag('lambda.cw.logs.decodingError', repr(e))
        elif is_s3_trigger(event):
            logger.debug("Detected as S3 Trigger")
            span.set_tag(STR_LAMBDA_TRIGGER, 'aws:s3')
            # Report at most 3 records; object keys truncated to 200 chars.
            if "Records" in event:
                events = []
                for item in event["Records"][:3]:
                    bucket_name = "Unknown"
                    if "s3" in item and "bucket" in item["s3"]:
                        bucket_name = item["s3"]["bucket"]["name"]
                    object_name = ""
                    if "s3" in item and "object" in item["s3"]:
                        object_name = item["s3"]["object"].get("key", "Unknown")
                    if len(object_name) > 200:
                        object_name = object_name[:200]
                    events.append({"event": item['eventName'],
                                   "bucket": bucket_name,
                                   "object": object_name})
                span.set_tag('lambda.s3.events', events)
        elif is_sqs_trigger(event):
            logger.debug("Detected as SQS Trigger")
            span.set_tag(STR_LAMBDA_TRIGGER, 'aws:sqs')
            # Report at most 3 source queue ARNs.
            if "Records" in event:
                events = []
                for item in event["Records"][:3]:
                    events.append({'queue': item['eventSourceARN']})
                span.set_tag('lambda.sqs.messages', events)
        else:
            logger.debug("Detected as Unknown Trigger: %s" % event)
            span.set_tag(STR_LAMBDA_TRIGGER, 'unknown')
    except Exception:
        logger.debug("enrich_lambda_span: ", exc_info=True)
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides access to LDAP servers, along with some basic functionality required for Hue and
User Admin to work seamlessly with LDAP.
"""
import ldap
import ldap.filter
import logging
import re
from django.contrib.auth.models import User
import desktop.conf
from desktop.lib.python_util import CaseInsensitiveDict
LOG = logging.getLogger(__name__)
CACHED_LDAP_CONN = None
class LdapBindException(Exception):
  """Raised when binding (authenticating) to the LDAP server fails."""
  pass
class LdapSearchException(Exception):
  """Raised when an LDAP search is given invalid parameters (e.g. a DN outside the base DN)."""
  pass
def get_connection_from_server(server=None):
  """
  Return an LDAP connection for the named ``server`` entry from the
  configured LDAP_SERVERS, falling back to the desktop-wide LDAP
  configuration when no server is given (or none are configured).
  """
  configured_servers = desktop.conf.LDAP.LDAP_SERVERS.get()
  if server and configured_servers:
    config = configured_servers[server]
  else:
    config = desktop.conf.LDAP
  return get_connection(config)
def get_connection(ldap_config):
  # NOTE(review): a second `def get_connection` later in this module rebinds
  # the name at import time, so this implementation appears to be shadowed
  # (dead code) — confirm and consolidate the two definitions.
  """
  Return the cached LDAP connection if one exists, otherwise build an
  LdapConnection from ``ldap_config``.

  When search-bind authentication is disabled, the bind username is
  rewritten as ``user@NT_DOMAIN`` via get_ldap_username().

  Raises Exception when no LDAP URL is configured.
  """
  global CACHED_LDAP_CONN
  if CACHED_LDAP_CONN is not None:
    return CACHED_LDAP_CONN
  ldap_url = ldap_config.LDAP_URL.get()
  username = ldap_config.BIND_DN.get()
  password = desktop.conf.get_ldap_bind_password(ldap_config)
  ldap_cert = ldap_config.LDAP_CERT.get()
  search_bind_authentication = ldap_config.SEARCH_BIND_AUTHENTICATION.get()
  if ldap_url is None:
    raise Exception('No LDAP URL was specified')
  if search_bind_authentication:
    return LdapConnection(ldap_config, ldap_url, username, password, ldap_cert)
  else:
    return LdapConnection(ldap_config, ldap_url, get_ldap_username(username, ldap_config.NT_DOMAIN.get()), password, ldap_cert)
def get_auth(ldap_config):
ldap_url = ldap_config.LDAP_URL.get()
if ldap_url is None:
raise Exception('No LDAP URL was specified')
username = ldap_config.BIND_DN.get()
password = ldap_config.BIND_PASSWORD.get()
if not password:
password = ldap_config.BIND_PASSWORD_SCRIPT.get()
ldap_cert = ldap_config.LDAP_CERT.get()
search_bind_authentication = ldap_config.SEARCH_BIND_AUTHENTICATION.get()
if search_bind_authentication:
ldap_conf = (ldap_url, username, password, ldap_cert)
else:
ldap_conf = (ldap_url, get_ldap_username(username, ldap_config.NT_DOMAIN.get()), password, ldap_cert)
return ldap_conf
def get_connection(ldap_config):
  """
  Return an LdapConnection for ``ldap_config``, reusing the module-level
  cached connection when one exists.

  The previous implementation branched on SEARCH_BIND_AUTHENTICATION, but
  both branches constructed the exact same object — get_auth() already
  accounts for the authentication mode — so the redundant branch has been
  removed without changing behavior.
  """
  global CACHED_LDAP_CONN
  # NOTE(review): CACHED_LDAP_CONN is read here but never assigned anywhere
  # in this module, so the cache appears permanently empty — confirm.
  if CACHED_LDAP_CONN is not None:
    return CACHED_LDAP_CONN

  return LdapConnection(ldap_config, *get_auth(ldap_config))
def get_ldap_username(username, nt_domain):
  """Return ``username@nt_domain`` when an NT domain is set, else the bare username."""
  return '%s@%s' % (username, nt_domain) if nt_domain else username
def get_ldap_user_kwargs(username):
  """
  Build the keyword arguments for a Django User lookup on ``username``,
  using a case-insensitive match when IGNORE_USERNAME_CASE is enabled.
  """
  lookup_field = 'username__iexact' if desktop.conf.LDAP.IGNORE_USERNAME_CASE.get() else 'username'
  return {lookup_field: username}
def get_ldap_user(username):
  """Fetch the single Django User matching ``username`` (case handling per configuration)."""
  return User.objects.get(**get_ldap_user_kwargs(username))
def get_or_create_ldap_user(username):
  """
  Return a ``(user, created)`` pair for ``username``.

  When no matching user exists, a new one is created with the username
  case-folded according to the FORCE_USERNAME_LOWERCASE /
  FORCE_USERNAME_UPPERCASE settings.
  """
  lookup_kwargs = get_ldap_user_kwargs(username)
  existing = User.objects.filter(**lookup_kwargs)
  if existing.exists():
    return User.objects.get(**lookup_kwargs), False

  if desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get():
    username = username.lower()
  elif desktop.conf.LDAP.FORCE_USERNAME_UPPERCASE.get():
    username = username.upper()
  return User.objects.create(username=username), True
class LdapConnection(object):
  """
  Constructor creates LDAP connection. Contains methods
  to easily query an LDAP server.

  Changes from the previous revision:
    - the two bare ``except:`` clauses around the bind calls now catch
      ``ldap.LDAPError`` so KeyboardInterrupt/SystemExit are no longer
      converted into LdapBindException;
    - deprecated ``LOG.warn`` calls replaced with ``LOG.warning``.
  """

  def __init__(self, ldap_config, ldap_url, bind_user=None, bind_password=None, cert_file=None):
    """
    Constructor initializes the LDAP connection

    @param ldap_config: the LDAP configuration section backing this connection
    @param ldap_url: URL of the LDAP server to connect to
    @param bind_user: DN to bind as; an anonymous bind is performed when empty
    @param bind_password: password for ``bind_user``
    @param cert_file: CA certificate file; enables TLS options when provided
    @raise LdapBindException: when the (simple or anonymous) bind fails
    """
    self.ldap_config = ldap_config
    self._ldap_url = ldap_url
    self._username = bind_user
    self._ldap_cert = cert_file

    if cert_file is not None:
      # ALLOW: validate against the CA bundle but tolerate a missing server cert.
      ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
      ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, cert_file)

    if self.ldap_config.FOLLOW_REFERRALS.get():
      ldap.set_option(ldap.OPT_REFERRALS, 1)
    else:
      ldap.set_option(ldap.OPT_REFERRALS, 0)

    if ldap_config.DEBUG.get():
      ldap.set_option(ldap.OPT_DEBUG_LEVEL, ldap_config.DEBUG_LEVEL.get())

    self.ldap_handle = ldap.initialize(uri=ldap_url, trace_level=ldap_config.TRACE_LEVEL.get())

    if bind_user:
      try:
        self.ldap_handle.simple_bind_s(bind_user, bind_password)
      except ldap.LDAPError:
        msg = "Failed to bind to LDAP server as user %s" % bind_user
        LOG.exception(msg)
        raise LdapBindException(msg)
    else:
      try:
        # Do anonymous bind
        self.ldap_handle.simple_bind_s('', '')
      except ldap.LDAPError:
        msg = "Failed to bind to LDAP server anonymously"
        LOG.exception(msg)
        raise LdapBindException(msg)

  def _get_search_params(self, name, attr, find_by_dn=False):
    """
    if we are to find this ldap object by full distinguished name,
    then search by setting search_dn to the 'name'
    rather than by filtering by 'attr'.

    @return: a ``(search_dn, ldap_filter)`` tuple; the filter is empty when
             searching by distinguished name.
    @raise LdapSearchException: when a DN search target lies outside the
                                configured base DN.
    """
    base_dn = self._get_root_dn()
    if find_by_dn:
      # Upper-case the attribute names (e.g. 'cn=' -> 'CN=') so the suffix
      # comparison against the base DN is effectively case-insensitive.
      search_dn = re.sub(r'(\w+=)', lambda match: match.group(0).upper(), name)
      if not search_dn.upper().endswith(base_dn.upper()):
        raise LdapSearchException("Distinguished Name provided does not contain configured Base DN. Base DN: %(base_dn)s, DN: %(dn)s" % {
          'base_dn': base_dn,
          'dn': search_dn
        })
      return (search_dn, '')
    else:
      return (base_dn, '(' + attr + '=' + name + ')')

  @classmethod
  def _transform_find_user_results(cls, result_data, user_name_attr):
    """
    :param result_data: List of dictionaries that have ldap attributes and their associated values. Generally the result list from an ldapsearch request.
    :param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.

    :returns list of dictionaries that take on the following form: {
      'dn': <distinguished name of entry>,
      'username': <ldap attribute associated with user_name_attr>
      'first': <first name>
      'last': <last name>
      'email': <email>
      'groups': <list of DNs of groups that user is a member of>
    }
    """
    user_info = []
    if result_data:
      for dn, data in result_data:
        # Skip Active Directory # refldap entries.
        if dn is not None:
          # Case insensitivity
          data = CaseInsensitiveDict.from_dict(data)

          # Skip unnamed entries.
          if user_name_attr not in data:
            LOG.warning('Could not find %s in ldap attributes' % user_name_attr)
            continue

          ldap_info = {
            'dn': dn,
            'username': data[user_name_attr][0]
          }

          # First/last names are truncated to 30 characters.
          if 'givenName' in data:
            if len(data['givenName'][0]) > 30:
              LOG.warning('First name is truncated to 30 characters for [<User: %s>].' % ldap_info['username'])
            ldap_info['first'] = data['givenName'][0][:30]

          if 'sn' in data:
            if len(data['sn'][0]) > 30:
              LOG.warning('Last name is truncated to 30 characters for [<User: %s>].' % ldap_info['username'])
            ldap_info['last'] = data['sn'][0][:30]

          if 'mail' in data:
            ldap_info['email'] = data['mail'][0]
          # memberOf and isMemberOf should be the same if they both exist
          if 'memberOf' in data:
            ldap_info['groups'] = data['memberOf']
          if 'isMemberOf' in data:
            ldap_info['groups'] = data['isMemberOf']

          user_info.append(ldap_info)
    return user_info

  def _transform_find_group_results(self, result_data, group_name_attr, group_member_attr):
    """
    Transform raw ldapsearch group results into a list of dicts with keys
    'dn', 'name', 'members' and 'posix_members'.  The group name is
    case-folded per the FORCE_USERNAME_LOWERCASE / FORCE_USERNAME_UPPERCASE
    settings; posixGroup entries carry members in 'memberUid' instead of the
    configured member attribute.
    """
    group_info = []
    if result_data:
      for dn, data in result_data:
        # Skip Active Directory # refldap entries.
        if dn is not None:
          # Case insensitivity
          data = CaseInsensitiveDict.from_dict(data)

          # Skip unnamed entries.
          if group_name_attr not in data:
            LOG.warning('Could not find %s in ldap attributes' % group_name_attr)
            continue

          group_name = data[group_name_attr][0]
          if desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get():
            group_name = group_name.lower()
          elif desktop.conf.LDAP.FORCE_USERNAME_UPPERCASE.get():
            group_name = group_name.upper()

          ldap_info = {
            'dn': dn,
            'name': group_name
          }

          if group_member_attr in data and 'posixGroup' not in data['objectClass']:
            ldap_info['members'] = data[group_member_attr]
          else:
            ldap_info['members'] = []

          if 'posixGroup' in data['objectClass'] and 'memberUid' in data:
            ldap_info['posix_members'] = data['memberUid']
          else:
            ldap_info['posix_members'] = []

          group_info.append(ldap_info)
    return group_info

  def find_users(self, username_pattern, search_attr=None, user_name_attr=None, user_filter=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
    """
    LDAP search helper method finding users. This supports searching for users
    by distinguished name, or the configured username attribute.

    :param username_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none.
    :param search_attr: The ldap attribute to search for ``username_pattern``. Defaults to LDAP -> USERS -> USER_NAME_ATTR config value.
    :param user_name_attr: The ldap attribute that is returned by the server to map to ``username`` in the return dictionary.
    :param find_by_dn: Search by distinguished name.
    :param scope: ldapsearch scope.

    :returns: List of dictionaries that take on the following form: {
      'dn': <distinguished name of entry>,
      'username': <ldap attribute associated with user_name_attr>
      'first': <first name>
      'last': <last name>
      'email': <email>
      'groups': <list of DNs of groups that user is a member of>
    }
    """
    if not search_attr:
      search_attr = self.ldap_config.USERS.USER_NAME_ATTR.get()

    if not user_name_attr:
      user_name_attr = search_attr

    if not user_filter:
      user_filter = self.ldap_config.USERS.USER_FILTER.get()

    if not user_filter.startswith('('):
      user_filter = '(' + user_filter + ')'

    # Allow wild cards on non distinguished names
    sanitized_name = ldap.filter.escape_filter_chars(username_pattern).replace(r'\2a', r'*')
    # Fix issue where \, is converted to \5c,
    sanitized_name = sanitized_name.replace(r'\5c,', r'\2c')

    search_dn, user_name_filter = self._get_search_params(sanitized_name, search_attr, find_by_dn)
    ldap_filter = user_filter
    if user_name_filter:
      # A match-everything user filter would make the AND redundant; drop it.
      if ldap_filter.lower() in ('(objectclass=*)', 'objectclass=*'):
        ldap_filter = ''
      ldap_filter = '(&' + ldap_filter + user_name_filter + ')'
    attrlist = ['objectClass', 'isMemberOf', 'memberOf', 'givenName', 'sn', 'mail', 'dn', user_name_attr]

    # Remember the last search so ldapsearch_cmd() can reproduce it.
    self._search_dn = search_dn
    self._ldap_filter = ldap_filter
    self._attrlist = attrlist

    ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
    result_type, result_data = self.ldap_handle.result(ldap_result_id)

    if result_type == ldap.RES_SEARCH_RESULT:
      return self._transform_find_user_results(result_data, user_name_attr)
    else:
      return []

  def find_groups(self, groupname_pattern, search_attr=None, group_name_attr=None, group_member_attr=None, group_filter=None, find_by_dn=False, scope=ldap.SCOPE_SUBTREE):
    """
    LDAP search helper method for finding groups

    :param groupname_pattern: The pattern to match ``search_attr`` against. Defaults to ``search_attr`` if none.
    :param search_attr: The ldap attribute to search for ``groupname_pattern``. Defaults to LDAP -> GROUPS -> GROUP_NAME_ATTR config value.
    :param group_name_attr: The ldap attribute that is returned by the server to map to ``name`` in the return dictionary.
    :param find_by_dn: Search by distinguished name.
    :param scope: ldapsearch scope.

    :returns: List of dictionaries that take on the following form: {
      'dn': <distinguished name of entry>,
      'name': <ldap attribute associated with group_name_attr>
      'members': <list of DNs of the group's members>
      'posix_members': <list of memberUid values for posixGroup entries>
    }
    """
    if not search_attr:
      search_attr = self.ldap_config.GROUPS.GROUP_NAME_ATTR.get()

    if not group_name_attr:
      group_name_attr = search_attr

    if not group_member_attr:
      group_member_attr = self.ldap_config.GROUPS.GROUP_MEMBER_ATTR.get()

    if not group_filter:
      group_filter = self.ldap_config.GROUPS.GROUP_FILTER.get()

    if not group_filter.startswith('('):
      group_filter = '(' + group_filter + ')'

    # Allow wild cards on non distinguished names
    sanitized_name = ldap.filter.escape_filter_chars(groupname_pattern).replace(r'\2a', r'*')
    # Fix issue where \, is converted to \5c,
    sanitized_name = sanitized_name.replace(r'\5c,', r'\2c')

    search_dn, group_name_filter = self._get_search_params(sanitized_name, search_attr, find_by_dn)
    ldap_filter = '(&' + group_filter + group_name_filter + ')'
    attrlist = ['objectClass', 'dn', 'memberUid', group_member_attr, group_name_attr]

    # Remember the last search so ldapsearch_cmd() can reproduce it.
    self._search_dn = search_dn
    self._ldap_filter = ldap_filter
    self._attrlist = attrlist

    ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
    result_type, result_data = self.ldap_handle.result(ldap_result_id)

    if result_type == ldap.RES_SEARCH_RESULT:
      return self._transform_find_group_results(result_data, group_name_attr, group_member_attr)
    else:
      return []

  def find_members_of_group(self, dn, search_attr, ldap_filter, scope=ldap.SCOPE_SUBTREE):
    """
    Return the raw ldapsearch result entries for members of the group ``dn``,
    matching on either ``isMemberOf`` or ``memberOf``.
    """
    if ldap_filter and not ldap_filter.startswith('('):
      ldap_filter = '(' + ldap_filter + ')'

    # Allow wild cards on non distinguished names
    dn = ldap.filter.escape_filter_chars(dn).replace(r'\2a', r'*')
    # Fix issue where \, is converted to \5c,
    dn = dn.replace(r'\5c,', r'\2c')

    search_dn, _ = self._get_search_params(dn, search_attr)
    ldap_filter = '(&%(ldap_filter)s(|(isMemberOf=%(group_dn)s)(memberOf=%(group_dn)s)))' % {'group_dn': dn, 'ldap_filter': ldap_filter}
    attrlist = ['objectClass', 'isMemberOf', 'memberOf', 'givenName', 'sn', 'mail', 'dn', search_attr]

    # Remember the last search so ldapsearch_cmd() can reproduce it.
    self._search_dn = search_dn
    self._ldap_filter = ldap_filter
    self._attrlist = attrlist

    ldap_result_id = self.ldap_handle.search(search_dn, scope, ldap_filter, attrlist)
    result_type, result_data = self.ldap_handle.result(ldap_result_id)

    if result_type == ldap.RES_SEARCH_RESULT:
      return result_data
    else:
      return []

  def find_users_of_group(self, dn):
    """Return the transformed user entries that are members of the group ``dn``."""
    ldap_filter = self.ldap_config.USERS.USER_FILTER.get()
    name_attr = self.ldap_config.USERS.USER_NAME_ATTR.get()
    result_data = self.find_members_of_group(dn, name_attr, ldap_filter)
    return self._transform_find_user_results(result_data, name_attr)

  def find_groups_of_group(self, dn):
    """Return the transformed group entries that are members of the group ``dn``."""
    ldap_filter = self.ldap_config.GROUPS.GROUP_FILTER.get()
    name_attr = self.ldap_config.GROUPS.GROUP_NAME_ATTR.get()
    member_attr = self.ldap_config.GROUPS.GROUP_MEMBER_ATTR.get()
    result_data = self.find_members_of_group(dn, name_attr, ldap_filter)
    return self._transform_find_group_results(result_data, name_attr, member_attr)

  def _get_root_dn(self):
    """Return the configured base DN under which all searches are rooted."""
    return self.ldap_config.BASE_DN.get()

  def ldapsearch_cmd(self):
    """
    Return an equivalent `ldapsearch` command line (password redacted) for
    the most recent search performed on this connection, for debugging.
    """
    ldapsearch = 'ldapsearch -x -LLL -H {ldap_url} -D "{binddn}" -w "********" -b "{base}" ' \
                 '"{filterstring}" {attr}'.format(ldap_url=self._ldap_url,
                                                  binddn=self._username,
                                                  base=self._search_dn,
                                                  filterstring=self._ldap_filter,
                                                  attr=" ".join(self._attrlist))
    return ldapsearch
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.