gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_virtualnetwork
version_added: "2.1"
short_description: Manage Azure virtual networks.
description:
- Create, update or delete a virtual networks. Allows setting and updating the available IPv4 address ranges
and setting custom DNS servers. Use the azure_rm_subnet module to associate subnets with a virtual network.
options:
resource_group:
description:
- name of resource group.
required: true
address_prefixes_cidr:
description:
- List of IPv4 address ranges where each is formatted using CIDR notation. Required when creating
a new virtual network or using purge_address_prefixes.
aliases:
- address_prefixes
default: null
required: false
dns_servers:
description:
- Custom list of DNS servers. Maximum length of two. The first server in the list will be treated
as the Primary server. This is an explicit list. Existing DNS servers will be replaced with the
specified list. Use the purge_dns_servers option to remove all custom DNS servers and revert to
default Azure servers.
default: null
required: false
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
name:
description:
- name of the virtual network.
required: true
purge_address_prefixes:
description:
- Use with state present to remove any existing address_prefixes.
default: false
purge_dns_servers:
description:
- Use with state present to remove existing DNS servers, reverting to default Azure servers. Mutually
exclusive with dns_servers.
default: false
required: false
state:
description:
- Assert the state of the virtual network. Use 'present' to create or update and
'absent' to delete.
default: present
choices:
- absent
- present
required: false
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create a virtual network
azure_rm_virtualnetwork:
name: foobar
resource_group: Testing
address_prefixes_cidr:
- "10.1.0.0/16"
- "172.100.0.0/16"
dns_servers:
- "127.0.0.1"
- "127.0.0.2"
tags:
testing: testing
delete: on-exit
- name: Delete a virtual network
azure_rm_virtualnetwork:
name: foobar
resource_group: Testing
state: absent
'''
RETURN = '''
state:
description: Current state of the virtual network.
returned: always
type: dict
sample: {
"address_prefixes": [
"10.1.0.0/16",
"172.100.0.0/16"
],
"dns_servers": [
"127.0.0.1",
"127.0.0.3"
],
"etag": 'W/"0712e87c-f02f-4bb3-8b9e-2da0390a3886"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/my_test_network",
"location": "eastus",
"name": "my_test_network",
"provisioning_state": "Succeeded",
"tags": null,
"type": "Microsoft.Network/virtualNetworks"
}
'''
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.network.models import VirtualNetwork, AddressSpace, DhcpOptions
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN
def virtual_network_to_dict(vnet):
    '''
    Flatten an Azure SDK virtual network object into a plain dict.

    :param vnet: VirtualNetwork object as returned by the network client
    :return: dict with the vnet's scalar attributes plus, when present,
             its DNS servers and address prefixes
    '''
    flattened = dict(
        id=vnet.id,
        name=vnet.name,
        location=vnet.location,
        type=vnet.type,
        tags=vnet.tags,
        provisioning_state=vnet.provisioning_state,
        etag=vnet.etag
    )
    # Optional sub-objects: only emit the keys when the SDK object actually
    # carries a non-empty value, mirroring what the API returned.
    if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
        flattened['dns_servers'] = [server for server in vnet.dhcp_options.dns_servers]
    if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
        flattened['address_prefixes'] = [prefix for prefix in vnet.address_space.address_prefixes]
    return flattened
class AzureRMVirtualNetwork(AzureRMModuleBase):
    """Ansible module implementation: create, update or delete an Azure virtual network."""

    def __init__(self):
        # Argument spec consumed by AzureRMModuleBase / AnsibleModule.
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            location=dict(type='str'),
            address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),
            dns_servers=dict(type='list',),
            purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),
            purge_dns_servers=dict(type='bool', default=False),
        )

        # purge_dns_servers replaces the server list wholesale, so combining
        # it with an explicit dns_servers list would be ambiguous.
        mutually_exclusive = [
            ('dns_servers', 'purge_dns_servers')
        ]

        # Purging prefixes means "replace with exactly this list", so the
        # replacement list must be supplied.
        required_if = [
            ('purge_address_prefixes', True, ['address_prefixes_cidr'])
        ]

        # Attributes populated from module parameters in exec_module().
        self.resource_group = None
        self.name = None
        self.state = None
        self.location = None
        self.address_prefixes_cidr = None
        self.purge_address_prefixes = None
        self.dns_servers = None
        self.purge_dns_servers = None

        self.results=dict(
            changed=False,
            state=dict()
        )

        super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,
                                                    mutually_exclusive=mutually_exclusive,
                                                    required_if=required_if,
                                                    supports_check_mode=True)

    def exec_module(self, **kwargs):
        """Main module execution: diff desired state against Azure and apply changes.

        Returns the standard Ansible result dict (changed/state, plus
        check_mode flag).
        """
        # Copy every module parameter (plus the common 'tags' param) onto self.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        self.results['check_mode'] = self.check_mode

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location

        if self.state == 'present' and self.purge_address_prefixes:
            # Validate the replacement prefix list before touching anything.
            for prefix in self.address_prefixes_cidr:
                if not CIDR_PATTERN.match(prefix):
                    self.fail("Parameter error: invalid address prefix value {0}".format(prefix))

            if self.dns_servers and len(self.dns_servers) > 2:
                self.fail("Parameter error: You can provide a maximum of 2 DNS servers.")

        changed = False
        results = dict()

        try:
            # A CloudError here means the vnet does not exist yet (handled below).
            self.log('Fetching vnet {0}'.format(self.name))
            vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)

            results = virtual_network_to_dict(vnet)
            self.log('Vnet exists {0}'.format(self.name))
            self.log(results, pretty_print=True)

            self.check_provisioning_state(vnet, self.state)

            if self.state == 'present':
                if self.address_prefixes_cidr:
                    # Diff requested vs existing prefixes as sets.
                    existing_address_prefix_set = set(vnet.address_space.address_prefixes)
                    requested_address_prefix_set = set(self.address_prefixes_cidr)
                    missing_prefixes = requested_address_prefix_set - existing_address_prefix_set
                    extra_prefixes = existing_address_prefix_set - requested_address_prefix_set
                    if len(missing_prefixes) > 0:
                        self.log('CHANGED: there are missing address_prefixes')
                        changed = True
                        if not self.purge_address_prefixes:
                            # add the missing prefixes
                            for prefix in missing_prefixes:
                                results['address_prefixes'].append(prefix)

                    if len(extra_prefixes) > 0 and self.purge_address_prefixes:
                        self.log('CHANGED: there are address_prefixes to purge')
                        changed = True
                        # replace existing address prefixes with requested set
                        results['address_prefixes'] = self.address_prefixes_cidr

                update_tags, results['tags'] = self.update_tags(results['tags'])
                if update_tags:
                    changed = True

                if self.dns_servers:
                    # NOTE(review): if the existing vnet has no dhcp_options
                    # this line raises AttributeError — confirm the SDK always
                    # returns a dhcp_options object here.
                    existing_dns_set = set(vnet.dhcp_options.dns_servers)
                    requested_dns_set = set(self.dns_servers)
                    if existing_dns_set != requested_dns_set:
                        self.log('CHANGED: replacing DNS servers')
                        changed = True
                        results['dns_servers'] = self.dns_servers

                if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
                    self.log('CHANGED: purging existing DNS servers')
                    changed = True
                    results['dns_servers'] = []
            elif self.state == 'absent':
                self.log("CHANGED: vnet exists but requested state is 'absent'")
                changed = True
        except CloudError:
            self.log('Vnet {0} does not exist'.format(self.name))
            if self.state == 'present':
                self.log("CHANGED: vnet {0} does not exist but requested state is 'present'".format(self.name))
                changed = True

        self.results['changed'] = changed
        self.results['state'] = results

        if self.check_mode:
            # Report what would change without calling the API.
            return self.results

        if changed:
            if self.state == 'present':
                if not results:
                    # create a new virtual network
                    self.log("Create virtual network {0}".format(self.name))
                    if not self.address_prefixes_cidr:
                        self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')
                    vnet = VirtualNetwork(
                        location=self.location,
                        address_space=AddressSpace(
                            address_prefixes=self.address_prefixes_cidr
                        )
                    )
                    if self.dns_servers:
                        vnet.dhcp_options = DhcpOptions(
                            dns_servers=self.dns_servers
                        )
                    if self.tags:
                        vnet.tags = self.tags
                    self.results['state'] = self.create_or_update_vnet(vnet)
                else:
                    # update existing virtual network using the merged values
                    # accumulated in `results` above
                    self.log("Update virtual network {0}".format(self.name))
                    vnet = VirtualNetwork(
                        location=results['location'],
                        address_space=AddressSpace(
                            address_prefixes=results['address_prefixes']
                        ),
                        tags=results['tags']
                    )
                    if results.get('dns_servers'):
                        vnet.dhcp_options = DhcpOptions(
                            dns_servers=results['dns_servers']
                        )
                    self.results['state'] = self.create_or_update_vnet(vnet)
            elif self.state == 'absent':
                self.delete_virtual_network()
                self.results['state']['status'] = 'Deleted'

        return self.results

    def create_or_update_vnet(self, vnet):
        """Send the vnet to Azure and block on the long-running operation.

        :param vnet: VirtualNetwork model to create or update
        :return: dict form of the resulting vnet (fails the module on error)
        """
        try:
            poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)
            new_vnet = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc)))
        return virtual_network_to_dict(new_vnet)

    def delete_virtual_network(self):
        """Delete the vnet and block on the long-running operation.

        :return: poller result (fails the module on error)
        """
        try:
            poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)
            result = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting virtual network {0} - {1}".format(self.name, str(exc)))
        return result
def main():
    """Entry point: instantiating the module class runs the whole module."""
    AzureRMVirtualNetwork()


if __name__ == '__main__':
    main()
| |
from collections import OrderedDict
import copy
import itertools
import regex as re
import random
def reverse_enumerate(iterable):
    '''
    Enumerate through an iterable in reverse, reporting the index consistent
    with the original iterable.

    The original implementation used itertools.izip and xrange, which do not
    exist on Python 3; the builtin zip/range pair behaves identically on
    both Python 2 and 3 for this use (pairs are produced lazily on 3).

    :param iterable: a sized, reversible iterable (e.g. list, str)
    :return: iterable of (index, item) pairs, last item first
    '''
    return zip(reversed(range(len(iterable))), reversed(iterable))
def generate_repeat_units():
    '''
    Build the set of candidate repeat units of length 1 through 4 over the
    canonical DNA bases, dropping units that are redundant with a shorter
    unit: extensions of a homopolymer (e.g. 'AA', 'GGG') and tandem
    duplications of a dinucleotide (e.g. 'ACAC').

    :return: set of repeat-unit strings
    '''
    bases = 'ACGT'
    # Every string of length 1..4 over the alphabet.
    candidates = set()
    for length in (1, 2, 3, 4):
        for combo in itertools.product(bases, repeat=length):
            candidates.add(''.join(combo))
    kept = set()
    for unit in candidates:
        size = len(unit)
        # A unit whose tail is just its first base repeated is already
        # covered by the single-base unit.
        redundant = size > 1 and unit[1:] == unit[0] * (size - 1)
        # A 4-mer that is a doubled 2-mer is covered by the 2-mer.
        if size == 4 and unit[:2] == unit[2:]:
            redundant = True
        if not redundant:
            kept.add(unit)
    return kept
def check_repeats(repeat_1, repeat_2):
    '''
    Check whether repeat_1 is a cyclic rotation of repeat_2.

    e.g. check_repeats('AGCT', 'GCTA') is True, check_repeats('AGCT', 'ATGC')
    is False.

    Uses the classic doubled-string trick: for equal-length strings, s is a
    rotation of t exactly when s occurs as a substring of t + t.
    '''
    if len(repeat_1) != len(repeat_2):
        return False
    return repeat_1 in (repeat_2 + repeat_2)
def create_repeat_file(fasta_file, output_file):
    '''
    For a given FASTA file, enumerate all repeats to an output file.

    Output is tab-delimited with one kept repeat match per line:
    sequence name, matched text, repeat-unit length, repeat unit,
    start offset, end offset.
    '''
    repeat_units = generate_repeat_units()
    sequences = OrderedDict()
    seq_name = None
    seq = ''
    # Precompute, for every pair of units, whether one is a rotation of the
    # other; used below to discard matches subsumed by an equivalent unit.
    groups = dict()
    for repeat_unit in repeat_units:
        groups[repeat_unit] = dict()
        for other_repeat_unit in repeat_units:
            groups[repeat_unit][other_repeat_unit] = \
                check_repeats(repeat_unit, other_repeat_unit)
    # Parse the FASTA file into {name: sequence}.
    with open(fasta_file) as f:
        for line in f:
            if line.startswith('>'):  # New FASTA entry
                # Flush the previous record before starting a new one.
                # NOTE(review): on the very first header this stores an empty
                # entry under the key None; it is harmless (an empty sequence
                # produces no matches) but pollutes the dict — confirm intended.
                sequences[seq_name] = seq
                seq = ''
                seq_name = re.split('>|\s+',line.strip())[1]
                sequences[seq_name] = ''
            else:
                seq += line.strip()
    # Flush the final record.
    sequences[seq_name] = seq
    with open(output_file, 'w') as OUT:
        for sequence_name, sequence in sequences.items():
            matches = []
            for repeat_unit in repeat_units:
                repeat_length = len(repeat_unit)
                # Proper prefixes of the unit, so a trailing partial copy of
                # the unit is included in the match.
                fragments = []
                for i in range(1, len(repeat_unit)):
                    fragments.append(repeat_unit[:-i])
                # Two or more full copies of the unit, optionally followed by
                # one partial copy.
                search_pattern = '({}){{2,}}({}){{0,1}}'.format(
                    repeat_unit,
                    '|'.join(fragments),
                )
                last_start = None
                # overlapped=True requires the third-party `regex` module
                # (imported at file level as `re`).
                for match in re.finditer(search_pattern, sequence,
                                         overlapped=True):
                    keep = True
                    # Drop a match that starts exactly one unit after the
                    # previous match: it is the same run shifted by one copy.
                    if last_start:
                        if match.start() - repeat_length == last_start:
                            keep = False
                    if keep:
                        matches.append({
                            'sequence': match.group(0),
                            'repeat_unit': repeat_unit,
                            'start': match.start(),
                            'end': match.end(),
                        })
                    last_start = match.start()
            # Sort by start ascending, then end descending, so any match that
            # contains another comes before it.
            sort = sorted(matches, key=lambda x: (x['start'], -x['end']))
            kept_matches = []
            # Walk backwards; drop a match when an earlier (larger) match
            # covers its interval and uses a rotation-equivalent unit.
            i = len(sort) - 1
            while i >= 0:
                keep = True
                j = i - 1
                while j >= 0:
                    if (
                        sort[i]['start'] >= sort[j]['start'] and
                        sort[i]['end'] <= sort[j]['end'] and
                        groups[sort[i]['repeat_unit']][sort[j]['repeat_unit']]
                    ):
                        keep = False
                        break
                    # Once candidates no longer overlap this match, stop scanning.
                    if sort[i]['start'] > sort[j]['end']:
                        break
                    j = j - 1
                if keep:
                    kept_matches.append(sort[i])
                i = i - 1
            # Emit surviving matches in positional order.
            for match in sorted(kept_matches, key=lambda x: x['start']):
                OUT.write('\t'.join((
                    sequence_name,
                    match['sequence'],
                    str(len(match['repeat_unit'])),
                    match['repeat_unit'],
                    str(match['start']),
                    str(match['end']),
                )) + '\n')
def extract_repeat_file_sample(repeat_file, sample_file, total):
    '''
    Extract a random sample of repeat loci from a genome-wide list.

    Input order is preserved in the output (lines are filtered, not shuffled).

    :param repeat_file: path to the genome-wide repeat list, one locus per line
    :param sample_file: path the sampled subset is written to
    :param total: number of lines to sample; random.sample raises ValueError
        if it exceeds the number of lines in the file
    '''
    # First pass: count lines without holding the file in memory.
    # The original `for i, l in enumerate(f)` counting raised NameError on an
    # empty file because `i` was never bound; an explicit counter is safe.
    line_count = 0
    with open(repeat_file, 'r', 1) as f:
        for _ in f:
            line_count += 1
    # A set of sampled line numbers (the original built a dict with dummy
    # values purely for membership tests).
    keep = set(random.sample(range(line_count), total))
    # Second pass: emit only the sampled line numbers.
    with open(repeat_file, 'r', 1) as f, open(sample_file, 'w') as OUT:
        for x, line in enumerate(f):
            if x in keep:
                OUT.write(line)
| |
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import thread_cert
import mle
# Simulator node ids for the topology pictured below; the name suffix is the
# Thread version (1.1 or 1.2) configured for that node in TOPOLOGY.
LEADER_1_2 = 1
ROUTER_1_1 = 2
REED_1_2 = 3
ROUTER_1_2 = 4
REED_1_1 = 5
MED_1_1 = 6
MED_1_2 = 7
# Topology
# (lq:2) (pp:1)
# REED_1_2 ----- ROUTER_1_2
# | \ / | \
# | \/ REED_1_1 \
# (lq:2) | / \ / `router` \
# | (lq:2) \ \
# | / / \ \
# LEADER_1_2 --- ROUTER_1_1 -- MED_1_2
# \ |
# \ |
# \ |
# MED_1_1
#
# 1) Bring up LEADER_1_2 and ROUTER_1_1,
# 2) Config link quality (LEADER_1_2->REED_1_2) as 2, bring up REED_1_2 which would attach to ROUTER_1_1
# due to higher two-way link quality,
# 3) Config link quality(LEADER_1_2->ROUTER_1_2) and link quality(REED_1_2->ROUTER_1_2) as 2, bring up
# ROUTER_1_2 which would attach to LEADER_1_2 due to active router is preferred,
# 4) Config parent priority as 1 on ROUTER_1_2, bring up REED_1_1 which would attach to ROUTER_1_2 due to
# higher parent priority,
# 5) Upgrade REED_1_1 to `router` role, bring up MED_1_1 which would attach to LEADER_1_2 which has higher
# link quality of 3,
# 6) Config parent priority as 1 on ROUTER_1_1, bring up MED_1_2 which would attach to ROUTER_1_2 due to
# higher version
#
class TestParentSelection(thread_cert.TestCase):
    # Per-node configuration: Thread version and RF whitelist (which nodes can
    # hear each other), matching the ASCII topology diagram above.
    TOPOLOGY = {
        LEADER_1_2: {
            'version': '1.2',
            'whitelist': [REED_1_2, ROUTER_1_2, REED_1_1, ROUTER_1_1, MED_1_1],
        },
        ROUTER_1_1: {
            'version': '1.1',
            'whitelist': [LEADER_1_2, REED_1_2, MED_1_2, MED_1_1],
        },
        REED_1_2: {
            'version': '1.2',
            'whitelist': [ROUTER_1_2, ROUTER_1_1, LEADER_1_2],
        },
        ROUTER_1_2: {
            'version': '1.2',
            'whitelist': [REED_1_2, MED_1_2, REED_1_1, LEADER_1_2],
        },
        REED_1_1: {
            'version': '1.1',
            'whitelist': [ROUTER_1_2, LEADER_1_2]
        },
        MED_1_1: {
            'mode': 'rs',
            'version': '1.1',
            'whitelist': [LEADER_1_2, ROUTER_1_1],
        },
        MED_1_2: {
            'mode': 'rs',
            'version': '1.2',
            'whitelist': [ROUTER_1_1, ROUTER_1_2],
        },
    }
    """All nodes are created with default configurations"""

    def test(self):
        """Walk the attach sequence from the header comment, asserting at each
        step that the joining node picked the expected parent and that the
        competing Parent Responses differ on exactly the criterion under test
        (link margin, router-vs-REED, parent priority, LQ3 count, version)."""
        self.nodes[LEADER_1_2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER_1_2].get_state(), 'leader')

        self.nodes[ROUTER_1_1].set_router_selection_jitter(1)
        self.nodes[ROUTER_1_1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER_1_1].get_state(), 'router')

        # Mesh Impacting Criteria - Highest Two-way link quality
        # REED_1_2 would attach to ROUTER_1_1

        # Attach to ROUTER_1_1 which has highest two-way link quality
        # Flush relative message queues
        self.flush_nodes([LEADER_1_2, ROUTER_1_1])

        self.nodes[LEADER_1_2].set_link_quality(
            self.nodes[REED_1_2].get_addr64(), 2)
        self.nodes[REED_1_2].set_router_selection_jitter(1)
        self.nodes[REED_1_2].set_router_upgrade_threshold(1)

        self.nodes[REED_1_2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[REED_1_2].get_state(), 'child')

        # Check Parent Response
        messages = self.simulator.get_messages_sent_by(ROUTER_1_1)
        parent_prefer = messages.next_mle_message(
            mle.CommandType.PARENT_RESPONSE)
        assert (parent_prefer), "Error: Expected parent response not found"

        messages = self.simulator.get_messages_sent_by(LEADER_1_2)
        parent_cmp = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_cmp), "Error: Expected parent response not found"

        # Known that link margin for link quality 3 is 80 and link quality 2 is 15
        assert ((parent_prefer.get_mle_message_tlv(mle.LinkMargin).link_margin -
                 parent_cmp.get_mle_message_tlv(mle.LinkMargin).link_margin) >
                20)

        # Check Child Id Request
        messages = self.simulator.get_messages_sent_by(REED_1_2)
        msg = messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg.assertSentToNode(self.nodes[ROUTER_1_1])

        # Mesh Impacting Criteria - Active Routers over REEDs
        # ROUTER_1_2 would attach to LEADER_1_2

        # Link quality configuration, so that REED_1_2 has the chance to respond
        # Flush relative message queues
        self.flush_nodes([LEADER_1_2, REED_1_2])

        self.nodes[LEADER_1_2].set_link_quality(
            self.nodes[ROUTER_1_2].get_addr64(), 2)
        self.nodes[REED_1_2].set_link_quality(
            self.nodes[ROUTER_1_2].get_addr64(), 2)
        self.nodes[ROUTER_1_2].set_router_selection_jitter(1)

        self.nodes[ROUTER_1_2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER_1_2].get_state(), 'router')

        # Check Parent Response
        messages = self.simulator.get_messages_sent_by(LEADER_1_2)
        # Skip first response for first parent request
        assert messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        parent_prefer = messages.next_mle_message(
            mle.CommandType.PARENT_RESPONSE)
        assert (parent_prefer), "Error: Expected parent response not found"

        messages = self.simulator.get_messages_sent_by(REED_1_2)
        parent_cmp = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_cmp), "Error: Expected parent response not found"

        # Equal link margins: the choice is decided by router-vs-REED, not RF.
        assert (parent_prefer.get_mle_message_tlv(
            mle.LinkMargin).link_margin == parent_cmp.get_mle_message_tlv(
                mle.LinkMargin).link_margin)

        # Check Child Id Request
        messages = self.simulator.get_messages_sent_by(ROUTER_1_2)
        msg = messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg.assertSentToNode(self.nodes[LEADER_1_2])

        # Mesh Impacting Criteria - Highest Parent Priority value in the Connectivity TLV
        # REED_1_1 would attach to ROUTER_1_2

        # Flush relative message queues
        self.flush_nodes([LEADER_1_2, ROUTER_1_2])

        self.nodes[ROUTER_1_2].set_parent_priority(1)
        self.nodes[REED_1_1].set_router_selection_jitter(1)
        self.nodes[REED_1_1].set_router_upgrade_threshold(1)

        self.nodes[REED_1_1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[REED_1_1].get_state(), 'child')

        # Check Parent Response
        messages = self.simulator.get_messages_sent_by(ROUTER_1_2)
        parent_prefer = messages.next_mle_message(
            mle.CommandType.PARENT_RESPONSE)
        assert (parent_prefer), "Error: Expected parent response not found"

        messages = self.simulator.get_messages_sent_by(LEADER_1_2)
        parent_cmp = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_cmp), "Error: Expected parent response not found"

        # Link margins tie; parent priority (pp) must be the deciding factor.
        assert (parent_prefer.get_mle_message_tlv(
            mle.LinkMargin).link_margin == parent_cmp.get_mle_message_tlv(
                mle.LinkMargin).link_margin)
        assert (parent_prefer.get_mle_message_tlv(mle.Connectivity).pp >
                parent_cmp.get_mle_message_tlv(mle.Connectivity).pp)

        # Check Child Id Request
        messages = self.simulator.get_messages_sent_by(REED_1_1)
        msg = messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg.assertSentToNode(self.nodes[ROUTER_1_2])

        # Mesh Impacting Criteria - Router with the most high-quality neighbors
        # (Link Quality 3 field in the Connectivity TLV)
        # MED_1_1 would attach to LEADER_1_2

        self.nodes[REED_1_1].set_state('router')
        self.simulator.go(5)
        self.assertEqual(self.nodes[REED_1_1].get_state(), 'router')

        # Flush relative message queues
        self.flush_nodes([LEADER_1_2, ROUTER_1_1])

        self.nodes[MED_1_1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[MED_1_1].get_state(), 'child')

        # Check Parent Response
        messages = self.simulator.get_messages_sent_by(LEADER_1_2)
        parent_prefer = messages.next_mle_message(
            mle.CommandType.PARENT_RESPONSE)
        assert (parent_prefer), "Error: Expected parent response not found"

        messages = self.simulator.get_messages_sent_by(ROUTER_1_1)
        parent_cmp = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_cmp), "Error: Expected parent response not found"

        # Link margin and parent priority tie; LQ3 neighbor count decides.
        assert (parent_prefer.get_mle_message_tlv(
            mle.LinkMargin).link_margin == parent_cmp.get_mle_message_tlv(
                mle.LinkMargin).link_margin)
        assert (parent_prefer.get_mle_message_tlv(
            mle.Connectivity).pp == parent_cmp.get_mle_message_tlv(
                mle.Connectivity).pp)
        assert (parent_prefer.get_mle_message_tlv(
            mle.Connectivity).link_quality_3 > parent_cmp.get_mle_message_tlv(
                mle.Connectivity).link_quality_3)

        # Check Child Id Request
        messages = self.simulator.get_messages_sent_by(MED_1_1)
        msg = messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg.assertSentToNode(self.nodes[LEADER_1_2])

        # Child Impacting Criteria - A Version number in the Version TLV
        # equal to or higher than the version that implements features
        # desirable to the Child MED_1_2 would attach to ROUTER_1_2

        # Flush relative message queues
        self.flush_nodes([ROUTER_1_2, ROUTER_1_1])

        self.nodes[ROUTER_1_1].set_parent_priority(1)

        self.nodes[MED_1_2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[MED_1_2].get_state(), 'child')

        # Check Parent Response
        messages = self.simulator.get_messages_sent_by(ROUTER_1_2)
        parent_prefer = messages.next_mle_message(
            mle.CommandType.PARENT_RESPONSE)
        assert (parent_prefer), "Error: Expected parent response not found"

        messages = self.simulator.get_messages_sent_by(ROUTER_1_1)
        parent_cmp = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_cmp), "Error: Expected parent response not found"

        # All mesh criteria tie; the higher Thread version wins.
        assert (parent_prefer.get_mle_message_tlv(
            mle.LinkMargin).link_margin == parent_cmp.get_mle_message_tlv(
                mle.LinkMargin).link_margin)
        assert (parent_prefer.get_mle_message_tlv(
            mle.Connectivity).pp == parent_cmp.get_mle_message_tlv(
                mle.Connectivity).pp)
        assert (parent_prefer.get_mle_message_tlv(
            mle.Connectivity).link_quality_3 == parent_cmp.get_mle_message_tlv(
                mle.Connectivity).link_quality_3)
        assert (parent_prefer.get_mle_message_tlv(mle.Version).version >
                parent_cmp.get_mle_message_tlv(mle.Version).version)

        # Check Child Id Request
        messages = self.simulator.get_messages_sent_by(MED_1_2)
        msg = messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg.assertSentToNode(self.nodes[ROUTER_1_2])
if __name__ == '__main__':
unittest.main()
| |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Utils exporting data from AFF4 to the rest of the world."""
import os
import Queue
import stat
import time
import logging
from grr.lib import aff4
from grr.lib import client_index
from grr.lib import rdfvalue
from grr.lib import serialize
from grr.lib import threadpool
from grr.lib import utils
from grr.lib.aff4_objects import aff4_grr
from grr.lib.flows.general import file_finder
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
# Chunk size (16 MiB) used when streaming AFF4 file contents to disk.
BUFFER_SIZE = 16 * 1024 * 1024
def GetAllClients(token=None):
  """Return a list of all client urns.

  Args:
    token: Auth token used to open the client index.

  Returns:
    A list of client urns.
  """
  index_fd = aff4.FACTORY.Create(client_index.MAIN_INDEX,
                                 aff4_type="ClientIndex",
                                 mode="rw",
                                 object_exists=True,
                                 token=token)
  # ["."] is the keyword query used here to enumerate every client
  # (presumably a match-all pattern — confirm against ClientIndex).
  return index_fd.LookupClients(["."])
class IterateAllClientUrns(object):
  """Class to iterate over all URNs."""

  THREAD_POOL_NAME = "ClientUrnIter"
  # Seconds Run() waits on the output queue before giving up.
  QUEUE_TIMEOUT = 30

  def __init__(self, func=None, max_threads=10, token=None):
    """Iterate over all clients in a threadpool.

    Args:
      func: A function to call with each client urn.
      max_threads: Number of threads to use.
      token: Auth token.

    Raises:
      RuntimeError: If function not specified.
    """
    # NOTE(review): the docstring promises a RuntimeError when func is
    # missing, but no check exists — a None func only fails later inside
    # IterFunction. Confirm whether the check was dropped intentionally.
    self.thread_pool = threadpool.ThreadPool.Factory(self.THREAD_POOL_NAME,
                                                     max_threads)
    self.thread_pool.Start()
    self.token = token
    self.func = func
    self.broken_subjects = []  # Entries that are broken or fail to run.

    self.out_queue = Queue.Queue()

  def GetInput(self):
    """Yield client urns."""
    clients = GetAllClients(token=self.token)
    logging.debug("Got %d clients", len(clients))
    return clients

  def Run(self):
    """Run the iteration.

    Queues one task per input urn, then yields whatever the tasks put on
    out_queue until roughly as many results as tasks have been consumed or
    the queue stays empty for QUEUE_TIMEOUT seconds.
    """
    count = 0
    # After the loop, count holds the index of the last input (N - 1).
    for count, input_data in enumerate(self.GetInput()):
      if count % 2000 == 0:
        logging.debug("%d processed.", count)
      args = (input_data, self.out_queue, self.token)
      self.thread_pool.AddTask(target=self.IterFunction, args=args,
                               name=self.THREAD_POOL_NAME)

    # Drain up to N results (count goes N-1 .. 0).
    # NOTE(review): count is decremented even for falsy results that are not
    # yielded, and an empty input still performs one timed get — confirm
    # these edge behaviors are acceptable.
    while count >= 0:
      try:
        # We only use the timeout to wait if we got to the end of the Queue but
        # didn't process everything yet.
        out = self.out_queue.get(timeout=self.QUEUE_TIMEOUT, block=True)
        if out:
          yield out
        count -= 1
      except Queue.Empty:
        break

    # Join and stop to clean up the threadpool.
    self.thread_pool.Stop()

  def IterFunction(self, *args):
    """Function to run on each input. This can be overridden."""
    self.func(*args)
class IterateAllClients(IterateAllClientUrns):
  """Class to iterate over all GRR Client objects."""

  def __init__(self, max_age, client_chunksize=25, **kwargs):
    """Iterate over all clients in a threadpool.

    Args:
      max_age: Maximum age in seconds of clients to check.
      client_chunksize: Number of clients to open per MultiOpen batch.
      **kwargs: Arguments passed to init.
    """
    super(IterateAllClients, self).__init__(**kwargs)
    self.client_chunksize = client_chunksize
    self.max_age = max_age

  def GetInput(self):
    """Yield client urns."""
    client_list = GetAllClients(token=self.token)
    logging.debug("Got %d clients", len(client_list))
    # Open clients in batches to bound memory and round trips.
    for client_group in utils.Grouper(client_list, self.client_chunksize):
      for fd in aff4.FACTORY.MultiOpen(client_group, mode="r",
                                       aff4_type="VFSGRRClient",
                                       token=self.token):
        if isinstance(fd, aff4_grr.VFSGRRClient):
          # Skip if older than max_age
          # (PING is compared in microseconds, hence the 1e6 factor).
          oldest_time = (time.time() - self.max_age) * 1e6
          # NOTE(review): the isinstance check above uses
          # aff4_grr.VFSGRRClient but this lookup uses aff4.VFSGRRClient —
          # confirm the schema class really lives on the aff4 module.
          if fd.Get(aff4.VFSGRRClient.SchemaCls.PING) >= oldest_time:
            yield fd
def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE):
  """Download an aff4 file to the local filesystem overwriting it if it exists.

  Args:
    file_obj: An aff4 object that supports the file interface (Read, Seek)
    target_path: Full path of file to write to.
    buffer_size: Read in chunks this size.
  """
  logging.info(u"Downloading: %s to: %s", file_obj.urn, target_path)

  # Open in binary mode: AFF4 payloads are raw bytes, and the previous text
  # mode ("w") corrupted data on platforms that translate line endings. The
  # context manager also guarantees the handle is closed even if a Read or
  # write raises (the original leaked the handle on error).
  with open(target_path, "wb") as target_file:
    file_obj.Seek(0)
    count = 0

    data_buffer = file_obj.Read(buffer_size)
    while data_buffer:
      target_file.write(data_buffer)
      data_buffer = file_obj.Read(buffer_size)
      count += 1
      # Log progress every third chunk to keep the log volume down.
      if not count % 3:
        logging.debug(u"Downloading: %s: %s done", file_obj.urn,
                      utils.FormatNumberAsString(count * buffer_size))
def RecursiveDownload(dir_obj, target_dir, max_depth=10, depth=1,
                      overwrite=False, max_threads=10):
  """Recursively downloads a file entry to the target path.

  Args:
    dir_obj: An aff4 object that contains children.
    target_dir: Full path of the directory to write to.
    max_depth: Depth to download to. 1 means just the directory itself.
    depth: Current depth of recursion.
    overwrite: Should we overwrite files that exist.
    max_threads: Use this many threads to do the downloads.
  """
  # Only containers (volumes) can be recursed into; HashImage is a stream
  # subtype that must be excluded explicitly.
  if (not isinstance(dir_obj, aff4.AFF4Volume) or
      isinstance(dir_obj, aff4.HashImage)):
    return

  # Reuse the same threadpool as we call recursively.
  thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads)
  thread_pool.Start()

  for sub_file_entry in dir_obj.OpenChildren():
    # NOTE(review): path_elements never has the child's name appended, so
    # sub_target_dir is always identical to target_dir — every level of the
    # tree is written into the same directory. Confirm whether a
    # `path_elements.append(...)` was lost here.
    path_elements = [target_dir]
    sub_target_dir = u"/".join(path_elements)
    try:
      # Any file-like object with data in AFF4 should inherit AFF4Stream.
      if isinstance(sub_file_entry, aff4.AFF4Stream):
        args = (sub_file_entry.urn, sub_target_dir, sub_file_entry.token,
                overwrite)
        thread_pool.AddTask(target=CopyAFF4ToLocal, args=args,
                            name="Downloader")
      elif "Container" in sub_file_entry.behaviours:
        if depth >= max_depth:  # Don't go any deeper.
          continue
        try:
          os.makedirs(sub_target_dir)
        except OSError:
          # Directory already exists.
          pass
        # NOTE(review): max_threads is not forwarded here, so nested levels
        # fall back to the default of 10 — confirm intended.
        RecursiveDownload(sub_file_entry, sub_target_dir, overwrite=overwrite,
                          depth=depth + 1)
    except IOError:
      logging.exception("Unable to download %s", sub_file_entry.urn)
    finally:
      sub_file_entry.Close()

  # Join and stop the threadpool.
  # Only the outermost call stops the pool; recursive calls leave it running.
  if depth <= 1:
    thread_pool.Stop()
def DownloadCollection(coll_path, target_path, token=None, overwrite=False,
                       dump_client_info=False, flatten=False,
                       max_threads=10):
  """Iterate through a Collection object downloading all files.

  Args:
    coll_path: Path to an AFF4 collection.
    target_path: Base directory to write to.
    token: Token for access.
    overwrite: If True, overwrite existing files.
    dump_client_info: If True, this will detect client paths, and dump a yaml
      version of the client object to the root path. This is useful for seeing
      the hostname/users of the machine the client id refers to.
    flatten: If True, produce a "files" flat folder with links to all the found
      files.
    max_threads: Use this many threads to do the downloads.
  """
  completed_clients = set()
  try:
    coll = aff4.FACTORY.Open(coll_path, aff4_type="RDFValueCollection",
                             token=token)
  except IOError:
    logging.error("%s is not a valid collection. Typo? "
                  "Are you sure something was written to it?", coll_path)
    return
  thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads)
  thread_pool.Start()

  logging.info("Expecting to download %s files", coll.size)

  # Collections can include anything they want, but we only handle RDFURN and
  # StatEntry entries in this function.
  for grr_message in coll:
    source = None

    # If a raw message, work out the type.
    if isinstance(grr_message, rdf_flows.GrrMessage):
      source = grr_message.source
      grr_message = grr_message.payload

    # Collections can contain AFF4ObjectSummary objects which encapsulate
    # RDFURNs and StatEntrys.
    if isinstance(grr_message, rdf_client.AFF4ObjectSummary):
      urn = grr_message.urn
    elif isinstance(grr_message, rdfvalue.RDFURN):
      urn = grr_message
    elif isinstance(grr_message, rdf_client.StatEntry):
      urn = rdfvalue.RDFURN(grr_message.aff4path)
    elif isinstance(grr_message, file_finder.FileFinderResult):
      urn = rdfvalue.RDFURN(grr_message.stat_entry.aff4path)
    elif isinstance(grr_message, rdfvalue.RDFBytes):
      # Raw bytes have no URN; dump them into a file named after the source
      # client and move on.
      try:
        os.makedirs(target_path)
      except OSError:
        # Directory already exists.
        pass
      try:
        # We just dump out bytes and carry on.
        client_id = source.Split()[0]
        with open(os.path.join(target_path, client_id), "wb") as fd:
          fd.write(str(grr_message))
      except AttributeError:
        # No source was set on the message (source is still None).
        pass
      continue
    else:
      # Unsupported payload type; skip it.
      continue

    # Handle dumping client info, but only once per client.
    client_id = urn.Split()[0]
    re_match = aff4.AFF4Object.VFSGRRClient.CLIENT_ID_RE.match(client_id)
    if dump_client_info and re_match and client_id not in completed_clients:
      args = (rdf_client.ClientURN(client_id), target_path, token, overwrite)
      thread_pool.AddTask(target=DumpClientYaml, args=args,
                          name="ClientYamlDownloader")
      completed_clients.add(client_id)

    # Now queue downloading the actual files.
    args = (urn, target_path, token, overwrite)
    if flatten:
      target = CopyAndSymlinkAFF4ToLocal
    else:
      target = CopyAFF4ToLocal
    thread_pool.AddTask(target=target, args=args, name="Downloader")

  # Join and stop the threadpool.
  thread_pool.Stop()
def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False):
  """Copy an AFF4 object that supports a read interface to local filesystem.

  Args:
    aff4_urn: URN of thing to copy.
    target_dir: Directory to copy the file to.
    token: Auth token.
    overwrite: If True overwrite the file if it exists.

  Returns:
    If aff4_urn points to a file, returns path to the downloaded file.
    Otherwise returns None.

  By default file will only be overwritten if file size differs.
  """
  try:
    fd = aff4.FACTORY.Open(aff4_urn, token=token)
    # Strip the leading "/" from the URN path and root it under target_dir.
    local_path = os.path.join(target_dir, fd.urn.Path()[1:])

    if isinstance(fd, aff4.VFSDirectory):
      # Directories are simply materialised on disk; nothing to download.
      try:
        os.makedirs(local_path)
      except OSError:
        pass
      return None

    if isinstance(fd, aff4.AFF4Stream):
      if not os.path.isfile(local_path):
        try:
          # Ensure the parent directory exists before writing.
          os.makedirs(os.path.dirname(local_path))
        except OSError:
          pass
        DownloadFile(fd, local_path)
      elif (overwrite or
            os.stat(local_path)[stat.ST_SIZE] != fd.Get(fd.Schema.SIZE)):
        # Re-download because the user asked for it, or sizes differ.
        DownloadFile(fd, local_path)
      else:
        logging.info("File %s exists, skipping", local_path)
      return local_path

    raise RuntimeError("Opened urn is neither a downloaded file nor a "
                       "directory: %s" % aff4_urn)
  except IOError as e:
    logging.exception("Failed to read %s due to %s", aff4_urn, e)
    raise
def CopyAndSymlinkAFF4ToLocal(aff4_urn, target_dir, token=None,
                              overwrite=False):
  """Download aff4_urn, then symlink it into a flat "files" directory.

  Args:
    aff4_urn: URN of thing to copy.
    target_dir: Directory to copy the file to.
    token: Auth token.
    overwrite: If True overwrite the file if it exists.
  """
  path = CopyAFF4ToLocal(aff4_urn, target_dir, token=token,
                         overwrite=overwrite)
  if not path:
    # Directory (or nothing) was downloaded; nothing to link.
    return

  files_output_dir = os.path.join(target_dir, "files")
  try:
    os.makedirs(files_output_dir)
  except OSError:
    pass

  # Flatten the URN components into a unique, filesystem-safe name.
  symlink_path = os.path.join(files_output_dir, "_".join(aff4_urn.Split()))
  try:
    os.symlink(path, symlink_path)
  except OSError:
    logging.exception("Can't create symlink to a file: %s -> %s",
                      symlink_path, path)
def DumpClientYaml(client_urn, target_dir, token=None, overwrite=False):
  """Dump a yaml file containing client info."""
  fd = aff4.FACTORY.Open(client_urn, "VFSGRRClient", token=token)
  client_dir = os.path.join(target_dir, fd.urn.Split()[0])
  try:
    # Due to threading this can actually be created by another thread.
    os.makedirs(client_dir)
  except OSError:
    pass
  yaml_path = os.path.join(client_dir, "client_info.yaml")
  if overwrite or not os.path.isfile(yaml_path):
    with open(yaml_path, "w") as yaml_fd:
      yaml_fd.write(serialize.YamlDumper(fd))
| |
"""Test the utils file for the mailer app"""
# pylint: disable=invalid-name
import base64
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.dateparse import parse_datetime
from mock import Mock, patch
from open_connect.accounts.utils import generate_nologin_hash
from open_connect.mailer import utils
from open_connect.mailer.models import EmailOpen
# The proper url_represenatation_encode version of OPEN_DATA
OPEN_DATA_ENCODED = ('az11TFNiZ0FTd1drJmU9bWUlNDByYXp6bWF0YXp6LmxvY2FsJnQ9MjAx'
                     'NC0wNC0wNysxNyUzQTAxJTNBMTIlMkIwMCUzQTAwJm49MTA')
# The url_represenatation_encode hash that assumes EMAIL_SECRET_KEY is 'abcd'
OPEN_DATA_HASH = 'e3ace8b556'
# Decoded fixture: 'e' is the email, 'n' the notification id and 't' the
# timestamp (see TestCreateOpen below); 'k' is presumably an opaque code --
# verify against utils.create_open.
OPEN_DATA = {
    'e': 'me@razzmatazz.local',
    'k': 'uLSbgASwWk',
    'n': '10',
    't': '2014-04-07 17:01:12+00:00'
}
# Desktop Chrome-on-Mac user agent reused across the UA-processing tests.
DEMO_USER_AGENT = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2)'
                   ' AppleWebKit/537.36 (KHTML, like Gecko)'
                   ' Chrome/33.0.1750.152 Safari/537.36')
@override_settings(ORIGIN='http://connect.local', EMAIL_SECRET_KEY='abcd')
class TestUnsubscribeURLGenerator(TestCase):
    """Test the unsubscribe_url function"""

    def test_unsubscribe_url(self):
        """Test that all the required components are in the URL"""
        generated = utils.unsubscribe_url('test123@example.com')
        expected_path = reverse('unsubscribe')
        expected_code = generate_nologin_hash('test123@example.com')
        # The URL must carry the unsubscribe route, the signed code, the
        # email address and the configured origin.
        self.assertIn(expected_path, generated)
        self.assertIn('code=%s' % expected_code, generated)
        self.assertIn('email=test123@example.com', generated)
        self.assertIn('http://connect.local', generated)
@override_settings(EMAIL_SECRET_KEY='abcd')
class TestUrlRepresentationProcessing(TestCase):
    """Tests for url_representation_encode and url_representation_decode"""

    def test_url_representation_encode(self):
        """Test the url_representation_encode functionality"""
        # As we're passing in a dictionary we cannot guarantee that the order
        # of the URL string will be consistent, so we'll have to test for both
        encoded_options = [
            base64.urlsafe_b64encode('first=Stanley&last=Smith').strip('='),
            base64.urlsafe_b64encode('last=Smith&first=Stanley').strip('=')
        ]
        data = {
            'first': 'Stanley',
            'last': 'Smith'
        }
        code, verification_hash = utils.url_representation_encode(data)
        self.assertTrue(any([option == code for option in encoded_options]))
        # Ensure that the verification hash is a 10 character string
        self.assertEqual(len(verification_hash), 10)
        self.assertIsInstance(verification_hash, str)

    def test_url_representation_decode(self):
        """Test the url_representation_decode functionality"""
        # OPEN_DATA_ENCODED / OPEN_DATA_HASH are the known-good module-level
        # fixtures generated with EMAIL_SECRET_KEY 'abcd'.
        result, verification_hash = utils.url_representation_decode(
            OPEN_DATA_ENCODED)
        self.assertDictEqual(result, OPEN_DATA)
        self.assertEqual(verification_hash, OPEN_DATA_HASH)

    def test_roundtrip(self):
        """Test that an identical dictionary can be encoded and decoded"""
        # Includes URL-hostile characters to exercise the quoting path.
        test_dict = {
            'special_char': '%##!@#9813&&&&',
            'email': 'me@razzmatazz.local'
        }
        code, verification_hash = utils.url_representation_encode(test_dict)
        # Decode the data, verify we can reverse the process
        decoded_data, decoded_hash = utils.url_representation_decode(code)
        self.assertEqual(decoded_hash, verification_hash)
        self.assertDictEqual(decoded_data, test_dict)
class TestAddressCleaner(TestCase):
    """Test the address_cleaner function"""

    def test_valid_list(self):
        """Test where every address is valid"""
        raw = ('Nick Cat <nickcat@dj.local>, Jordan <jordan@dj.local>,'
               'Jack <jotus@dj.local>, Grace <gotus@dj.local>')
        expected = [
            ('Nick Cat', 'nickcat@dj.local'), ('Jordan', 'jordan@dj.local'),
            ('Jack', 'jotus@dj.local'), ('Grace', 'gotus@dj.local')
        ]
        self.assertEqual(utils.clean_addresses(raw), expected)

    def test_with_invalid_addresses(self):
        """Test where there are some invalid email addresses"""
        # The malformed entries should simply be dropped from the result.
        raw = ('Nick Cat <nickcat@dj.local>, Jordan <jordan@dj.local>,'
               'Jack <jotus@dj.local>, tester@, Not An Email,'
               'Bad Address <@>, Grace <gotus@dj.local>')
        expected = [
            ('Nick Cat', 'nickcat@dj.local'), ('Jordan', 'jordan@dj.local'),
            ('Jack', 'jotus@dj.local'), ('Grace', 'gotus@dj.local')
        ]
        self.assertEqual(utils.clean_addresses(raw), expected)

    def test_empty_list(self):
        """Test where an empty string is passed to the function"""
        self.assertEqual(utils.clean_addresses(''), [])

    def test_lowercase_email_addresses(self):
        """Test that the function lowercases all addresses"""
        raw = ('Nick Cat <niCkcAt@dj.local>, Jordan <Jordan@dj.local>,'
               'Jack <JotUS@dj.local>, Grace <GotUS@dj.local>')
        expected = [
            ('Nick Cat', u'nickcat@dj.local'), ('Jordan', 'jordan@dj.local'),
            ('Jack', 'jotus@dj.local'), ('Grace', 'gotus@dj.local')
        ]
        self.assertEqual(utils.clean_addresses(raw), expected)
@override_settings(EMAIL_SECRET_KEY='abcd')
class TestGenerateCode(TestCase):
    """Tests for generate_code"""

    def test_generate_code(self):
        """Test the generate_code function returns a 10 character string"""
        code = utils.generate_code()
        self.assertEqual(len(code), 10)
        self.assertIsInstance(code, str)

    @patch('open_connect.mailer.utils.uuid')
    def test_generate_code_removes_special_characters(self, mock):
        """Test the generate code special character removal functionality."""
        # Stub the full uuid4().hex.decode(...).encode(...) call chain so the
        # raw material contains special characters interleaved with digits.
        # NOTE(review): the expected value implies generate_code strips the
        # special characters and truncates to 10 -- verify against utils.
        mock.uuid4().hex.decode().encode.return_value = '1+2#3*4/5+6-7%8+90ABC'
        code = utils.generate_code()
        # Ensure that the result is the base64 code
        self.assertEqual(code, '1234567890')
class TestUserAgentProcessing(TestCase):
    """Tests for User Agent Processing Functionality"""

    def test_prettify_agent_no_minor_no_patch(self):
        """Test the prettify_agent_version with no minor or patch number"""
        version_info = {
            'family': 'Internet Explorer',
            'major': '10'
        }
        self.assertEqual(
            utils.prettify_agent_version(version_info),
            'Internet Explorer 10')

    def test_prettify_agent(self):
        """Test the prettify_agent_version with all fields"""
        version_info = {
            'family': 'Chrome',
            'major': '33',
            'minor': '0',
            'patch': '1750'
        }
        self.assertEqual(
            utils.prettify_agent_version(version_info), 'Chrome 33.0.1750')

    def test_processuseragent_desktop(self):
        """Test the process useragent function with a desktop client"""
        os_name, browser, device = utils.process_useragent(DEMO_USER_AGENT)
        self.assertEqual(os_name, 'Mac OS X 10.9.2')
        self.assertEqual(browser, 'Chrome 33.0.1750')
        self.assertEqual(device, 'Other')

    def test_process_user_agent_mobile(self):
        """Test the process useragent function with a mobile client"""
        agent = ('Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X)'
                 ' AppleWebKit/536.26 (KHTML, like Gecko)'
                 ' Version/6.0 Mobile/10A5376e Safari/8536.25')
        os_name, browser, device = utils.process_useragent(agent)
        self.assertEqual(os_name, 'iOS 6.0')
        self.assertEqual(browser, 'Mobile Safari 6.0')
        self.assertEqual(device, 'iPhone')
class TestCreateOpen(TestCase):
    """Test the create_open function"""

    def setUp(self):
        """Setup the Create Open Test"""
        # Minimal WSGI-style header dict shared by every test below.
        self.headers = {
            'REMOTE_ADDR': '127.0.0.1',
            'HTTP_USER_AGENT': DEMO_USER_AGENT,
            'HTTP_REFERER': 'https://mail.google.com/mail/u/1/'
        }

    def test_open(self):
        """Test a full open creation"""
        initial_count = EmailOpen.objects.count()
        utils.create_open(OPEN_DATA, self.headers)
        new_count = EmailOpen.objects.count()
        # Exactly one EmailOpen row should have been created.
        self.assertEqual(new_count, initial_count + 1)
        new_open = EmailOpen.objects.latest('pk')
        # Fields decoded from OPEN_DATA ('e', 't', 'n').
        self.assertEqual(new_open.email, 'me@razzmatazz.local')
        self.assertEqual(
            new_open.timestamp,
            parse_datetime('2014-04-07 17:01:12+00:00')
        )
        self.assertEqual(new_open.notification, 10)
        # Fields derived from the request headers.
        self.assertEqual(new_open.ip_address, '127.0.0.1')
        self.assertEqual(
            new_open.user_agent,
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2)'
            ' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152'
            ' Safari/537.36'
        )
        self.assertEqual(new_open.referrer_netloc, 'mail.google.com')
        self.assertEqual(
            new_open.referrer, 'https://mail.google.com/mail/u/1/')
        self.assertEqual(new_open.operating_system, 'Mac OS X 10.9.2')
        self.assertEqual(new_open.browser, 'Chrome 33.0.1750')
        self.assertEqual(new_open.device_family, 'Other')

    def test_open_no_notification(self):
        """Test creating an open without a notification id"""
        initial_count = EmailOpen.objects.count()
        # Copy so the module-level fixture is not mutated.
        data = OPEN_DATA.copy()
        del data['n']
        utils.create_open(data, self.headers)
        new_count = EmailOpen.objects.count()
        self.assertEqual(new_count, initial_count + 1)
        new_open = EmailOpen.objects.latest('pk')
        self.assertIsNone(new_open.notification)

    def test_open_no_referrer(self):
        """Test creating an open without a referrer"""
        initial_count = EmailOpen.objects.count()
        headers = self.headers.copy()
        del headers['HTTP_REFERER']
        utils.create_open(OPEN_DATA, headers)
        new_count = EmailOpen.objects.count()
        self.assertEqual(new_count, initial_count + 1)
        new_open = EmailOpen.objects.latest('pk')
        self.assertIsNone(new_open.referrer_netloc)
        self.assertIsNone(new_open.referrer)

    def test_open_no_user_agent(self):
        """Test creating an open without a user agent"""
        initial_count = EmailOpen.objects.count()
        headers = self.headers.copy()
        del headers['HTTP_USER_AGENT']
        utils.create_open(OPEN_DATA, headers)
        new_count = EmailOpen.objects.count()
        self.assertEqual(new_count, initial_count + 1)
        new_open = EmailOpen.objects.latest('pk')
        # All UA-derived fields should be empty, not garbage.
        self.assertIsNone(new_open.operating_system)
        self.assertIsNone(new_open.browser)
        self.assertIsNone(new_open.device_family)

    def test_multiple_ip_addresses(self):
        """X_FORWARDED_FOR can sometimes send multiple IPs, should use first."""
        initial_count = EmailOpen.objects.count()
        headers = self.headers.copy()
        headers['HTTP_X_FORWARDED_FOR'] = '1.1.1.1, 2.2.2.2, 3.3.3.3'
        utils.create_open(OPEN_DATA, headers)
        new_count = EmailOpen.objects.count()
        self.assertEqual(new_count, initial_count + 1)
        new_open = EmailOpen.objects.latest('pk')
        self.assertEqual(new_open.ip_address, '1.1.1.1')
@patch.object(utils, 'EmailMultiAlternatives')
class TestSendEmail(TestCase):
    """Test the send_email helper"""
    # pylint: disable=no-self-use

    def test_send_email(self, mock):
        """Test the functionality of the send_email helper"""
        # Constructing the patched class inside send_email yields our mock.
        email_mock = Mock()
        mock.return_value = email_mock
        utils.send_email(
            email='gracegrant@razzmatazz.local',
            from_email='no-reply@razzmatazz.local',
            subject='Updates',
            text='You have a new message. someurl',
            html='this is my snippet someurl'
        )
        # The plain-text body goes into the message constructor...
        mock.assert_called_once_with(
            body='You have a new message. someurl',
            to=(u'gracegrant@razzmatazz.local',),
            subject='Updates',
            from_email='no-reply@razzmatazz.local'
        )
        # ...and the HTML variant is attached as an alternative MIME part.
        email_mock.attach_alternative.assert_called_once_with(
            mimetype='text/html', content='this is my snippet someurl'
        )
| |
#!/usr/bin/env python
# coding: utf-8
from datetime import datetime
from distutils import spawn
import argparse
import json
import os
import platform
import shutil
import socket
import sys
import urllib
import urllib2
import main
###############################################################################
# Options
###############################################################################
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
    '-d', '--dependencies', dest='install_dependencies', action='store_true',
    help='install virtualenv and python dependencies',
)
PARSER.add_argument(
    '-s', '--start', dest='start', action='store_true',
    help='starts the dev_appserver.py with storage_path pointing to temp',
)
PARSER.add_argument(
    '-o', '--host', dest='host', action='store', default='127.0.0.1',
    help='the host to start the dev_appserver.py',
)
PARSER.add_argument(
    '-p', '--port', dest='port', action='store', default='8080',
    help='the port to start the dev_appserver.py',
)
PARSER.add_argument(
    '--appserver-args', dest='args', nargs=argparse.REMAINDER, default=[],
    help='all following args are passed to dev_appserver.py',
)
PARSER.add_argument(
    '-v', '--version', dest='show_version', action='store_true',
    help='Show gae-init version',
)
# Parsed once at import time; the rest of the script reads this global.
ARGS = PARSER.parse_args()

###############################################################################
# Globals
###############################################################################
BAD_ENDINGS = ['pyc', 'pyo', '~']
# Cached result of find_gae_path(); filled in lazily on first lookup.
GAE_PATH = ''
IS_WINDOWS = platform.system() == 'Windows'

###############################################################################
# Directories
###############################################################################
DIR_MAIN = 'main'
DIR_TEMP = 'temp'
DIR_VENV = os.path.join(DIR_TEMP, 'venv')
DIR_LIB = os.path.join(DIR_MAIN, 'lib')
DIR_LIBX = os.path.join(DIR_MAIN, 'libx')
FILE_LIB = '%s.zip' % DIR_LIB
FILE_REQUIREMENTS = 'requirements.txt'
# Touched after each successful pip run; see make_guard()/guard_is_newer().
FILE_PIP_GUARD = os.path.join(DIR_TEMP, 'pip.guard')
# The virtualenv activate script lives in a platform-specific location.
FILE_VENV = os.path.join(DIR_VENV, 'Scripts', 'activate.bat') \
    if IS_WINDOWS \
    else os.path.join(DIR_VENV, 'bin', 'activate')
DIR_STORAGE = os.path.join(DIR_TEMP, 'storage')
FILE_UPDATE = os.path.join(DIR_TEMP, 'update.json')

###############################################################################
# Other global variables
###############################################################################
CORE_VERSION_URL = 'https://gae-init.appspot.com/_s/version/'
# NOTE(review): constant name has a typo ("INERNET") -- kept as-is because
# internet_on() references it by this exact name.
INERNET_TEST_URL = 'https://www.google.com'
REQUIREMENTS_URL = 'http://docs.gae-init.appspot.com/requirement/'

###############################################################################
# Helpers
###############################################################################
def print_out(script, filename=''):
  """Print one aligned status line: "[HH:MM:SS] script filename"."""
  timestamp = datetime.now().strftime('%H:%M:%S')
  if not filename:
    # No filename: pad with dashes so the columns stay visually aligned.
    filename = '-' * 46
    script = script.rjust(12, '-')
  print '[%s] %12s %s' % (timestamp, script, filename)
def make_dirs(directory):
  """Create `directory` (including parents) if it does not already exist.

  Uses EAFP instead of check-then-create: the original exists()/makedirs()
  pair could race with a concurrent run between the check and the creation.
  Real failures (e.g. permission errors) are re-raised instead of hidden.
  """
  try:
    os.makedirs(directory)
  except OSError:
    # "Already exists" is fine; anything else is a genuine error.
    if not os.path.isdir(directory):
      raise
def listdir(directory, split_ext=False):
  """Return directory entries, optionally with file extensions stripped.

  Missing or unreadable directories yield an empty list instead of raising.
  """
  try:
    entries = os.listdir(directory)
  except OSError:
    return []
  if split_ext:
    return [os.path.splitext(entry)[0] for entry in entries]
  return entries
def site_packages_path():
  """Return the virtualenv's site-packages directory for this platform."""
  if IS_WINDOWS:
    return os.path.join(DIR_VENV, 'Lib', 'site-packages')
  # POSIX layouts nest site-packages under a pythonX.Y directory.
  return os.path.join(
      DIR_VENV, 'lib', 'python%s.%s' % sys.version_info[:2], 'site-packages')
def create_virtualenv():
  """Create temp/venv on first use and wire it up for App Engine imports.

  The venv's activate script is amended to clear PYTHONPATH, and a gae.pth
  file adds the GAE SDK, main/libx and a dev_appserver sys.path fixup.

  Returns:
    True (always), so callers can chain on it.
  """
  if not os.path.exists(FILE_VENV):
    os.system('virtualenv --no-site-packages %s' % DIR_VENV)
    os.system('echo %s >> %s' % (
        'set PYTHONPATH=' if IS_WINDOWS else 'unset PYTHONPATH', FILE_VENV
    ))
    pth_file = os.path.join(site_packages_path(), 'gae.pth')
    echo_to = 'echo %s >> {pth}'.format(pth=pth_file)
    os.system(echo_to % find_gae_path())
    os.system(echo_to % os.path.abspath(DIR_LIBX))
    # .pth lines starting with "import" are executed by site.py at startup.
    fix_path_cmd = 'import dev_appserver; dev_appserver.fix_sys_path()'
    os.system(echo_to % (
        fix_path_cmd if IS_WINDOWS else '"%s"' % fix_path_cmd
    ))
  return True
def exec_pip_commands(command):
  """Run a pip command line inside the project virtualenv via the shell.

  Args:
    command: the pip command to execute, e.g. 'pip install -q -r ...'.
  """
  script = []
  if create_virtualenv():
    # Activate the venv first so pip installs into it.
    activate_cmd = 'call %s' if IS_WINDOWS else 'source %s'
    activate_cmd %= FILE_VENV
    script.append(activate_cmd)
  script.append('echo %s' % command)
  script.append('%s SKIP_GOOGLEAPICLIENT_COMPAT_CHECK=1' %
                ('set' if IS_WINDOWS else 'export'))
  script.append(command)
  # Windows chains with '&'; POSIX runs everything in a single bash -c.
  script = '&'.join(script) if IS_WINDOWS else \
      '/bin/bash -c "%s"' % ';'.join(script)
  os.system(script)
def make_guard(fname, cmd, spec):
  """Write a marker file recording that `cmd` already ran against `spec`."""
  guard_text = 'Prevents %s execution if newer than %s' % (cmd, spec)
  with open(fname, 'w') as guard:
    guard.write(guard_text)
def guard_is_newer(guard, watched):
  """Return True when `guard` exists and was modified after `watched`."""
  if not os.path.exists(guard):
    return False
  return os.path.getmtime(guard) > os.path.getmtime(watched)
def check_if_pip_should_run():
  """pip must run unless the guard file is newer than requirements.txt."""
  requirements_unchanged = guard_is_newer(FILE_PIP_GUARD, FILE_REQUIREMENTS)
  return not requirements_unchanged
def install_py_libs():
  """Install requirements into the venv, then vendor them into main/lib.

  Skipped entirely when the pip guard is newer than requirements.txt and
  main/lib already exists.
  """
  if not check_if_pip_should_run() and os.path.exists(DIR_LIB):
    return
  exec_pip_commands('pip install -q -r %s' % FILE_REQUIREMENTS)

  # Packaging metadata and tooling that must not be vendored into the app.
  exclude_ext = ['.pth', '.pyc', '.egg-info', '.dist-info', '.so']
  exclude_prefix = ['setuptools-', 'pip-', 'Pillow-']
  exclude = [
      'test', 'tests', 'pip', 'setuptools', '_markerlib', 'PIL',
      'easy_install.py', 'pkg_resources.py'
  ]

  def _exclude_prefix(pkg):
    # True when pkg matches one of the excluded distribution prefixes.
    for prefix in exclude_prefix:
      if pkg.startswith(prefix):
        return True
    return False

  def _exclude_ext(pkg):
    # True when pkg carries one of the excluded file extensions.
    for ext in exclude_ext:
      if pkg.endswith(ext):
        return True
    return False

  def _get_dest(pkg):
    # Lazily create main/lib and return the vendored destination path.
    make_dirs(DIR_LIB)
    return os.path.join(DIR_LIB, pkg)

  site_packages = site_packages_path()
  # Anything already vendored (in lib or libx) is left untouched.
  dir_libs = listdir(DIR_LIB)
  dir_libs.extend(listdir(DIR_LIBX))
  for dir_ in listdir(site_packages):
    if dir_ in dir_libs or dir_ in exclude:
      continue
    if _exclude_prefix(dir_) or _exclude_ext(dir_):
      continue
    src_path = os.path.join(site_packages, dir_)
    # Files are copied; package directories are copied recursively.
    copy = shutil.copy if os.path.isfile(src_path) else shutil.copytree
    copy(src_path, _get_dest(dir_))
  # Record success so later runs skip pip until requirements.txt changes.
  make_guard(FILE_PIP_GUARD, 'pip', FILE_REQUIREMENTS)
def install_dependencies():
  """Ensure the temp dir exists, then install/vendor Python dependencies."""
  make_dirs(DIR_TEMP)
  install_py_libs()
def check_for_update():
  """Ask the gae-init server (at most once per UTC day) for version info.

  The response is cached in FILE_UPDATE; network failures are swallowed so
  offline development never blocks.
  """
  if os.path.exists(FILE_UPDATE):
    mtime = os.path.getmtime(FILE_UPDATE)
    last = datetime.utcfromtimestamp(mtime).strftime('%Y-%m-%d')
    today = datetime.utcnow().strftime('%Y-%m-%d')
    if last == today:
      # Already checked today; don't hit the network again.
      return
  try:
    # Touch the cache first so even a failed request counts as today's check.
    with open(FILE_UPDATE, 'a'):
      os.utime(FILE_UPDATE, None)
    request = urllib2.Request(
        CORE_VERSION_URL,
        urllib.urlencode({'version': main.__version__}),
    )
    response = urllib2.urlopen(request)
    with open(FILE_UPDATE, 'w') as update_json:
      update_json.write(response.read())
  except (urllib2.HTTPError, urllib2.URLError):
    # Best effort only.
    pass
def print_out_update(force_show=False):
  """Print an update notice when a newer gae-init version is cached.

  Args:
    force_show: if True, print version info even when already up to date.
  """
  try:
    import pip
    # Older pip exposed version helpers directly under pip.util ...
    SemVer = pip.util.version.SemanticVersion
  except AttributeError:
    # ... newer pip only ships the vendored distlib implementation.
    import pip._vendor.distlib.version
    SemVer = pip._vendor.distlib.version.SemanticVersion
  try:
    with open(FILE_UPDATE, 'r') as update_json:
      data = json.load(update_json)
    if SemVer(main.__version__) < SemVer(data['version']) or force_show:
      print_out('UPDATE')
      print_out(data['version'], 'Latest version of gae-init')
      print_out(main.__version__, 'Your version is a bit behind')
      print_out('CHANGESET', data['changeset'])
  except (ValueError, KeyError):
    # Corrupt or partial cache file -- drop it so the next run refetches.
    os.remove(FILE_UPDATE)
  except IOError:
    # No cache yet; nothing to report.
    pass
###############################################################################
# Doctor
###############################################################################
def internet_on():
  """Return True when a quick request to a well-known host succeeds."""
  try:
    urllib2.urlopen(INERNET_TEST_URL, timeout=2)
    return True
  except (urllib2.URLError, socket.timeout):
    return False
def check_requirement(check_func):
  """Run one requirement probe and print install docs pointer on failure.

  Args:
    check_func: callable returning (ok, display_name, help_url_fragment).

  Returns:
    True when the requirement is satisfied, False otherwise.
  """
  result, name, help_url_id = check_func()
  if not result:
    print_out('NOT FOUND', name)
    if help_url_id:
      print 'Please see %s%s' % (REQUIREMENTS_URL, help_url_id)
    return False
  return True
def find_gae_path():
  """Locate the Google App Engine SDK directory, caching the result.

  Returns:
    Absolute SDK path, or '' when it cannot be found.
  """
  global GAE_PATH
  if GAE_PATH:
    # Cached from a previous call.
    return GAE_PATH
  if IS_WINDOWS:
    gae_path = None
    # On Windows, scan PATH manually for dev_appserver.py.
    for path in os.environ['PATH'].split(os.pathsep):
      if os.path.isfile(os.path.join(path, 'dev_appserver.py')):
        gae_path = path
  else:
    gae_path = spawn.find_executable('dev_appserver.py')
    if gae_path:
      gae_path = os.path.dirname(os.path.realpath(gae_path))
  if not gae_path:
    return ''
  gcloud_exec = 'gcloud.cmd' if IS_WINDOWS else 'gcloud'
  if not os.path.isfile(os.path.join(gae_path, gcloud_exec)):
    # Standalone SDK install: dev_appserver.py sits in the SDK root itself.
    GAE_PATH = gae_path
  else:
    # Cloud SDK install: the GAE SDK lives under platform/google_appengine.
    gae_path = os.path.join(gae_path, '..', 'platform', 'google_appengine')
    if os.path.exists(gae_path):
      GAE_PATH = os.path.realpath(gae_path)
  return GAE_PATH
def check_internet():
  """Requirement probe: outbound internet connectivity."""
  ok = internet_on()
  return ok, 'Internet', ''
def check_gae():
  """Requirement probe: Google App Engine SDK is installed."""
  ok = find_gae_path() != ''
  return ok, 'Google App Engine SDK', '#gae'
def check_git():
  """Requirement probe: git is available on PATH."""
  ok = spawn.find_executable('git') is not None
  return ok, 'Git', '#git'
def check_nodejs():
  """Requirement probe: Node.js is available on PATH."""
  ok = spawn.find_executable('node') is not None
  return ok, 'Node.js', '#nodejs'
def check_pip():
  """Requirement probe: pip is available on PATH."""
  ok = spawn.find_executable('pip') is not None
  return ok, 'pip', '#pip'
def check_virtualenv():
  """Requirement probe: virtualenv is available on PATH."""
  ok = spawn.find_executable('virtualenv') is not None
  return ok, 'virtualenv', '#virtualenv'
def doctor_says_ok():
  """Verify local tooling; exit on hard failures, report internet status.

  All tool checks run (and print) before the process exits, so the user
  sees every missing requirement at once. Internet is advisory only.
  """
  results = [check_requirement(check) for check in (
      check_gae, check_git, check_nodejs, check_pip, check_virtualenv)]
  if not all(results):
    sys.exit(1)
  return check_requirement(check_internet)
###############################################################################
# Main
###############################################################################
def run_start():
  """Launch dev_appserver.py against DIR_MAIN with local storage in temp."""
  make_dirs(DIR_STORAGE)
  port = int(ARGS.port)
  run_command = ' '.join(map(str, [
      'dev_appserver.py',
      DIR_MAIN,
      '--host %s' % ARGS.host,
      '--port %s' % port,
      # The admin console is served on the next port up.
      '--admin_port %s' % (port + 1),
      '--storage_path=%s' % DIR_STORAGE,
      '--skip_sdk_update_check',
  ] + ARGS.args))
  os.system(run_command)
def run():
  """Entry point: validate args and environment, then dispatch on flags."""
  if len(sys.argv) == 1 or (ARGS.args and not ARGS.start):
    # No flags at all, or --appserver-args without --start: show usage.
    PARSER.print_help()
    sys.exit(1)
  # All relative paths below assume the project root.
  os.chdir(os.path.dirname(os.path.realpath(__file__)))
  if doctor_says_ok():
    install_dependencies()
    check_for_update()
  if ARGS.show_version:
    print_out_update(force_show=True)
  else:
    print_out_update()
  if ARGS.start:
    run_start()
  if ARGS.install_dependencies:
    install_dependencies()
if __name__ == '__main__':
run()
| |
from typing import Any, Callable, List, Optional, Tuple, Union
import jax
import jax.numpy as jnp
import numpy as np
from .custom_types import PyTree, TreeDef
from .deprecated import deprecated
#
# Filter functions
#
def is_array(element: Any) -> bool:
    """Returns `True` if `element` is a JAX array (but not a NumPy array)."""
    # NumPy arrays intentionally fail this check; use `is_array_like` for those.
    result = isinstance(element, jnp.ndarray)
    return result
# Does _not_ do a try/except on jnp.asarray(element) because that's very slow.
# Chosen to match
# https://github.com/google/jax/blob/4a17c78605e7fc69a69a999e2f6298db79d3837a/jax/_src/numpy/lax_numpy.py#L542 # noqa: E501
def is_array_like(element: Any) -> bool:
    """Returns `True` if `element` is a JAX array, a NumPy array, or a Python
    `float`/`complex`/`bool`/`int`.
    """
    # Objects advertising `__jax_array__` are convertible, hence array-like.
    if hasattr(element, "__jax_array__"):
        return True
    return isinstance(
        element, (jnp.ndarray, np.ndarray, float, complex, bool, int)
    )
def is_inexact_array(element: Any) -> bool:
    """Returns `True` if `element` is an inexact (i.e. floating point) JAX array."""
    # Inlines the `is_array` check: must be a JAX (not NumPy) array.
    if not isinstance(element, jnp.ndarray):
        return False
    return jnp.issubdtype(element.dtype, jnp.inexact)
def is_inexact_array_like(element: Any) -> bool:
    """Returns `True` if `element` is an inexact JAX array, an inexact NumPy array, or
    a Python `float` or `complex`.
    """
    if hasattr(element, "__jax_array__"):
        element = element.__jax_array__()
    # Plain Python inexact scalars qualify directly.
    if isinstance(element, (float, complex)):
        return True
    # Arrays qualify only when their dtype is floating/complex.
    if isinstance(element, (jnp.ndarray, np.ndarray)):
        return jnp.issubdtype(element.dtype, jnp.inexact)
    return False
#
# Filtering/combining
#
def _make_filter_tree(mask: Union[bool, Callable[[Any], bool]], arg: Any) -> bool:
if isinstance(mask, bool):
return mask
elif callable(mask):
return jax.tree_map(mask, arg)
else:
raise ValueError("`filter_spec` must consist of booleans and callables only.")
def filter(
    pytree: PyTree, filter_spec: PyTree, inverse: bool = False, replace: Any = None
) -> PyTree:
    """
    Filters out the leaves of a PyTree not satisfying a condition. Those not
    satisfying the condition are replaced with `replace`.

    **Arguments:**

    - `pytree` is any PyTree.
    - `filter_spec` is a PyTree whose structure should be a prefix of the
      structure of `pytree`. Each of its leaves should either be `True` (keep
      the leaf/subtree), `False` (replace it with `replace`), or a callable
      `Leaf -> bool` evaluated on the leaf or mapped over the subtree.
    - `inverse` switches the truthy/falsey behaviour: falsey results are kept
      and truthy results are replaced.
    - `replace` is what to replace any falsey leaves with. Defaults to `None`.

    **Returns:**

    A PyTree of the same structure as `pytree`.

    See also `combine` to reconstitute the PyTree again.
    """

    def _expand(mask, subtree):
        # Inline of the module-level `_make_filter_tree` helper.
        if isinstance(mask, bool):
            return mask
        if callable(mask):
            return jax.tree_map(mask, subtree)
        raise ValueError("`filter_spec` must consist of booleans and callables only.")

    inverse = bool(inverse)  # normalise so the != comparison below is reliable
    mask_tree = jax.tree_map(_expand, filter_spec, pytree)

    def _select(mask, leaf):
        return leaf if bool(mask) != inverse else replace

    return jax.tree_map(_select, mask_tree, pytree)
def partition(pytree: PyTree, filter_spec: PyTree, replace: Any = None) -> PyTree:
    """Equivalent to `filter(...), filter(..., inverse=True)`, but slightly more
    efficient.
    """

    def _expand(mask, subtree):
        # Inline of the module-level `_make_filter_tree` helper.
        if isinstance(mask, bool):
            return mask
        if callable(mask):
            return jax.tree_map(mask, subtree)
        raise ValueError("`filter_spec` must consist of booleans and callables only.")

    mask_tree = jax.tree_map(_expand, filter_spec, pytree)
    # One mask evaluation feeds both halves of the partition.
    kept = jax.tree_map(lambda m, leaf: leaf if m else replace, mask_tree, pytree)
    dropped = jax.tree_map(lambda m, leaf: replace if m else leaf, mask_tree, pytree)
    return kept, dropped
def _combine(*args):
for arg in args:
if arg is not None:
return arg
return None
def _is_none(x):
return x is None
def combine(*pytrees: PyTree) -> PyTree:
    """Combines multiple PyTrees into one PyTree, by replacing `None` leaves.

    Example:
        pytree1 = [None, 1, 2]
        pytree2 = [0, None, None]
        combine(pytree1, pytree2)  # [0, 1, 2]

    Intended to undo a call to `filter` or `partition`.

    **Arguments:**

    - `*pytrees`: a sequence of PyTrees all with the same structure.

    **Returns:**

    A PyTree with the same structure as its inputs. Each leaf is the first
    non-`None` leaf found across the corresponding leaves of `pytrees`.
    """

    def _first_non_none(*leaves):
        # Inline of the module-level `_combine` helper.
        for leaf in leaves:
            if leaf is not None:
                return leaf
        return None

    # `is_leaf` makes `None` visible to tree_map instead of being treated as
    # an empty subtree, so it can be replaced.
    return jax.tree_map(_first_non_none, *pytrees, is_leaf=lambda x: x is None)
#
# Deprecated
#
@deprecated(in_favour_of=filter)
def split(
    pytree: PyTree,
    filter_fn: Optional[Callable[[Any], bool]] = None,
    filter_tree: Optional[PyTree] = None,
) -> Tuple[List[Any], List[Any], List[bool], TreeDef]:
    """Deprecated in favour of `filter`. Splits flattened leaves into the
    lists of leaves selected / rejected by the filter, plus the selection
    mask and treedef needed by `merge` to reassemble them.
    """
    validate_filters("split", filter_fn, filter_tree)
    flat, treedef = jax.tree_flatten(pytree)
    if filter_fn is None:
        # Per-leaf selection tree: structures must match exactly.
        which, treedef_filter = jax.tree_flatten(filter_tree)
        if treedef != treedef_filter:
            raise ValueError(
                "filter_tree must have the same tree structure as the PyTree being split."
            )
    else:
        which = [bool(filter_fn(f)) for f in flat]
    flat_true = [f for f, w in zip(flat, which) if w]
    flat_false = [f for f, w in zip(flat, which) if not w]
    return flat_true, flat_false, which, treedef
@deprecated(in_favour_of=combine)
def merge(
    flat_true: List[Any], flat_false: List[Any], which: List[bool], treedef: TreeDef
):
    """Deprecated in favour of `combine`. Reassembles the output of `split`
    back into a single PyTree, consuming each list in order.
    """
    true_iter = iter(flat_true)
    false_iter = iter(flat_false)
    leaves = [next(true_iter) if keep else next(false_iter) for keep in which]
    return jax.tree_unflatten(treedef, leaves)
# Internal and only used by deprecated functions
def validate_filters(fn_name, filter_fn, filter_tree):
    """Check that exactly one of `filter_fn`/`filter_tree` was supplied."""
    provided = (filter_fn is not None) + (filter_tree is not None)
    if provided != 1:
        raise ValueError(
            f"Precisely one of `filter_fn` and `filter_tree` should be passed to {fn_name}"
        )
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the ``cabotapp`` application.

    Creates the Service, ServiceStatusSnapshot, StatusCheck,
    StatusCheckResult, UserProfile and Shift tables, plus the
    users_to_notify and status_checks M2M join tables on Service.
    Generated code: kept byte-for-byte except for documentation.
    """

    def forwards(self, orm):
        """Apply the migration: create every cabotapp table."""
        # Adding model 'Service'
        db.create_table('cabotapp_service', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('name', self.gf('django.db.models.fields.TextField')()),
            ('url', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('last_alert_sent', self.gf('django.db.models.fields.DateTimeField')
             (null=True, blank=True)),
            ('email_alert', self.gf('django.db.models.fields.BooleanField')
             (default=False)),
            ('hipchat_alert',
             self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('sms_alert', self.gf('django.db.models.fields.BooleanField')
             (default=False)),
            ('telephone_alert',
             self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('alerts_enabled',
             self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('overall_status', self.gf('django.db.models.fields.TextField')
             (default='PASSING')),
            ('old_overall_status',
             self.gf('django.db.models.fields.TextField')(default='PASSING')),
            ('hackpad_id', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
        ))
        db.send_create_signal('cabotapp', ['Service'])

        # Adding M2M table for field users_to_notify on 'Service'
        db.create_table('cabotapp_service_users_to_notify', (
            ('id', models.AutoField(verbose_name='ID',
             primary_key=True, auto_created=True)),
            ('service',
             models.ForeignKey(orm['cabotapp.service'], null=False)),
            ('user', models.ForeignKey(orm['auth.user'], null=False))
        ))
        db.create_unique('cabotapp_service_users_to_notify',
                         ['service_id', 'user_id'])

        # Adding M2M table for field status_checks on 'Service'
        db.create_table('cabotapp_service_status_checks', (
            ('id', models.AutoField(verbose_name='ID',
             primary_key=True, auto_created=True)),
            ('service',
             models.ForeignKey(orm['cabotapp.service'], null=False)),
            ('statuscheck',
             models.ForeignKey(orm['cabotapp.statuscheck'], null=False))
        ))
        db.create_unique('cabotapp_service_status_checks',
                         ['service_id', 'statuscheck_id'])

        # Adding model 'ServiceStatusSnapshot'
        db.create_table('cabotapp_servicestatussnapshot', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('service', self.gf('django.db.models.fields.related.ForeignKey')
             (related_name='snapshots', to=orm['cabotapp.Service'])),
            ('time', self.gf('django.db.models.fields.DateTimeField')()),
            ('num_checks_active',
             self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('num_checks_passing',
             self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('num_checks_failing',
             self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('overall_status', self.gf('django.db.models.fields.TextField')
             (default='PASSING')),
            # NOTE(review): boolean default on an IntegerField -- looks like it
            # was meant to be a BooleanField; preserved as generated.
            ('did_send_alert',
             self.gf('django.db.models.fields.IntegerField')(default=False)),
        ))
        db.send_create_signal('cabotapp', ['ServiceStatusSnapshot'])

        # Adding model 'StatusCheck'
        db.create_table('cabotapp_statuscheck', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('polymorphic_ctype', self.gf('django.db.models.fields.related.ForeignKey')
             (related_name='polymorphic_cabotapp.statuscheck_set', null=True, to=orm['contenttypes.ContentType'])),
            ('name', self.gf('django.db.models.fields.TextField')()),
            ('active', self.gf('django.db.models.fields.BooleanField')
             (default=True)),
            ('importance', self.gf('django.db.models.fields.CharField')
             (default='ERROR', max_length=30)),
            ('frequency', self.gf('django.db.models.fields.IntegerField')
             (default=5)),
            ('debounce', self.gf('django.db.models.fields.IntegerField')
             (default=0, null=True)),
            ('created_by', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['auth.User'])),
            ('calculated_status', self.gf('django.db.models.fields.CharField')
             (default='passing', max_length=50, blank=True)),
            ('last_run', self.gf('django.db.models.fields.DateTimeField')
             (null=True)),
            ('cached_health',
             self.gf('django.db.models.fields.TextField')(null=True)),
            ('metric', self.gf('django.db.models.fields.TextField')
             (null=True)),
            ('check_type', self.gf('django.db.models.fields.CharField')
             (max_length=100, null=True)),
            ('value', self.gf('django.db.models.fields.TextField')(null=True)),
            ('expected_num_hosts', self.gf('django.db.models.fields.IntegerField')
             (default=0, null=True)),
            ('endpoint', self.gf('django.db.models.fields.TextField')
             (null=True)),
            ('username', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
            ('password', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
            ('text_match', self.gf('django.db.models.fields.TextField')
             (null=True, blank=True)),
            ('status_code', self.gf('django.db.models.fields.TextField')
             (default=200, null=True)),
            ('timeout', self.gf('django.db.models.fields.IntegerField')
             (default=30, null=True)),
            ('max_queued_build_time',
             self.gf(
                 'django.db.models.fields.IntegerField')(null=True, blank=True)),
        ))
        db.send_create_signal('cabotapp', ['StatusCheck'])

        # Adding model 'StatusCheckResult'
        db.create_table('cabotapp_statuscheckresult', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('check', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['cabotapp.StatusCheck'])),
            ('time', self.gf('django.db.models.fields.DateTimeField')()),
            ('time_complete',
             self.gf('django.db.models.fields.DateTimeField')(null=True)),
            ('raw_data', self.gf('django.db.models.fields.TextField')
             (null=True)),
            ('succeeded', self.gf('django.db.models.fields.BooleanField')
             (default=False)),
            ('error', self.gf('django.db.models.fields.TextField')(null=True)),
        ))
        db.send_create_signal('cabotapp', ['StatusCheckResult'])

        # Adding model 'UserProfile'
        db.create_table('cabotapp_userprofile', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')
             (related_name='profile', unique=True, to=orm['auth.User'])),
            ('mobile_number', self.gf('django.db.models.fields.CharField')
             (default='', max_length=20, blank=True)),
            ('hipchat_alias', self.gf('django.db.models.fields.CharField')
             (default='', max_length=50, blank=True)),
            ('fallback_alert_user',
             self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('cabotapp', ['UserProfile'])

        # Adding model 'Shift'
        db.create_table('cabotapp_shift', (
            ('id', self.gf('django.db.models.fields.AutoField')
             (primary_key=True)),
            ('start', self.gf('django.db.models.fields.DateTimeField')()),
            ('end', self.gf('django.db.models.fields.DateTimeField')()),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['auth.User'])),
            ('uid', self.gf('django.db.models.fields.TextField')()),
            ('deleted', self.gf('django.db.models.fields.BooleanField')
             (default=False)),
        ))
        db.send_create_signal('cabotapp', ['Shift'])

    def backwards(self, orm):
        """Revert the migration: drop every table created by forwards()."""
        # Deleting model 'Service'
        db.delete_table('cabotapp_service')

        # Removing M2M table for field users_to_notify on 'Service'
        db.delete_table('cabotapp_service_users_to_notify')

        # Removing M2M table for field status_checks on 'Service'
        db.delete_table('cabotapp_service_status_checks')

        # Deleting model 'ServiceStatusSnapshot'
        db.delete_table('cabotapp_servicestatussnapshot')

        # Deleting model 'StatusCheck'
        db.delete_table('cabotapp_statuscheck')

        # Deleting model 'StatusCheckResult'
        db.delete_table('cabotapp_statuscheckresult')

        # Deleting model 'UserProfile'
        db.delete_table('cabotapp_userprofile')

        # Deleting model 'Shift'
        db.delete_table('cabotapp_shift')

    # Frozen ORM state South uses to reconstruct models as of this migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cabotapp.service': {
            'Meta': {'ordering': "['name']", 'object_name': 'Service'},
            'alerts_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'email_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hackpad_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'hipchat_alert': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_alert_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'old_overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'sms_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status_checks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cabotapp.StatusCheck']", 'symmetrical': 'False', 'blank': 'True'}),
            'telephone_alert': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'users_to_notify': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'cabotapp.servicestatussnapshot': {
            'Meta': {'object_name': 'ServiceStatusSnapshot'},
            'did_send_alert': ('django.db.models.fields.IntegerField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_checks_active': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_checks_failing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'num_checks_passing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'overall_status': ('django.db.models.fields.TextField', [], {'default': "'PASSING'"}),
            'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'snapshots'", 'to': "orm['cabotapp.Service']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {})
        },
        'cabotapp.shift': {
            'Meta': {'object_name': 'Shift'},
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'end': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'uid': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'cabotapp.statuscheck': {
            'Meta': {'ordering': "['name']", 'object_name': 'StatusCheck'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'cached_health': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'calculated_status': ('django.db.models.fields.CharField', [], {'default': "'passing'", 'max_length': '50', 'blank': 'True'}),
            'check_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'debounce': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'endpoint': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'expected_num_hosts': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'importance': ('django.db.models.fields.CharField', [], {'default': "'ERROR'", 'max_length': '30'}),
            'last_run': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'max_queued_build_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'metric': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'name': ('django.db.models.fields.TextField', [], {}),
            'password': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_cabotapp.statuscheck_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'status_code': ('django.db.models.fields.TextField', [], {'default': '200', 'null': 'True'}),
            'text_match': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'timeout': ('django.db.models.fields.IntegerField', [], {'default': '30', 'null': 'True'}),
            'username': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
        },
        'cabotapp.statuscheckresult': {
            'Meta': {'object_name': 'StatusCheckResult'},
            'check': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabotapp.StatusCheck']"}),
            'error': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'succeeded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'time_complete': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        'cabotapp.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'fallback_alert_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'hipchat_alias': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mobile_number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['cabotapp']
| |
# Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from oslo_serialization import jsonutils as json
from tempest.lib.services.volume.v3 import services_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestServicesClient(base.BaseServiceTest):
    """Unit tests for the volume v3 ServicesClient.

    The three PUT-based actions (enable, disable, disable-log-reason)
    previously repeated the same json.dumps-patching boilerplate verbatim;
    it is factored into _check_service_put.
    """

    FAKE_SERVICE_LIST = {
        "services": [
            {
                "status": "enabled",
                "binary": "cinder-backup",
                "zone": "nova",
                "state": "up",
                "updated_at": "2017-07-20T07:20:17.000000",
                "host": "fake-host",
                "disabled_reason": None
            },
            {
                "status": "enabled",
                "binary": "cinder-scheduler",
                "zone": "nova",
                "state": "up",
                "updated_at": "2017-07-20T07:20:24.000000",
                "host": "fake-host",
                "disabled_reason": None
            },
            {
                "status": "enabled",
                "binary": "cinder-volume",
                "zone": "nova",
                "frozen": False,
                "state": "up",
                "updated_at": "2017-07-20T07:20:20.000000",
                "host": "fake-host@lvm",
                "replication_status": "disabled",
                "active_backend_id": None,
                "disabled_reason": None
            }
        ]
    }

    FAKE_SERVICE_REQUEST = {
        "host": "fake-host",
        "binary": "cinder-volume"
    }

    FAKE_SERVICE_RESPONSE = {
        "disabled": False,
        "status": "enabled",
        "host": "fake-host@lvm",
        "service": "",
        "binary": "cinder-volume",
        "disabled_reason": None
    }

    def setUp(self):
        super(TestServicesClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = services_client.ServicesClient(fake_auth,
                                                     'volume',
                                                     'regionOne')

    def _test_list_services(self, bytes_body=False,
                            mock_args='os-services', **params):
        self.check_service_client_function(
            self.client.list_services,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_SERVICE_LIST,
            to_utf=bytes_body,
            mock_args=[mock_args],
            **params)

    def _check_service_put(self, client_func, url, resp_body,
                           bytes_body, **kwargs):
        """Run a PUT service-client check with a deterministic payload.

        json.dumps is patched to sort keys so that the payload built by the
        client under test is byte-identical to the expected payload computed
        here, regardless of dict key ordering.
        """
        payload = json.dumps(kwargs, sort_keys=True)
        json_dumps = json.dumps
        with mock.patch.object(services_client.json, 'dumps') as mock_dumps:
            mock_dumps.side_effect = lambda d: json_dumps(d, sort_keys=True)
            self.check_service_client_function(
                client_func,
                'tempest.lib.common.rest_client.RestClient.put',
                resp_body,
                to_utf=bytes_body,
                mock_args=[url, payload],
                **kwargs)

    def _test_enable_service(self, bytes_body=False):
        self._check_service_put(self.client.enable_service,
                                'os-services/enable',
                                self.FAKE_SERVICE_RESPONSE,
                                bytes_body,
                                **self.FAKE_SERVICE_REQUEST)

    def _test_disable_service(self, bytes_body=False):
        # Disabled response drops disabled_reason and flips the status flags.
        resp_body = copy.deepcopy(self.FAKE_SERVICE_RESPONSE)
        resp_body.pop('disabled_reason')
        resp_body['disabled'] = True
        resp_body['status'] = 'disabled'
        self._check_service_put(self.client.disable_service,
                                'os-services/disable',
                                resp_body,
                                bytes_body,
                                **self.FAKE_SERVICE_REQUEST)

    def _test_disable_log_reason(self, bytes_body=False):
        resp_body = copy.deepcopy(self.FAKE_SERVICE_RESPONSE)
        resp_body['disabled_reason'] = "disabled for test"
        resp_body['disabled'] = True
        resp_body['status'] = 'disabled'
        kwargs = copy.deepcopy(self.FAKE_SERVICE_REQUEST)
        kwargs.update({"disabled_reason": "disabled for test"})
        self._check_service_put(self.client.disable_log_reason,
                                'os-services/disable-log-reason',
                                resp_body,
                                bytes_body,
                                **kwargs)

    def _test_freeze_host(self, bytes_body=False):
        kwargs = {'host': 'host1@lvm'}
        self.check_service_client_function(
            self.client.freeze_host,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            bytes_body,
            **kwargs)

    def _test_thaw_host(self, bytes_body=False):
        kwargs = {'host': 'host1@lvm'}
        self.check_service_client_function(
            self.client.thaw_host,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            bytes_body,
            **kwargs)

    def test_list_services_with_str_body(self):
        self._test_list_services()

    def test_list_services_with_bytes_body(self):
        self._test_list_services(bytes_body=True)

    def test_list_services_with_params(self):
        mock_args = 'os-services?host=fake-host'
        self._test_list_services(mock_args=mock_args, host='fake-host')

    def test_enable_service_with_str_body(self):
        self._test_enable_service()

    def test_enable_service_with_bytes_body(self):
        self._test_enable_service(bytes_body=True)

    def test_disable_service_with_str_body(self):
        self._test_disable_service()

    def test_disable_service_with_bytes_body(self):
        self._test_disable_service(bytes_body=True)

    def test_disable_log_reason_with_str_body(self):
        self._test_disable_log_reason()

    def test_disable_log_reason_with_bytes_body(self):
        self._test_disable_log_reason(bytes_body=True)

    def test_freeze_host_with_str_body(self):
        self._test_freeze_host()

    def test_freeze_host_with_bytes_body(self):
        self._test_freeze_host(bytes_body=True)

    def test_thaw_host_with_str_body(self):
        self._test_thaw_host()

    def test_thaw_host_with_bytes_body(self):
        self._test_thaw_host(bytes_body=True)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.compat.v1.test.compute_gradient and tf.compute_gradient_error."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
@ops.RegisterGradient("BadGrad")
def _bad_grad(unused_op, grad):
  """Registered gradient for "BadGrad": returns `grad` transposed.

  Deliberately produces the wrong shape so tests can assert that the
  gradient checker detects shape mismatches.
  """
  return array_ops.transpose(grad)
@ops.RegisterGradient("NaNGrad")
def _nan_grad(unused_op, grad):
  """Registered gradient for "NaNGrad": propagates NaN into every entry."""
  return grad * np.nan
class GradientCheckerTest(test.TestCase):
  """Tests compute_gradient / compute_gradient_error on small v1 graphs."""

  @test_util.run_deprecated_v1
  def testAddSimple(self):
    np.random.seed(1)  # Fix seed to avoid flakiness
    with self.session(use_gpu=False):
      # a test case for Add operation
      size = (2, 3)
      x1 = constant_op.constant(2.0, shape=size, name="x1")
      x2 = constant_op.constant(3.0, shape=size, name="x2")
      y = math_ops.add(x1, x2, name="y")
      # checking gradients for x1
      error = gradient_checker.compute_gradient_error(x1, size, y, size)
      tf_logging.info("x1 error = %f", error)
      self.assertLess(error, 1e-4)

  @test_util.run_deprecated_v1
  def testAddSimpleGPU(self):
    np.random.seed(2)  # Fix seed to avoid flakiness
    with self.session():
      # a test case for Add operation
      size = (2, 3)
      x1 = constant_op.constant(2.0, shape=size, name="x1")
      x2 = constant_op.constant(3.0, shape=size, name="x2")
      y = math_ops.add(x1, x2, name="y")
      # checking gradients for x1
      error = gradient_checker.compute_gradient_error(x1, size, y, size)
      tf_logging.info("x1 error = %f", error)
      self.assertLess(error, 1e-4)

  @test_util.run_deprecated_v1
  def testAddCustomized(self):
    np.random.seed(3)  # Fix seed to avoid flakiness
    with self.cached_session():
      # a test case for Add operation
      size = (2, 3)
      x1 = constant_op.constant(
          2.0, shape=size, dtype=dtypes.float64, name="x1")
      x2 = constant_op.constant(
          3.0, shape=size, dtype=dtypes.float64, name="x2")
      y = math_ops.add(x1, x2, name="y")
      # checkint gradients for x2 using a special init_value and delta
      x_init_value = np.asarray(np.arange(6, dtype=np.float64).reshape(2, 3))
      error = gradient_checker.compute_gradient_error(
          x2, size, y, size, x_init_value=x_init_value, delta=1e-2)
      tf_logging.info("x2 error = %f", error)
      self.assertLess(error, 1e-10)

  @test_util.run_deprecated_v1
  def testGather(self):
    np.random.seed(4)  # Fix seed to avoid flakiness
    with self.cached_session():
      p_shape = (4, 2)
      p_size = 8
      index_values = [1, 3]
      y_shape = [2, 2]
      params = constant_op.constant(
          np.arange(p_size).astype(np.float64), shape=p_shape, name="p")
      indices = constant_op.constant(index_values, name="i")
      y = array_ops.gather(params, indices, name="y")
      error = gradient_checker.compute_gradient_error(params, p_shape, y,
                                                      y_shape)
      tf_logging.info("gather error = %f", error)
      self.assertLess(error, 1e-4)

  @test_util.run_deprecated_v1
  def testNestedGather(self):
    np.random.seed(5)  # Fix seed to avoid flakiness
    with self.cached_session():
      p_shape = (8, 2)
      p_size = 16
      index_values = [1, 3, 5, 6]
      index_values2 = [0, 2]
      y2_shape = [2, 2]
      params = constant_op.constant(
          np.arange(p_size).astype(np.float64), shape=p_shape, name="p")
      indices = constant_op.constant(index_values, name="i")
      y = array_ops.gather(params, indices, name="y")
      indices2 = constant_op.constant(index_values2, name="i2")
      y2 = array_ops.gather(y, indices2, name="y2")
      error = gradient_checker.compute_gradient_error(params, p_shape, y2,
                                                      y2_shape)
      tf_logging.info("nested gather error = %f", error)
      self.assertLess(error, 1e-4)

  @test_util.run_deprecated_v1
  def testComplexMul(self):
    with self.cached_session():
      size = ()
      c = constant_op.constant(5 + 7j, dtype=dtypes.complex64)
      x = constant_op.constant(11 - 13j, dtype=dtypes.complex64)
      y = c * x
      analytical, numerical = gradient_checker.compute_gradient(x, size, y,
                                                                size)
      # Jacobian of y = c*x on (re, im) pairs: [[Re c, Im c], [-Im c, Re c]].
      correct = np.array([[5, 7], [-7, 5]])
      self.assertAllEqual(correct, analytical)
      self.assertAllClose(correct, numerical, rtol=1e-4)
      self.assertLess(
          gradient_checker.compute_gradient_error(x, size, y, size), 3e-4)

  @test_util.run_deprecated_v1
  def testComplexConj(self):
    with self.cached_session():
      size = ()
      x = constant_op.constant(11 - 13j, dtype=dtypes.complex64)
      y = math_ops.conj(x)
      analytical, numerical = gradient_checker.compute_gradient(x, size, y,
                                                                size)
      # Conjugation negates the imaginary component only.
      correct = np.array([[1, 0], [0, -1]])
      self.assertAllEqual(correct, analytical)
      self.assertAllClose(correct, numerical, rtol=2e-5)
      self.assertLess(
          gradient_checker.compute_gradient_error(x, size, y, size), 2e-5)

  @test_util.run_deprecated_v1
  def testEmptySucceeds(self):
    with self.cached_session():
      x = array_ops.placeholder(dtypes.float32)
      y = array_ops.identity(x)
      for grad in gradient_checker.compute_gradient(x, (0, 3), y, (0, 3)):
        self.assertEqual(grad.shape, (0, 0))
      error = gradient_checker.compute_gradient_error(x, (0, 3), y, (0, 3))
      self.assertEqual(error, 0)

  def testEmptyFails(self):
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        x = array_ops.placeholder(dtypes.float32)
        with g.gradient_override_map({"Identity": "BadGrad"}):
          y = array_ops.identity(x)
        bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
        with self.assertRaisesRegex(ValueError, bad):
          gradient_checker.compute_gradient(x, (0, 3), y, (0, 3))
        with self.assertRaisesRegex(ValueError, bad):
          gradient_checker.compute_gradient_error(x, (0, 3), y, (0, 3))

  def testNaNGradFails(self):
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        x = array_ops.placeholder(dtypes.float32)
        with g.gradient_override_map({"Identity": "NaNGrad"}):
          y = array_ops.identity(x)
        error = gradient_checker.compute_gradient_error(x, (), y, ())
        # Typical test would assert error < max_err, so assert this test would
        # raise AssertionError, since NaN is not < 1.0.
        with self.assertRaisesRegex(AssertionError, "False is not true"):
          self.assertTrue(error < 1.0)
class MiniMNISTTest(test.TestCase):
  """Gradient checker for a tiny one-hidden-layer MNIST-like network."""

  def _BuildAndTestMiniMNIST(self, param_index, tag):
    """Build the network and gradient-check one parameter.

    Args:
      param_index: index into [inp, hidden_weight, hidden_bias,
        softmax_weight, softmax_bias] choosing which tensor to check.
      tag: label used only in the log message.

    Returns:
      The numeric/analytic gradient error for the chosen parameter.
    """
    # Fix seed to avoid occasional flakiness
    np.random.seed(6)

    # Hyperparameters
    batch = 3
    inputs = 16
    features = 32
    classes = 10

    # Define the parameters
    inp_data = np.random.random_sample(inputs * batch)
    hidden_weight_data = np.random.randn(inputs * features) / np.sqrt(inputs)
    hidden_bias_data = np.random.random_sample(features)
    sm_weight_data = np.random.randn(features * classes) / np.sqrt(features)
    sm_bias_data = np.random.random_sample(classes)

    # special care for labels since they need to be normalized per batch
    label_data = np.random.random(batch * classes).reshape((batch, classes))
    s = label_data.sum(axis=1)
    label_data /= s[:, None]

    with self.session():
      # We treat the inputs as "parameters" here
      inp = constant_op.constant(
          inp_data.tolist(),
          shape=[batch, inputs],
          dtype=dtypes.float64,
          name="inp")
      hidden_weight = constant_op.constant(
          hidden_weight_data.tolist(),
          shape=[inputs, features],
          dtype=dtypes.float64,
          name="hidden_weight")
      hidden_bias = constant_op.constant(
          hidden_bias_data.tolist(),
          shape=[features],
          dtype=dtypes.float64,
          name="hidden_bias")
      softmax_weight = constant_op.constant(
          sm_weight_data.tolist(),
          shape=[features, classes],
          dtype=dtypes.float64,
          name="softmax_weight")
      softmax_bias = constant_op.constant(
          sm_bias_data.tolist(),
          shape=[classes],
          dtype=dtypes.float64,
          name="softmax_bias")

      # List all the parameter so that we can test them one at a time
      all_params = [
          inp, hidden_weight, hidden_bias, softmax_weight, softmax_bias
      ]
      param_sizes = [
          [batch, inputs],  # inp
          [inputs, features],  # hidden_weight,
          [features],  # hidden_bias
          [features, classes],  # softmax_weight,
          [classes]
      ]  # softmax_bias

      # Now, Building MNIST
      features = nn_ops.relu(
          nn_ops.xw_plus_b(inp, hidden_weight, hidden_bias), name="features")
      logits = nn_ops.xw_plus_b(
          features, softmax_weight, softmax_bias, name="logits")
      labels = constant_op.constant(
          label_data.tolist(),
          shape=[batch, classes],
          dtype=dtypes.float64,
          name="labels")
      cost = nn_ops.softmax_cross_entropy_with_logits(
          labels=labels, logits=logits, name="cost")

      # Test the gradients.
      err = gradient_checker.compute_gradient_error(
          all_params[param_index],
          param_sizes[param_index],
          cost, [batch],
          delta=1e-5)

    tf_logging.info("Mini MNIST: %s gradient error = %g", tag, err)
    return err

  @test_util.run_deprecated_v1
  def testInputGradient(self):
    self.assertLess(self._BuildAndTestMiniMNIST(0, "input"), 1e-8)

  @test_util.run_deprecated_v1
  def testHiddenWeightGradient(self):
    self.assertLess(self._BuildAndTestMiniMNIST(1, "hidden_weight"), 1e-8)

  @test_util.run_deprecated_v1
  def testHiddenBiasGradient(self):
    self.assertLess(self._BuildAndTestMiniMNIST(2, "hidden_bias"), 1e-8)

  @test_util.run_deprecated_v1
  def testSoftmaxWeightGradient(self):
    self.assertLess(self._BuildAndTestMiniMNIST(3, "softmax_weight"), 1e-8)

  @test_util.run_deprecated_v1
  def testSoftmaxBiasGradient(self):
    self.assertLess(self._BuildAndTestMiniMNIST(4, "softmax_bias"), 1e-8)
# Allow running this test module directly.
if __name__ == "__main__":
  test.main()
| |
from elasticsearch import Elasticsearch
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.ioff()
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.dates import AutoDateLocator, AutoDateFormatter
import numpy as np
from scipy import stats
import datetime as dt
import math
import json
import pprint
# Read connection settings from the local "config" file, one value per line.
# NOTE(review): the file is opened "r+" (read/write) but only read here;
# "r" would suffice -- confirm nothing relies on the write mode.
with open("config", "r+") as txt:
    contents = list(map(str.rstrip, txt))
# Lines 5 and 6 of the config (0-based indices 4/5) hold the Elasticsearch
# host and port.
esCon = Elasticsearch([{
    'host': contents[4], 'port': contents[5]
}], timeout=10000)
# Shared pretty-printer used when dumping query results to stdout.
pp = pprint.PrettyPrinter(indent=4)
def utcDate(time):
    """Return the POSIX timestamp *time* as a timezone-aware UTC datetime."""
    return dt.datetime.fromtimestamp(time, tz=dt.timezone.utc)
def utcStamp(time):
    """Parse a 'YYYY-MM-DDThh:mm:ss' string as UTC and return its POSIX timestamp."""
    parsed = dt.datetime.strptime(time, '%Y-%m-%dT%X')
    return parsed.replace(tzinfo=dt.timezone.utc).timestamp()
# How long Elasticsearch keeps each scroll context alive between pages.
scrollPreserve = "3m"
# Date window swept by main(), inclusive of startDate.
startDate = "2017-02-14T00:00:00"
endDate = "2017-02-15T00:00:00"
utcStart = utcStamp(startDate)
utcEnd = utcStamp(endDate)
# One day in seconds.  The original spelled this as nested np.multiply
# calls; plain integer arithmetic is clearer and equivalent here.
oneDay = 24 * 60 * 60
# Maximum hits requested per Elasticsearch page.
querySize = 10000
def esConAgg(field):
    """Return a 1-D numpy array of the distinct values of *field*.

    Runs a terms aggregation over the "net-health" index and collects the
    bucket keys.  An empty aggregation yields an empty array.

    NOTE(review): a terms aggregation returns only the top buckets by
    default (typically 10) -- confirm that is enough distinct sites.
    """
    queryBody = {"aggs": {
        "dev": {
            "terms": {"field": field}
        }
    }
    }
    scannerCon = esCon.search(index="net-health",
                              body=queryBody,
                              doc_type="DIGIRECO",
                              size=querySize,
                              search_type="query_then_fetch",
                              scroll=scrollPreserve)
    buckets = scannerCon['aggregations']['dev']['buckets']
    # BUG: the original guarded with `buckets == 0`, comparing a *list*
    # to an int -- never True -- so an empty aggregation always returned
    # an empty array rather than None.  main() iterates the result, so
    # the empty-array behaviour is kept (None would crash the caller).
    return np.array([bucket['key'] for bucket in buckets])
def esClear(ids):
    # Release the server-side scroll contexts accumulated during a query;
    # returns the raw clear_scroll response.
    scannerCon = esCon.clear_scroll(scroll_id=ids)
    return scannerCon
def _appendNetRow(workflow, key, row):
    # Append one [throughput, src-metric, dest-metric] row to workflow[key],
    # creating the (1, 3) array on first use.
    if key not in workflow:
        workflow[key] = np.reshape(np.array(row), (1, 3))
    else:
        workflow[key] = np.vstack((workflow[key], np.array(row)))


def esConQuery(src, dest, slot, dayStart=None):
    """
    Scroll one day of records for a (src, dest, slot) tuple and group the
    per-workflow metric rows.

    :param src:  work-site name, matched against the ``src`` field.
    :param dest: data-site name, matched against the ``dest`` field.
    :param slot: execution slot, matched against ``LastRemoteHost``.
    :param dayStart:
        POSIX timestamp of the start of the day to query.  Defaults to the
        module-level ``utcStart`` (the original behaviour).  NOTE(review):
        the original always used the global, so every call queried the
        SAME first day even while main() advanced over the date range;
        callers sweeping dates should pass their own value.

    :returns:
        ``None`` when no records match; otherwise a dict mapping workflow
        name to a dict of (n, 3) numpy arrays under the keys 'srcPacket',
        'srcLatency', 'destPacket' and 'destLatency'.  Column 0 holds the
        mean throughput, columns 1/2 the src/dest metric (0.0 when absent).
    """
    if dayStart is None:
        dayStart = utcStart
    queryBody = {"query":
                 {"bool": {
                     "must": [
                         {"match":
                          {"src": src}
                          },
                         {"match":
                          {"dest": dest}
                          },
                         {"match":
                          {"LastRemoteHost": slot}
                          },
                         {"range": {
                             "beginDate": {
                                 "gt": int(dayStart),
                                 "lt": int(dayStart + oneDay)
                             }
                         }
                         }
                     ]
                 }},
                 "sort": {"beginDate": {"order": "desc"}}
                 }
    scannerCon = esCon.search(index="net-health",
                              doc_type="DIGIRECO",
                              body=queryBody,
                              size=querySize,
                              search_type="query_then_fetch",
                              scroll=scrollPreserve)
    scrollIdCon = scannerCon['_scroll_id']
    conTotalRec = scannerCon["hits"]["total"]
    idList = []
    arrRet = {}
    if conTotalRec == 0:
        return None
    while conTotalRec > 0:
        idList.append(str(scrollIdCon))
        responseCon = esCon.scroll(scroll_id=scrollIdCon,
                                   scroll=scrollPreserve)
        # BUG FIX: track the (possibly renewed) scroll id -- the original
        # reused the initial id for every page -- and bail out if a page
        # comes back empty so an exhausted scroll cannot loop forever.
        scrollIdCon = responseCon.get('_scroll_id', scrollIdCon)
        page = responseCon["hits"]["hits"]
        if not page:
            break
        for hit in page:
            source = hit["_source"]
            # Ensure an entry exists for this workflow even when the hit
            # carries none of the optional metrics (matches the original).
            workflow = arrRet.setdefault(str(source["Workflow"]), {})
            srcPkt = source.get("meansrcPacket")
            destPkt = source.get("meandestPacket")
            srcLat = source.get("meansrcLatency")
            destLat = source.get("meandestLatency")
            if 'meansrcThroughput' in source:
                thr = source["meansrcThroughput"]
                if srcPkt is not None or destPkt is not None:
                    _appendNetRow(workflow, 'srcPacket',
                                  [thr,
                                   srcPkt if srcPkt is not None else 0.0,
                                   destPkt if destPkt is not None else 0.0])
                if srcLat is not None or destLat is not None:
                    _appendNetRow(workflow, 'srcLatency',
                                  [thr,
                                   srcLat if srcLat is not None else 0.0,
                                   destLat if destLat is not None else 0.0])
            if 'meandestThroughput' in source:
                thr = source["meandestThroughput"]
                # BUG FIX: the original stored these rows under 'srcPacket'
                # and 'srcLatency' again (copy-paste), so the 'destPacket'
                # and 'destLatency' branches in main() could never run.
                if srcPkt is not None or destPkt is not None:
                    _appendNetRow(workflow, 'destPacket',
                                  [thr,
                                   srcPkt if srcPkt is not None else 0.0,
                                   destPkt if destPkt is not None else 0.0])
                if srcLat is not None or destLat is not None:
                    _appendNetRow(workflow, 'destLatency',
                                  [thr,
                                   srcLat if srcLat is not None else 0.0,
                                   destLat if destLat is not None else 0.0])
        conTotalRec -= len(page)
    esClear(idList)
    return arrRet
#print(esConAgg("src"))
#print(esConAgg("dest"))
def main(utcStart):
    """Sweep the date range [utcStart, utcEnd] one day at a time.

    For every (work site, data site, slot) combination, fit linear
    regressions of packet-loss and latency metrics against throughput.
    When both fits are significant (p < 0.05) with opposite-signed slopes,
    save a scatter-plot page to PDFOut/CMS_Network.pdf and append the fit
    statistics to WorkOut/Network.out, tallying occurrences per workflow
    and per slot in countBit.

    NOTE(review): esConQuery as originally written only fills the
    'srcPacket'/'srcLatency' keys, so the 'destPacket'/'destLatency'
    branches below appear unreachable -- verify against esConQuery.
    NOTE(review): esConQuery reads the module-global utcStart, not the
    local one advanced here, so every iteration seems to query the same
    day -- confirm intended behaviour.
    """
    with PdfPages('PDFOut/CMS_Network.pdf') as pc:
        # PDF metadata for the generated report.
        d = pc.infodict()
        d['Title'] = 'CMS Scatter Plots'
        d['Author'] = u'Jerrod T. Dixon\xe4nen'
        d['Subject'] = 'Plot of network affects on grid jobs'
        d['Keywords'] = 'PdfPages matplotlib CMS grid'
        d['CreationDate'] = dt.datetime.today()
        d['ModDate'] = dt.datetime.today()
        # countBit tallies significant correlations per workflow/slot plus
        # an overall "total".  NOTE(review): if "total" stays 0 the report
        # section at the end divides by zero -- confirm acceptable.
        countBit = {}
        countBit["total"] = 0
        with open("WorkOut/Network.out", "w") as ww:
            while utcStart <= utcEnd:
                print("Agg Query")
                srcSites = esConAgg("src")
                destSites = esConAgg("dest")
                prevSites = esConAgg("LastRemoteHost")
                workDate = utcDate(utcStart)
                for ping in srcSites:
                    for pong in destSites:
                        for slot in prevSites:
                            print("Main Query")
                            qResults = esConQuery(ping, pong, slot)
                            # i.e. "qResults is not None" -- skip empty days.
                            if not type(qResults) == type(None):
                                ww.write(str(workDate.strftime('%d-%B-%Y') + "\n"))
                                # hit is the workflow name keying the metric arrays.
                                for hit in qResults:
                                    if str(hit) not in countBit:
                                        countBit[str(hit)] = 0
                                    if str(slot) not in countBit:
                                        countBit[str(slot)] = 0
                                    # --- source-side throughput vs packet loss ---
                                    if 'srcPacket' in qResults[hit]:
                                        srcPacket = qResults[hit]['srcPacket']
                                        print("srcPacket")
                                        pp.pprint(srcPacket)
                                        # c_* fits col 1 (src metric), e_* fits col 2 (dest metric).
                                        cslope, cintercept, cr_value, cp_value, cstd_err = stats.linregress(srcPacket[:,0],srcPacket[:,1])
                                        eslope, eintercept, er_value, ep_value, estd_err = stats.linregress(srcPacket[:,0],srcPacket[:,2])
                                        # Record only significant fits with opposite-signed slopes.
                                        if cp_value < 0.05 and ep_value < 0.05:
                                            if (cslope > 0 and eslope < 0) or (cslope < 0 and eslope > 0):
                                                countBit[str(hit)] += 1
                                                countBit[str(slot)] += 1
                                                countBit["total"] += 1
                                                figsT, axsT = plt.subplots(2, sharex=True)
                                                axsT[0].scatter(srcPacket[:,0],srcPacket[:,1])
                                                axsT[1].scatter(srcPacket[:,0],srcPacket[:,2])
                                                axsT[0].set_ylabel("meansrcPacket")
                                                axsT[1].set_ylabel("meandestPacket")
                                                axsT[1].set_xlabel("Source Throughput (" + hit + ")")
                                                axsT[0].set_title(str(ping + " to " + pong + " at " + slot + " on " + workDate.strftime('%d-%B-%Y')))
                                                pc.savefig(figsT)
                                                plt.close(figsT)
                                                ww.write(str("Workflow: " + hit + "\n"))
                                                ww.write(str("Work site: " + ping + "\n"))
                                                ww.write(str("Data site: " + pong + "\n"))
                                                ww.write(str("Slot: " + slot + "\n"))
                                                ww.write(str("Throughput value measured at work site\n"))
                                                ww.write(str("X: Source Throughput Y: meansrcPacket\n"))
                                                ww.write(str("c_Slope: " + str(cslope) + "\n"))
                                                ww.write(str("c_Intercept: " + str(cintercept) + "\n"))
                                                ww.write(str("c_R Value: " + str(cr_value) + "\n"))
                                                ww.write(str("c_P Value: " + str(cp_value) + "\n"))
                                                ww.write(str("c_std err: " + str(cstd_err) + "\n"))
                                                # NOTE(review): this label repeats "meansrcPacket";
                                                # the e_* block fits the dest column -- looks like
                                                # copy-paste; confirm before relying on the report text.
                                                ww.write(str("X: Source Throughput Y: meansrcPacket\n"))
                                                ww.write(str("e_Slope: " + str(eslope) + "\n"))
                                                ww.write(str("e_Intercept: " + str(eintercept) + "\n"))
                                                ww.write(str("e_R Value: " + str(er_value) + "\n"))
                                                ww.write(str("e_P Value: " + str(ep_value) + "\n"))
                                                ww.write(str("e_std err: " + str(estd_err) + "\n"))
                                                ww.write(str("\n\n"))
                                    # --- source-side throughput vs latency ---
                                    if 'srcLatency' in qResults[hit]:
                                        srcLatency = qResults[hit]['srcLatency']
                                        print("srcLatency")
                                        pp.pprint(srcLatency)
                                        # Here e_* fits col 1 and c_* fits col 2 (order swapped
                                        # relative to the packet section above).
                                        eslope, eintercept, er_value, ep_value, estd_err = stats.linregress(srcLatency[:,0],srcLatency[:,1])
                                        cslope, cintercept, cr_value, cp_value, cstd_err = stats.linregress(srcLatency[:,0],srcLatency[:,2])
                                        if ep_value < 0.05 and cp_value < 0.05:
                                            if (cslope > 0 and eslope < 0) or (cslope < 0 and eslope > 0):
                                                countBit[str(hit)] += 1
                                                countBit[str(slot)] += 1
                                                countBit["total"] += 1
                                                figdT, axdT = plt.subplots(2, sharex=True)
                                                axdT[0].scatter(srcLatency[:,0],srcLatency[:,1])
                                                axdT[1].scatter(srcLatency[:,0],srcLatency[:,2])
                                                axdT[0].set_ylabel("meansrcLatency")
                                                axdT[1].set_ylabel("meandestLatency")
                                                axdT[1].set_xlabel("Destination Throughput (" + hit + ")")
                                                axdT[0].set_title(str(ping + " to " + pong + " on " + workDate.strftime('%d-%B-%Y')))
                                                pc.savefig(figdT)
                                                plt.close(figdT)
                                                ww.write(str("Workflow: " + hit + "\n"))
                                                ww.write(str("Work site: " + ping + "\n"))
                                                ww.write(str("Data site: " + pong + "\n"))
                                                ww.write(str("Slot: " + slot + "\n"))
                                                ww.write(str("Throughput value measured at data site\n"))
                                                ww.write(str("X: Destination Throughput Y: meansrcLatency\n"))
                                                ww.write(str("c_Slope: " + str(cslope) + "\n"))
                                                ww.write(str("c_Intercept: " + str(cintercept) + "\n"))
                                                ww.write(str("c_R Value: " + str(cr_value) + "\n"))
                                                ww.write(str("c_P Value: " + str(cp_value) + "\n"))
                                                ww.write(str("c_std err: " + str(cstd_err) + "\n"))
                                                ww.write(str("X: Destination Throughput Y: meandestLatency\n"))
                                                ww.write(str("e_Slope: " + str(eslope) + "\n"))
                                                ww.write(str("e_Intercept: " + str(eintercept) + "\n"))
                                                ww.write(str("e_R Value: " + str(er_value) + "\n"))
                                                ww.write(str("e_P Value: " + str(ep_value) + "\n"))
                                                ww.write(str("e_std err: " + str(estd_err) + "\n"))
                                                ww.write(str("\n\n"))
                                    # --- destination-side throughput vs packet loss ---
                                    # NOTE(review): key never produced by the original
                                    # esConQuery -- see docstring.
                                    if 'destPacket' in qResults[hit]:
                                        destPacket = qResults[hit]['destPacket']
                                        print("destPacket")
                                        pp.pprint(destPacket)
                                        cslope, cintercept, cr_value, cp_value, cstd_err = stats.linregress(destPacket[:,0],destPacket[:,1])
                                        eslope, eintercept, er_value, ep_value, estd_err = stats.linregress(destPacket[:,0],destPacket[:,2])
                                        if cp_value < 0.05 and ep_value < 0.05:
                                            if (cslope > 0 and eslope < 0) or (cslope < 0 and eslope > 0):
                                                countBit[str(hit)] += 1
                                                countBit[str(slot)] += 1
                                                countBit["total"] += 1
                                                figsT, axsT = plt.subplots(2, sharex=True)
                                                axsT[0].scatter(destPacket[:,0],destPacket[:,1])
                                                axsT[1].scatter(destPacket[:,0],destPacket[:,2])
                                                axsT[0].set_ylabel("meansrcPacket")
                                                axsT[1].set_ylabel("meandestPacket")
                                                axsT[1].set_xlabel("Destination Throughput (" + hit + ")")
                                                axsT[0].set_title(str(ping + " to " + pong + " on " + workDate.strftime('%d-%B-%Y')))
                                                pc.savefig(figsT)
                                                plt.close(figsT)
                                                ww.write(str("Workflow: " + hit + "\n"))
                                                ww.write(str("Work site: " + ping + "\n"))
                                                ww.write(str("Data site: " + pong + "\n"))
                                                ww.write(str("Slot: " + slot + "\n"))
                                                ww.write(str("Throughput value measured at work site\n"))
                                                ww.write(str("X: Destination Throughput Y: meansrcPacket\n"))
                                                ww.write(str("c_Slope: " + str(cslope) + "\n"))
                                                ww.write(str("c_Intercept: " + str(cintercept) + "\n"))
                                                ww.write(str("c_R Value: " + str(cr_value) + "\n"))
                                                ww.write(str("c_P Value: " + str(cp_value) + "\n"))
                                                ww.write(str("c_std err: " + str(cstd_err) + "\n"))
                                                # NOTE(review): repeated "meansrcPacket" label;
                                                # likely copy-paste -- confirm.
                                                ww.write(str("X: Destination Throughput Y: meansrcPacket\n"))
                                                ww.write(str("e_Slope: " + str(eslope) + "\n"))
                                                ww.write(str("e_Intercept: " + str(eintercept) + "\n"))
                                                ww.write(str("e_R Value: " + str(er_value) + "\n"))
                                                ww.write(str("e_P Value: " + str(ep_value) + "\n"))
                                                ww.write(str("e_std err: " + str(estd_err) + "\n"))
                                                ww.write(str("\n\n"))
                                    # --- destination-side throughput vs latency ---
                                    # NOTE(review): key never produced by the original
                                    # esConQuery -- see docstring.
                                    if 'destLatency' in qResults[hit]:
                                        destLatency = qResults[hit]['destLatency']
                                        print("destLatency")
                                        pp.pprint(destLatency)
                                        eslope, eintercept, er_value, ep_value, estd_err = stats.linregress(destLatency[:,0],destLatency[:,1])
                                        cslope, cintercept, cr_value, cp_value, cstd_err = stats.linregress(destLatency[:,0],destLatency[:,2])
                                        if ep_value < 0.05 and cp_value < 0.05:
                                            if (cslope > 0 and eslope < 0) or (cslope < 0 and eslope > 0):
                                                countBit[str(hit)] += 1
                                                countBit[str(slot)] += 1
                                                countBit["total"] += 1
                                                figdT, axdT = plt.subplots(2, sharex=True)
                                                axdT[0].scatter(destLatency[:,0],destLatency[:,1])
                                                axdT[1].scatter(destLatency[:,0],destLatency[:,2])
                                                axdT[0].set_ylabel("meansrcLatency")
                                                axdT[1].set_ylabel("meandestLatency")
                                                axdT[1].set_xlabel("Destination Throughput (" + hit + ")")
                                                axdT[0].set_title(str(ping + " to " + pong + " on " + workDate.strftime('%d-%B-%Y')))
                                                pc.savefig(figdT)
                                                plt.close(figdT)
                                                ww.write(str("Workflow: " + hit + "\n"))
                                                ww.write(str("Work site: " + ping + "\n"))
                                                ww.write(str("Data site: " + pong + "\n"))
                                                ww.write(str("Slot: " + slot + "\n"))
                                                ww.write(str("Throughput value measured at data site\n"))
                                                ww.write(str("X: Destination Throughput Y: meansrcLatency\n"))
                                                ww.write(str("c_Slope: " + str(cslope) + "\n"))
                                                ww.write(str("c_Intercept: " + str(cintercept) + "\n"))
                                                ww.write(str("c_R Value: " + str(cr_value) + "\n"))
                                                ww.write(str("c_P Value: " + str(cp_value) + "\n"))
                                                ww.write(str("c_std err: " + str(cstd_err) + "\n"))
                                                ww.write(str("X: Destination Throughput Y: meandestLatency\n"))
                                                ww.write(str("e_Slope: " + str(eslope) + "\n"))
                                                ww.write(str("e_Intercept: " + str(eintercept) + "\n"))
                                                ww.write(str("e_R Value: " + str(er_value) + "\n"))
                                                ww.write(str("e_P Value: " + str(ep_value) + "\n"))
                                                ww.write(str("e_std err: " + str(estd_err) + "\n"))
                                                ww.write(str("\n\n"))
                # Advance the local day pointer (esConQuery uses the global).
                utcStart = utcStart + oneDay
            # Summary section: per-key occurrence fractions of the total.
            ww.write("\n\n\n")
            counter = 0
            for hit in countBit:
                if hit != "total":
                    counter += countBit[str(hit)]
                    ww.write(str(hit + " occurs " + str(countBit[str(hit)]/countBit["total"]) + "\n"))
            ww.write("\n")
            ww.write("Total occurs " + str(counter/countBit["total"]))
#axC[1].scatter(destRes[:,0],destRes[:,1])
#axC[1].set_ylabel("meanCpuEff")
# Run Main code
# Entry point: sweep the configured date range, producing the PDF of
# scatter plots and the text report.
print("start")
main(utcStart)
print("finish")
| |
# stdlib
import hashlib
import re
from os import path
# 3rd-party modules
from lxml.builder import E
# local modules
from jnpr.junos.utils.util import Util
from jnpr.junos.utils.scp import SCP
from jnpr.junos.exception import SwRollbackError, RpcTimeoutError, RpcError
"""
Software Installation Utilities
"""
__all__ = ['SW']
def _hashfile(afile, hasher, blocksize=65536):
    """Feed *afile* through *hasher* in blocksize-byte chunks; return the hex digest."""
    chunk = afile.read(blocksize)
    while chunk:
        hasher.update(chunk)
        chunk = afile.read(blocksize)
    return hasher.hexdigest()
class SW(Util):
"""
Software Utility class, used to perform a software upgrade and
associated functions. These methods have been tested on
*simple deployments*. Refer to **install** for restricted
use-cases for software upgrades.
**Primary methods:**
* :meth:`install`: perform the entire software installation process
* :meth:`reboot`: reboots the system for the new image to take effect
* :meth:`poweroff`: shutdown the system
**Helpers:** (Useful as standalone as well)
* :meth:`put`: SCP put package file onto Junos device
* :meth:`pkgadd`: performs the 'request' operation to install the package
* :meth:`validate`: performs the 'request' to validate the package
**Miscellaneous:**
* rollback: same as 'request software rollback'
* inventory: (property) provides file info for current and rollback
images on the device
"""
def __init__(self, dev):
    """
    Bind the utility to *dev* and cache routing-engine topology flags.
    """
    Util.__init__(self, dev)
    self._dev = dev
    # Facts such as 'version_RE0'/'version_RE1' reveal how many REs exist.
    self._RE_list = [fact for fact in dev.facts.keys()
                     if fact.startswith('version_RE')]
    self._multi_RE = len(self._RE_list) > 1
    # A multi-RE device reporting VC capability is treated as a virtual chassis.
    self._multi_VC = bool(self._multi_RE is True and
                          dev.facts.get('vc_capable') is True)
    self._mixed_VC = dev.facts.get('vc_mode') == 'Mixed'
# -----------------------------------------------------------------------
# CLASS METHODS
# -----------------------------------------------------------------------
@classmethod
def local_sha256(cls, package):
    """
    Computes the SHA-256 value on the package file.

    :param str package:
        File-path to the package (\*.tgz) file on the local server

    :returns: SHA-256 checksum (str)
    :raises IOError: when **package** file does not exist
    """
    # BUG FIX: close the file deterministically instead of leaking the
    # handle until garbage collection.
    with open(package, 'rb') as fileobj:
        return _hashfile(fileobj, hashlib.sha256())
@classmethod
def local_md5(cls, package):
    """
    Computes the MD5 checksum value on the local package file.

    :param str package:
        File-path to the package (\*.tgz) file on the local server

    :returns: MD5 checksum (str)
    :raises IOError: when **package** file does not exist
    """
    # BUG FIX: close the file deterministically instead of leaking the
    # handle until garbage collection.
    with open(package, 'rb') as fileobj:
        return _hashfile(fileobj, hashlib.md5())
@classmethod
def local_sha1(cls, package):
    """
    Computes the SHA1 checksum value on the local package file.

    :param str package:
        File-path to the package (\*.tgz) file on the local server

    :returns: SHA1 checksum (str)
    :raises IOError: when **package** file does not exist
    """
    # BUG FIX: close the file deterministically instead of leaking the
    # handle until garbage collection.
    with open(package, 'rb') as fileobj:
        return _hashfile(fileobj, hashlib.sha1())
@classmethod
def progress(cls, dev, report):
    """Default progress reporter: emit '<hostname>: <report>' to stdout."""
    print("{0}: {1}".format(dev.hostname, report))
# -------------------------------------------------------------------------
# put - SCP put the image onto the device
# -------------------------------------------------------------------------
def put(self, package, remote_path='/var/tmp', progress=None):
    """
    SCP 'put' the package file from the local server to the remote device.

    :param str package:
        File path to the package file on the local file system

    :param str remote_path:
        The directory on the device where the package will be copied to.

    :param func progress:
        Callback function to indicate progress.  If set to ``True``
        uses :meth:`scp._scp_progress` for basic reporting by default.
        See that class method for details.
    """
    # execute the secure-copy with the Python SCP module; the context
    # manager opens the SCP channel and closes it when the copy ends.
    with SCP(self._dev, progress=progress) as scp:
        scp.put(package, remote_path)
# -------------------------------------------------------------------------
# pkgadd - used to perform the 'request system software add ...'
# -------------------------------------------------------------------------
def pkgadd(self, remote_package, **kvargs):
    """
    Issue the 'request system software add' command on the package.

    The "no-validate" options is set by default.  If you want to validate
    the image, do that using the specific :meth:`validate` method.  Also,
    if you want to reboot the device, suggest using the :meth:`reboot` method
    rather ``reboot=True``.

    :param str remote_package:
        The file-path to the install package on the remote (Junos) device.
        On a mixed virtual chassis a list/tuple of paths (one per member
        model) may be given instead.

    :param dict kvargs:
        Any additional parameters to the 'request' command can
        be passed within **kvargs**, following the RPC syntax
        methodology (dash-2-underscore,etc.)

    :returns: ``True`` when the device reports a zero package-result,
        ``False`` otherwise.

    .. todo:: Add way to notify user why installation failed.
    .. warning:: Refer to the restrictions listed in :meth:`install`.
    """
    if isinstance(remote_package, (list, tuple)) and self._mixed_VC:
        # Mixed VC installs take a *set* of packages, one per member model.
        args = dict(no_validate=True, set=remote_package)
    else:
        args = dict(no_validate=True, package_name=remote_package)
    args.update(kvargs)

    rsp = self.rpc.request_package_add(**args)
    got = rsp.getparent()
    rc = int(got.findtext('package-result').strip())
    # Idiom fix: return the comparison directly instead of
    # ``True if rc == 0 else False``.
    return rc == 0
# -------------------------------------------------------------------------
# validate - perform 'request' operation to validate the package
# -------------------------------------------------------------------------
def validate(self, remote_package, **kwargs):
    """
    Issues the 'request' operation to validate the package against the
    config.

    :returns:
        * ``True`` if validation passes
        * error (str) otherwise
    """
    rsp = self.rpc.request_package_validate(
        package_name=remote_package, **kwargs).getparent()
    if int(rsp.findtext('package-result')) == 0:
        return True
    return rsp.findtext('output').strip()
def remote_checksum(self, remote_package, timeout=300):
    """
    Computes the MD5 checksum on the remote device.

    :param str remote_package:
        The file-path on the remote Junos device

    :param int timeout:
        The amount of time (seconds) before declaring an RPC timeout.
        The default RPC timeout is generally around 30 seconds.  So this
        :timeout: value will be used in the context of the checksum process.
        Defaults to 5 minutes (5*60=300)

    :returns:
        * The MD5 checksum string
        * ``None`` when the **remote_package** is not found.
          (The original docstring claimed ``False``; the code returns
          ``None``.)

    :raises RpcError: RPC errors other than **remote_package** not found.
    """
    try:
        rsp = self.rpc.get_checksum_information(path=remote_package, dev_timeout=timeout)
        return rsp.findtext('.//checksum').strip()
    except RpcError as e:
        # A missing file surfaces as an RpcError whose message says so;
        # treat only that case as "not found" and re-raise everything else.
        if hasattr(e, 'errs') and ('No such file or directory' in e.errs['message']):
            return None
        else:
            raise
# -------------------------------------------------------------------------
# safe_copy - copies the package and performs checksum
# -------------------------------------------------------------------------
def safe_copy(self, package, **kvargs):
    """
    Copy the install package safely to the remote device.  By default
    this means to clean the filesystem to make space, perform the
    secure-copy, and then verify the MD5 checksum.

    :param str package:
        file-path to package on local filesystem
    :param str remote_path:
        file-path to directory on remote device
    :param func progress:
        call-back function for progress updates.  If set to ``True`` uses
        :meth:`sw.progress` for basic reporting by default.
    :param bool cleanfs:
        When ``True`` (default) this method will perform the
        "storage cleanup" on the device.
    :param str checksum:
        This is the checksum string as computed on the local system.
        This value will be used to compare the checksum on the
        remote Junos device.

    :returns:
        * ``True`` when the copy was successful
        * ``False`` otherwise
    """
    remote_path = kvargs.get('remote_path', '/var/tmp')
    progress = kvargs.get('progress')
    checksum = kvargs.get('checksum')
    cleanfs = kvargs.get('cleanfs', True)

    def _progress(report):
        # Route progress text to the default reporter or a user callback.
        if progress is True:
            self.progress(self._dev, report)
        elif callable(progress):
            progress(self._dev, report)

    if checksum is None:
        _progress('computing local checksum on: %s' % package)
        checksum = SW.local_md5(package)

    if cleanfs is True:
        dto = self.dev.timeout
        self.dev.timeout = 5 * 60
        # BUG FIX: restore the RPC timeout even if the cleanup RPC raises;
        # the original left the 5-minute timeout in place on failure.
        try:
            _progress('cleaning filesystem ...')
            self.rpc.request_system_storage_cleanup()
        finally:
            self.dev.timeout = dto

    # we want to give the caller an override so we don't always
    # need to copy the file, but the default is to do this, yo!
    self.put(package, remote_path, progress)

    # validate checksum:
    remote_package = remote_path + '/' + path.basename(package)
    _progress('computing remote checksum on: %s' % remote_package)
    remote_checksum = self.remote_checksum(remote_package)

    if remote_checksum != checksum:
        _progress("checksum check failed.")
        return False
    _progress("checksum check passed.")
    return True
# -------------------------------------------------------------------------
# install - complete installation process, but not reboot
# -------------------------------------------------------------------------
def install(self, package=None, pkg_set=None, remote_path='/var/tmp', progress=None,
validate=False, checksum=None, cleanfs=True, no_copy=False,
timeout=1800, **kwargs):
"""
Performs the complete installation of the **package** that includes the
following steps:
1. computes the local MD5 checksum if not provided in :checksum:
2. performs a storage cleanup if :cleanfs: is True
3. SCP copies the package to the :remote_path: directory
4. computes remote MD5 checksum and matches it to the local value
5. validates the package if :validate: is True
6. installs the package
.. warning:: This process has been validated on the following deployments.
Tested:
* Single RE devices (EX, QFX, MX, SRX).
* MX dual-RE
* EX virtual-chassis when all same HW model
* QFX virtual-chassis when all same HW model
* QFX/EX mixed virtual-chassis
* Mixed mode VC
Known Restrictions:
* SRX cluster
* MX virtual-chassis
You can get a progress report on this process by providing a **progress**
callback.
.. note:: You will need to invoke the :meth:`reboot` method explicitly to reboot
the device.
:param str package:
The file-path to the install package tarball on the local filesystem
:param list pkg_set:
The file-paths as list/tuple of the install package tarballs on the local
filesystem which will be installed on mixed VC setup.
:param str remote_path:
The directory on the Junos device where the package file will be
SCP'd to or where the package is stored on the device; the default is ``/var/tmp``.
:param bool validate:
When ``True`` this method will perform a config validation against
the new image
:param str checksum:
MD5 hexdigest of the package file. If this is not provided, then this
method will perform the calculation. If you are planning on using the
same image for multiple updates, you should consider using the
:meth:`local_md5` method to pre calculate this value and then provide to
this method.
:param bool cleanfs:
When ``True`` will perform a 'storeage cleanup' before SCP'ing the
file to the device. Default is ``True``.
:param func progress:
If provided, this is a callback function with a function prototype
given the Device instance and the report string::
def myprogress(dev, report):
print "host: %s, report: %s" % (dev.hostname, report)
If set to ``True``, it uses :meth:`sw.progress`
for basic reporting by default.
:param bool no_copy:
When ``True`` the software package will not be SCP'd to the device.
Default is ``False``.
:param int timeout:
The amount of time (seconds) before declaring an RPC timeout. This
argument was added since most of the time the "package add" RPC
takes a significant amount of time. The default RPC timeout is
generally around 30 seconds. So this :timeout: value will be
used in the context of the SW installation process. Defaults to
30 minutes (30*60=1800)
:param bool force_host:
(Optional) Force the addition of host software package or bundle
(ignore warnings) on the QFX5100 device.
"""
def _progress(report):
if progress is True:
self.progress(self._dev, report)
elif callable(progress):
progress(self._dev, report)
# ---------------------------------------------------------------------
# perform a 'safe-copy' of the image to the remote device
# ---------------------------------------------------------------------
if package is None and pkg_set is None:
raise TypeError(
'install() takes atleast 1 argument package or pkg_set')
if no_copy is False:
copy_ok = True
if isinstance(package, str):
copy_ok = self.safe_copy(package, remote_path=remote_path,
progress=progress, cleanfs=cleanfs,
checksum=checksum)
if copy_ok is False:
return False
elif isinstance(pkg_set, (list, tuple)) and len(pkg_set) > 0:
for pkg in pkg_set:
# To disable cleanfs after 1st iteration
cleanfs = cleanfs and pkg_set.index(pkg) == 0
copy_ok = self.safe_copy(pkg, remote_path=remote_path,
progress=progress,
cleanfs=cleanfs,
checksum=checksum)
if copy_ok is False:
return False
else:
raise ValueError(
'proper value either package or pkg_set is missing')
# ---------------------------------------------------------------------
# at this point, the file exists on the remote device
# ---------------------------------------------------------------------
if package is not None:
remote_package = remote_path + '/' + path.basename(package)
if validate is True: # in case of Mixed VC it cant be used
_progress(
"validating software against current config,"
" please be patient ...")
v_ok = self.validate(remote_package, dev_timeout=timeout)
if v_ok is not True:
return v_ok # will be the string of output
if self._multi_RE is False:
# simple case of device with only one RE
_progress("installing software ... please be patient ...")
add_ok = self.pkgadd(
remote_package,
dev_timeout=timeout,
**kwargs)
return add_ok
else:
# we need to update multiple devices
if self._multi_VC is True:
ok = True
# extract the VC number out of the 'version_RE<n>' string
vc_members = [
re.search(
'(\d+)',
x).group(1) for x in self._RE_list]
for vc_id in vc_members:
_progress(
"installing software on VC member: {0} ... please be"
" patient ...".format(vc_id))
ok &= self.pkgadd(
remote_package,
member=vc_id,
dev_timeout=timeout,
**kwargs)
return ok
else:
# then this is a device with two RE that supports the "re0"
# and "re1" options to the command (M, MX tested only)
ok = True
_progress(
"installing software on RE0 ... please be patient ...")
ok &= self.pkgadd(
remote_package,
re0=True,
dev_timeout=timeout,
**kwargs)
_progress(
"installing software on RE1 ... please be patient ...")
ok &= self.pkgadd(
remote_package,
re1=True,
dev_timeout=timeout,
**kwargs)
return ok
elif isinstance(pkg_set, (list, tuple)) and self._mixed_VC:
pkg_set = [
remote_path +
'/' +
path.basename(pkg) for pkg in pkg_set]
_progress("installing software ... please be patient ...")
add_ok = self.pkgadd(pkg_set, dev_timeout=timeout, **kwargs)
return add_ok
# -------------------------------------------------------------------------
# reboot - system reboot
# -------------------------------------------------------------------------
def reboot(self, in_min=0, at=None):
"""
Perform a system reboot, with optional delay (in minutes) or at
a specified date and time.
If the device is equipped with dual-RE, then both RE will be
rebooted. This code also handles EX/QFX VC.
:param int in_min: time (minutes) before rebooting the device.
:param str at: date and time the reboot should take place. The
string must match the junos cli reboot syntax
:returns:
* reboot message (string) if command successful
:raises RpcError: when command is not successful.
.. todo:: need to better handle the exception event.
"""
if in_min >= 0 and at is None:
cmd = E('request-reboot', E('in', str(in_min)))
else:
cmd = E('request-reboot', E('at', str(at)))
if self._multi_RE is True and self._multi_VC is False:
cmd.append(E('both-routing-engines'))
elif self._mixed_VC is True:
cmd.append(E('all-members'))
try:
rsp = self.rpc(cmd)
got = rsp.getparent().findtext('.//request-reboot-status').strip()
return got
except RpcTimeoutError as err:
raise err
except Exception as err:
if err.rsp.findtext('.//error-severity') != 'warning':
raise err
# -------------------------------------------------------------------------
# poweroff - system shutdown
# -------------------------------------------------------------------------
def poweroff(self, in_min=0):
"""
Perform a system shutdown, with optional delay (in minutes) .
If the device is equipped with dual-RE, then both RE will be
rebooted. This code also handles EX/QFX VC.
:param int in_min: time (minutes) before rebooting the device.
:returns:
* reboot message (string) if command successful
:raises RpcError: when command is not successful.
.. todo:: need to better handle the exception event.
"""
cmd = E('request-power-off', E('in', str(in_min)))
if self._multi_RE is True and self._multi_VC is False:
cmd.append(E('both-routing-engines'))
try:
rsp = self.rpc(cmd)
return rsp.getparent().findtext('.//request-reboot-status').strip()
except Exception as err:
if err.rsp.findtext('.//error-severity') != 'warning':
raise err
# -------------------------------------------------------------------------
# rollback - clears the install request
# -------------------------------------------------------------------------
def rollback(self):
"""
Issues the 'request' command to do the rollback and returns the string
output of the results.
:returns:
Rollback results (str)
"""
rsp = self.rpc.request_package_rollback()
fail_list = ['Cannot rollback', 'rollback aborted']
multi = rsp.xpath('//multi-routing-engine-item')
if multi:
rsp = {}
for x in multi:
re = x.findtext('re-name')
output = x.findtext('output')
if any(x in output for x in fail_list):
raise SwRollbackError(re=re, rsp=output)
else:
rsp[re] = output
return str(rsp)
else:
output = rsp.xpath('//output')[0].text
if any(x in output for x in fail_list):
raise SwRollbackError(rsp=output)
else:
return output
# -------------------------------------------------------------------------
# inventory - file info on current and rollback packages
# -------------------------------------------------------------------------
@property
def inventory(self):
"""
Returns dictionary of file listing information for current and rollback
Junos install packages. This information comes from the /packages
directory.
.. warning:: Experimental method; may not work on all platforms. If
you find this not working, please report issue.
"""
from jnpr.junos.utils.fs import FS
fs = FS(self.dev)
pkgs = fs.ls('/packages')
return dict(current=pkgs['files'].get(
'junos'), rollback=pkgs['files'].get('junos.old'))
| |
"""
Django settings for EDI 2017 project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (edi/config/settings/base.py - 3 = edi/)
APPS_DIR = ROOT_DIR.path('edi')
# Load operating system environment variables and then prepare to use them
env = environ.Env()
# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating System Environment variables have precedence over variables defined in the .env file,
# that is to say variables from the .env files will only be used if not defined
# as environment variables.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'webpack_loader', # webpack
'solo', # Django Solo Singleton Models
'markdownx',
'adminsortable2', # Sortable Models
]
# Apps specific for this project go here.
LOCAL_APPS = [
# custom users app
'edi.users.apps.UsersConfig',
'edi.content'
# Your stuff: custom apps go here
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'edi.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Sasha Cuerda""", 'scuerda@ctdata.org'),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///edi'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD STORAGE SETTINGS
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'edi.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'edi.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'js/bundles/',
# 'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
'STATS_FILE': str(APPS_DIR.path('static/js/webpack-stats.json'))
}
}
# Markdown Settings
MARKDOWNX_MARKDOWN_EXTENSIONS = [
'markdown.extensions.attr_list'
]
| |
# -*- coding: utf-8 -*-
"""
This module implements a class that implements a latex command.
This can be used directly or it can be inherited to make an easier interface
to it.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
from .latex_object import LatexObject
from ..utils import dumps_list
class CommandBase(LatexObject):
"""A class that represents a LaTeX command.
The name of this class (when lowercased) will be the name of this command.
To supply a different name set the ``_latex_name`` attribute.
"""
def __init__(self, arguments=None, options=None, *,
extra_arguments=None):
r"""
Args
----
arguments: None, str, list or `~.Arguments`
The main arguments of the command.
options: None, str, list or `~.Options`
Options of the command. These are placed in front of the arguments.
extra_arguments: None, str, list or `~.Arguments`
Extra arguments for the command. When these are supplied the
options will be placed before them instead of before the normal
arguments. This allows for a way of having one or more arguments
before the options.
"""
self._set_parameters(arguments, 'arguments')
self._set_parameters(options, 'options')
if extra_arguments is None:
self.extra_arguments = None
else:
self._set_parameters(extra_arguments, 'extra_arguments')
super().__init__()
def _set_parameters(self, parameters, argument_type):
parameter_cls = Options if argument_type == 'options' else Arguments
if parameters is None:
parameters = parameter_cls()
elif not isinstance(parameters, parameter_cls):
parameters = parameter_cls(parameters)
# Pass on escaping to generated parameters
parameters._default_escape = self._default_escape
setattr(self, argument_type, parameters)
def __key(self):
"""Return a hashable key, representing the command.
Returns
-------
tuple
"""
return (self.latex_name, self.arguments, self.options,
self.extra_arguments)
def __eq__(self, other):
"""Compare two commands.
Args
----
other: `~.Command` instance
The command to compare this command to
Returns
-------
bool:
If the two instances are equal
"""
if isinstance(other, Command):
return self.__key() == other.__key()
return False
def __hash__(self):
"""Calculate the hash of a command.
Returns
-------
int:
The hash of the command
"""
return hash(self.__key())
def dumps(self):
"""Represent the command as a string in LaTeX syntax.
Returns
-------
str
The LaTeX formatted command
"""
options = self.options.dumps()
arguments = self.arguments.dumps()
if self.extra_arguments is None:
return r'\{command}{options}{arguments}'\
.format(command=self.latex_name, options=options,
arguments=arguments)
extra_arguments = self.extra_arguments.dumps()
return r'\{command}{arguments}{options}{extra_arguments}'\
.format(command=self.latex_name, arguments=arguments,
options=options, extra_arguments=extra_arguments)
class Command(CommandBase):
"""A class that represents a LaTeX command.
This class is meant for one-off commands. When a command of the same type
is used multiple times it is better to subclass `.CommandBase`.
"""
_repr_attributes_mapping = {'command': 'latex_name'}
def __init__(self, command=None, arguments=None, options=None, *,
extra_arguments=None, packages=None):
r"""
Args
----
command: str
Name of the command
arguments: None, str, list or `~.Arguments`
The main arguments of the command.
options: None, str, list or `~.Options`
Options of the command. These are placed in front of the arguments.
extra_arguments: None, str, list or `~.Arguments`
Extra arguments for the command. When these are supplied the
options will be placed before them instead of before the normal
arguments. This allows for a way of having one or more arguments
before the options.
packages: list of `~.Package` instances
A list of the packages that this command requires
Examples
--------
>>> Command('documentclass',
>>> options=Options('12pt', 'a4paper', 'twoside'),
>>> arguments='article').dumps()
'\\documentclass[12pt,a4paper,twoside]{article}'
>>> Command('com')
'\\com'
>>> Command('com', 'first')
'\\com{first}'
>>> Command('com', 'first', 'option')
'\\com[option]{first}'
>>> Command('com', 'first', 'option', 'second')
'\\com{first}[option]{second}'
"""
self.latex_name = command
if packages is not None:
self.packages |= packages
super().__init__(arguments, options, extra_arguments=extra_arguments)
class UnsafeCommand(Command):
"""An unsafe version of the `Command` class.
This class is meant for one-off commands that should not escape their
arguments and options. Use this command with care and only use this when
the arguments are hardcoded.
When an unsafe command of the same type is used multiple times it is better
to subclass `.CommandBase` and set the ``_default_escape`` attribute to
false.
"""
_default_escape = False
class Parameters(LatexObject):
"""The base class used by `~Options` and `~Arguments`.
This class should probably never be used on its own and inhereting from it
is only useful if a class like `~Options` or `~Arguments` is needed again.
"""
def __init__(self, *args, **kwargs):
r"""
Args
----
\*args:
Positional parameters
\*\*kwargs:
Keyword parameters
"""
if len(args) == 1 and hasattr(args[0], '__iter__') and\
not isinstance(args[0], str):
args = args[0]
self._positional_args = list(args)
self._key_value_args = dict(kwargs)
super().__init__()
def __key(self):
"""Generate a unique hashable key representing the parameter object.
Returns
-------
tuple
"""
return tuple(self._list_args_kwargs())
def __eq__(self, other):
"""Compare two parameters.
Returns
-------
bool
"""
return type(self) == type(other) and self.__key() == other.__key()
def __hash__(self):
"""Generate a hash of the parameters.
Returns
-------
int
"""
return hash(self.__key())
def _format_contents(self, prefix, separator, suffix):
"""Format the parameters.
The formatting is done using the three arguments suplied to this
function.
Arguments
---------
prefix: str
separator: str
suffix: str
Returns
-------
str
"""
params = self._list_args_kwargs()
if len(params) <= 0:
return ''
string = prefix + dumps_list(params, escape=self.escape,
token=separator) + suffix
return string
def _list_args_kwargs(self):
"""Make a list of strings representing al parameters.
Returns
-------
list
"""
params = []
params.extend(self._positional_args)
params.extend(['{k}={v}'.format(k=k, v=v) for k, v in
self._key_value_args.items()])
return params
class Options(Parameters):
"""A class implementing LaTex options for a command.
It supports normal positional parameters, as well as key-value pairs.
Options are the part of a command located between the square brackets
(``[]``). The positional parameters will be outputted in order and will
appear before the key-value-pairs. The key value-pairs won't be outputted
in the order in which they were entered
Examples
--------
>>> args = Options('a', 'b', 'c').dumps()
'[a,b,c]'
>>> Options('clip', width=50, height='25em', trim='1 2 3 4').dumps()
'[clip,trim=1 2 3 4,width=50,height=25em]'
"""
def dumps(self):
"""Represent the parameters as a string in LaTeX syntax.
This is to be appended to a command.
Returns
-------
str
"""
return self._format_contents('[', ',', ']')
class Arguments(Parameters):
"""A class implementing LaTex arguments for a command.
It supports normal positional parameters, as well as key-value pairs.
Arguments are the part of a command located between the curly braces
(``{}``). The positional parameters will be outputted in order and will
appear before the key-value-pairs. The key value-pairs won't be outputted
in the order in which they were entered
Examples
--------
>>> args = Arguments('a', 'b', 'c').dumps()
'{a}{b}{c}'
>>> args = Arguments('clip', width=50, height='25em').dumps()
>>> args.dumps()
'{clip}{width=50}{height=25em}'
"""
def dumps(self):
"""Represent the parameters as a string in LaTeX syntax.
This is to be appended to a command.
Returns
-------
str
"""
return self._format_contents('{', '}{', '}')
| |
# Gum sound editor (https://github.com/stackp/Gum)
# Copyright 2009 (C) Pierre Duquesne <stackp@online.fr>
# Licensed under the Revised BSD License.
import gtk
import gobject
import cairo
try:
from gum import fast
except ImportError:
HAVE_FAST = False
else:
HAVE_FAST = True
# -- Base classes for drawing sound visualization.
#
# CairoWidget, LayeredCairoWidget, and LayeredGraphView are defined as
# successive subclasses only for code clarity. Everything could as
# well be stuck in LayeredGraphView.
#
class CairoWidget(gtk.DrawingArea):
__gsignals__ = {"expose-event": "override"}
def __init__(self):
gtk.DrawingArea.__init__(self)
self._redrawing = False
def do_expose_event(self, event):
context = self.window.cairo_create()
context.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
context.clip()
width, height = self.window.get_size()
self.draw(context, width, height)
self._redrawing = False
def redraw(self):
# queue_draw() emits an expose event. Double buffering is used
# automatically in the expose event handler.
if not self._redrawing:
self._redrawing = True
self.queue_draw()
def draw(self, context, width, height):
"""Must be overriden to draw to the cairo context."""
pass
class LayeredCairoWidget(CairoWidget):
"""A widget with several layers.
This widget paints itself by successively passing its context to
layer objects. The draw() method of a layer object must paint to
the context.
"""
def __init__(self):
super(LayeredCairoWidget, self).__init__()
self.layers = []
def draw(self, context, width, height):
for layer in self.layers:
layer.stack(context, width, height)
class LayeredGraphView(LayeredCairoWidget):
"""A layered widget dedicated to a Graph object.
Every time the widget is resized, the new width is passed to the
Graph object.
"""
def __init__(self, graph):
super(LayeredGraphView, self).__init__()
self._graph = graph
self.connect("size_allocate", self.resized)
def resized(self, widget, rect):
self._graph.set_width(rect.width)
# -- The sound visualization widget, composed of several layers:
#
# * waveform
# * selection
# * cursor
#
class GraphView(LayeredGraphView):
"""Sound visualization widget for the main window.
* Four graphical layers: background, waveform, selection, and cursor.
* Mouse event listeners act on models (scroll, selection, middle-click).
"""
__gsignals__ = {'selection-changed': (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
())}
def __init__(self, graph, selection, cursor):
super(GraphView, self).__init__(graph)
self.layers.append(BackgroundLayer(self, selection))
self.layers.append(WaveformLayer(self, graph))
self.layers.append(SelectionLayer(self, selection))
self.layers.append(CursorLayer(self, cursor))
MouseSelection(self, selection)
MouseScroll(self, graph)
MouseMiddleClick(self, graph)
PointerStyle(self, selection)
self.connect("destroy", self.on_destroy)
def on_destroy(self, widget):
# Lose the references to Layers objects, otherwise they do not
# get garbage-collected. I suspect a strange interaction
# between the gobject and the Python reference counting
# systems.
self.layers = []
# -- Layers that can be added to LayeredGraphview.
#
class Layer(object):
"""Base class for layers."""
def __init__(self, layered):
self._layered = layered
def stack(self, context, width, height):
"""Paint the layer on top of the passed context."""
context.set_operator(cairo.OPERATOR_OVER)
self.draw(context, width, height)
def update(self):
self._layered.redraw()
def draw(context, width, height):
raise NotImplemented
class CachedLayer(Layer):
"""Implements surface caching."""
def __init__(self, layered):
self._layered = layered
self._must_draw = True
self._surface = None
layered.connect("size_allocate", self.resized)
def resized(self, widget, rect):
self._surface = None
self._must_draw = True
def update(self):
self._must_draw = True
Layer.update(self)
def stack(self, context, width, height):
if self._surface is None:
surface = context.get_target()
self._surface = surface.create_similar(cairo.CONTENT_COLOR_ALPHA,
width, height)
if self._must_draw:
# clear the cached surface
c = cairo.Context(self._surface)
c.set_operator(cairo.OPERATOR_CLEAR)
c.paint()
c.set_operator(cairo.OPERATOR_OVER)
self.draw(c, width, height)
self._must_draw = False
context.set_source_surface(self._surface, 0, 0)
context.set_operator(cairo.OPERATOR_OVER)
context.paint()
class WaveformLayer(CachedLayer):
"""A layer for LayeredGraphView.
It paints the graph (the waveform).
"""
def __init__(self, layered, graph):
CachedLayer.__init__(self, layered)
self._graph = graph
self.wavecolor = 0.0, 0.47058823529411764, 1.0
graph.changed.connect(self.update)
def draw_channel(self, values, context, ystart, width, height):
# Line at zero
context.set_line_width(1)
context.set_source_rgb(0.2, 0.2, 0.2)
context.move_to(0, ystart + round(height / 2) + 0.5)
context.line_to(width, ystart + round(height / 2) + 0.5)
context.stroke()
# Waveform
context.set_source_rgb(*self.wavecolor)
for x, (mini, maxi) in enumerate(values):
# -1 <= mini <= maxi <= 1
# ystart <= ymin <= ymax <= ystart + height - 1
ymin = ystart + round((-mini * 0.5 + 0.5) * (height - 1))
ymax = ystart + round((-maxi * 0.5 + 0.5) * (height - 1))
if ymin == ymax:
# Fill one pixel
context.rectangle(x, ymin, 1, 1)
context.fill()
else:
# Draw a line from min to max
context.move_to(x + 0.5, ymin)
context.line_to(x + 0.5, ymax)
context.stroke()
if HAVE_FAST:
draw_channel = fast.draw_channel
def draw(self, context, width, height):
channels = self._graph.channels()
numchan = len(channels)
for i in range(numchan):
y = (height / numchan) * i
self.draw_channel(channels[i], context, y, width, height / numchan)
class BackgroundLayer(Layer):
"""A layer for LayeredGraphView.
"""
def __init__(self, layered, selection):
Layer.__init__(self, layered)
self._selection = selection
self._selection.changed.connect(self.update)
def draw(self, context, width, height):
# Black background
context.set_source_rgb(0, 0, 0)
context.paint()
# Selection background
if self._selection.selected():
context.set_source_rgb(0, 0, 0.2)
start, end = self._selection.pixels()
context.rectangle(start, 0, end - start, height)
context.fill()
class SelectionLayer(Layer):
"""A layer for LayeredGraphView.
It highlights the selected area.
"""
def __init__(self, layered, selection):
Layer.__init__(self, layered)
self._selection = selection
self._selection.changed.connect(self.update)
def draw(self, context, width, height):
if self._selection.selected():
start, end = self._selection.pixels()
context.set_source_rgba(0, 0, 0, 0.5)
context.rectangle(0, 0, start, height)
context.rectangle(end, 0, width - end, height)
context.fill()
class CursorLayer(Layer):
def __init__(self, layered, cursor):
Layer.__init__(self, layered)
self._cursor = cursor
self._cursor.changed.connect(self.update)
self.rgba = (1, 1, 1, 0.5)
def draw(self, context, width, height):
x = self._cursor.pixel()
context.set_source_rgba(*self.rgba)
context.set_line_width(1)
context.move_to(x + 0.5, 0)
context.line_to(x + 0.5, height)
context.stroke()
# -- Mouse event listeners that act on models.
#
def near(x, y):
return abs(x - y) < 10
class MouseScroll(object):
    """Listens for mouse wheel events and scrolls a graph.

    Must be attached to a gtk.Widget and a Graph.

    Horizontal scroll (or Alt + vertical scroll) pans the view;
    plain vertical scroll zooms in/out around the pointer position.
    """
    def __init__(self, widget, graph):
        self._graph = graph
        widget.add_events(gtk.gdk.SCROLL_MASK)
        widget.connect("scroll_event", self.scroll_event)

    def scroll_event(self, widget, event):
        # Compare enum values with ==, not "is": GDK enumeration
        # constants are not guaranteed to be singleton objects, so an
        # identity test can fail even when the values are equal.
        mod1 = event.state & gtk.gdk.MOD1_MASK
        left = event.direction == gtk.gdk.SCROLL_LEFT
        right = event.direction == gtk.gdk.SCROLL_RIGHT
        up = event.direction == gtk.gdk.SCROLL_UP
        down = event.direction == gtk.gdk.SCROLL_DOWN
        if left or (up and mod1):
            self._graph.scroll_left()
        elif right or (down and mod1):
            self._graph.scroll_right()
        elif up:
            self._graph.zoom_in_on(event.x)
        elif down:
            self._graph.zoom_out_on(event.x)
class MouseSelection(object):
    """Listens for mouse events and select graph area.
    Must be attached to a gtk.Widget and a Selection.
    """
    def __init__(self, widget, selection):
        self.widget = widget
        self._selection = selection
        # True while the left button is held down (drag in progress).
        self.pressed = False
        widget.add_events(gtk.gdk.BUTTON_PRESS_MASK |
                          gtk.gdk.BUTTON_RELEASE_MASK |
                          gtk.gdk.POINTER_MOTION_MASK |
                          gtk.gdk.POINTER_MOTION_HINT_MASK)
        widget.connect("button_press_event", self.button_press)
        widget.connect("button_release_event", self.button_release)
        widget.connect("motion_notify_event", self.motion_notify)

    def button_press(self, widget, event):
        """Start, restart or extend the selection on a left click."""
        if event.button == 1:
            self.pressed = True
            x = event.x
            pstart, pend = self._selection.pixels()
            # a double click resumes selection.
            if event.type == gtk.gdk._2BUTTON_PRESS:
                self._selection.pin(x)
            # extend towards left
            elif self._selection.selected() and near(pstart, x):
                self._selection.move_start_to_pixel(x)
            # extend towards right
            elif self._selection.selected() and near(pend, x):
                self._selection.move_end_to_pixel(x)
            # start fresh selection
            else:
                self._selection.pin(x)

    def button_release(self, widget, event):
        """End the drag and notify listeners of the new selection."""
        if event.button == 1:
            self.pressed = False
            self.widget.emit("selection-changed")

    def motion_notify(self, widget, event):
        # While dragging, keep extending the selection to the pointer.
        if self.pressed:
            x = event.window.get_pointer()[0]
            self._selection.extend(x)
class MouseMiddleClick(object):
    """Shift the wave display when the middle button is pressed."""

    def __init__(self, widget, graph):
        self.widget = widget
        self.graph = graph
        self.pressed = False
        # Last pointer x position seen while dragging.  Initialized here
        # so the attribute always exists, even before the first press.
        self._xlast = 0
        widget.add_events(gtk.gdk.BUTTON_PRESS_MASK |
                          gtk.gdk.BUTTON_RELEASE_MASK |
                          gtk.gdk.POINTER_MOTION_MASK |
                          gtk.gdk.POINTER_MOTION_HINT_MASK)
        widget.connect("button_press_event", self.button_press)
        widget.connect("button_release_event", self.button_release)
        widget.connect("motion_notify_event", self.motion_notify)

    def button_press(self, widget, event):
        if event.button == 2:
            self.pressed = True
            self._xlast = event.x

    def button_release(self, widget, event):
        if event.button == 2:
            self.pressed = False

    def motion_notify(self, widget, event):
        if self.pressed:
            x = event.window.get_pointer()[0]
            # Pan by the pointer movement, converted from pixels to
            # frames through the graph's current density.
            delta = self._xlast - x
            self._xlast = x
            start, _ = self.graph.view()
            self.graph.move_to(start + delta * self.graph.density)
class PointerStyle(object):
    """Change the pointer style.
    Show a hand when the middle click is pressed, otherwise show
    special cursors if the pointer is next to a selection bound.
    """
    LEFT_SIDE = gtk.gdk.Cursor(gtk.gdk.LEFT_SIDE)
    RIGHT_SIDE = gtk.gdk.Cursor(gtk.gdk.RIGHT_SIDE)
    HAND = gtk.gdk.Cursor(gtk.gdk.HAND1)

    def __init__(self, widget, selection):
        self.widget = widget
        self._selection = selection
        # True while the middle button is held down.
        self.pressed = False
        widget.add_events(gtk.gdk.BUTTON_PRESS_MASK |
                          gtk.gdk.BUTTON_RELEASE_MASK |
                          gtk.gdk.POINTER_MOTION_MASK |
                          gtk.gdk.POINTER_MOTION_HINT_MASK)
        widget.connect("button_press_event", self.button_press)
        widget.connect("button_release_event", self.button_release)
        widget.connect("motion_notify_event", self.motion_notify)

    def button_press(self, widget, event):
        # Middle button down: show the grabbing hand.
        if event.button == 2:
            self.pressed = True
            self.widget.window.set_cursor(self.HAND)

    def button_release(self, widget, event):
        if event.button == 2:
            self.pressed = False
            # None restores the default pointer.
            self.widget.window.set_cursor(None)

    def motion_notify(self, widget, event):
        # When not dragging, show a resize cursor near a selection bound.
        if not self.pressed:
            style = None
            x = event.window.get_pointer()[0]
            if self._selection.selected():
                start, end = self._selection.pixels()
                if near(start, x):
                    style = self.LEFT_SIDE
                elif near(end, x):
                    style = self.RIGHT_SIDE
            self.widget.window.set_cursor(style)
# -- Horizontal scrollbar, subclassed to control a Graph object.
#
class GraphScrollbar(gtk.HScrollbar):
    """An horizontal scrollbar tied to a Graph.
    Acts on a graph object and is updated when the graph
    object changes.
    """
    def __init__(self, graph):
        self._adjustment = gtk.Adjustment(0, 0, 1, 0.1, 0, 1)
        super(GraphScrollbar, self).__init__(self._adjustment)
        self._graph = graph
        self._graph.changed.connect(self.update_scrollbar)
        self.connect("value-changed", self.update_model)
        # When the scrollbar or the graph model changes, this
        # attribute must be set to True to avoid infinite feedback
        # between them. Example of what happens otherwise: scrollbar
        # changes --> graph changes --> scrollbar changes --> ...
        self.inhibit = False

    def update_model(self, widget):
        """Changes the model.
        Called when the scrollbar has been moved by the user.
        """
        if not self.inhibit:
            self.inhibit = True
            self._graph.move_to(self._adjustment.value)
            self.inhibit = False

    def update_scrollbar(self):
        """Changes the scrollbar.
        Called when the model has changed.
        """
        if not self.inhibit:
            self.inhibit = True
            length = self._graph.numframes()
            start, end = self._graph.view()
            if start != end:
                # Page size mirrors the visible span; one "step" is a
                # fifth of a page.
                page_size = (end - start)
                self._adjustment.set_all(value = start,
                                         lower = 0,
                                         upper = length,
                                         page_increment = page_size,
                                         step_increment = page_size / 5.,
                                         page_size = page_size)
            else:
                # empty sound
                self._adjustment.set_all(value = 0,
                                         lower = 0,
                                         upper = 1,
                                         page_increment = 0,
                                         step_increment = 0,
                                         page_size = 1)
            self.inhibit = False
# -- Tests
if __name__ == '__main__':
    from gum.lib.mock import Mock, Fake

    def test_layered():
        """Manual smoke test: open one window per layer configuration."""
        def randomized():
            # Waveform fed with random channel values in [-1, 1].
            from random import random
            channels = [[((random() - 0.5) * 2, (random() - 0.5) * 2)
                         for i in xrange(500)]]
            graph = Mock({"channels": channels,
                          "set_width": None,
                          "frames_info": (0, 0, 0)})
            graph.changed = Fake()
            layered = LayeredGraphView(graph)
            layered.layers.append(WaveformLayer(layered, graph))
            return layered

        def sine():
            # Single-channel sine wave.
            from math import sin
            sine = [sin(2 * 3.14 * 0.01 * x) for x in xrange(500)]
            channels = [[(i, i) for i in sine]]
            graph = Mock({"channels": channels, "set_width": None,
                          "frames_info": (0, 0, 0)})
            graph.changed = Fake()
            layered = LayeredGraphView(graph)
            layered.layers.append(WaveformLayer(layered, graph))
            return layered

        def sines():
            # Two identical sine channels, stacked.
            from math import sin
            sine = [sin(2 * 3.14 * 0.01 * x) for x in xrange(500)]
            channels = [[(i, i) for i in sine], [(i, i) for i in sine]]
            graph = Mock({"channels": channels, "set_width": None,
                          "frames_info": (0, 0, 0)})
            graph.changed = Fake()
            layered = LayeredGraphView(graph)
            layered.layers.append(WaveformLayer(layered, graph))
            return layered

        def test_selection_layer(layerclass):
            # A view with a fixed 20-100 pixel selection.
            graph = Mock({"channels": [], "set_width": None,
                          "frames_info": (0, 0, 0)})
            graph.changed = Fake()
            selection = Mock({"pixels": (20, 100), "selected": True})
            selection.changed = Fake()
            layered = LayeredGraphView(graph)
            layered.layers.append(layerclass(layered, selection))
            return layered

        def background():
            return test_selection_layer(BackgroundLayer)

        def selection():
            return test_selection_layer(SelectionLayer)

        def cursor():
            # A red cursor pinned at pixel 50.
            graph = Mock({"channels": [], "set_width": None,
                          "frames_info": (0, 0, 0)})
            class Cursor: pass
            cursor = Cursor()
            cursor.changed = Fake()
            cursor.pixel = lambda: 50
            layered = LayeredGraphView(graph)
            cursorlayer = CursorLayer(layered, cursor)
            cursorlayer.rgba = (1, 0, 0, 1)
            layered.layers.append(cursorlayer)
            return layered

        layereds = [randomized(), sine(), sines(), selection(), cursor(),
                    background()]
        # Show each configuration in its own window and run the GTK loop.
        for layered in layereds:
            window = gtk.Window()
            window.resize(500, 200)
            window.connect("delete-event", gtk.main_quit)
            window.add(layered)
            window.show_all()
            gtk.main()

    test_layered()
| |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages packages on the Guest VM.
"""
import os
import re
import subprocess
from tempfile import NamedTemporaryFile
from oslo_log import log as logging
import pexpect
from trove.common import exception
from trove.common.exception import ProcessExecutionError
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
LOG = logging.getLogger(__name__)
OK = 0
RUN_DPKG_FIRST = 1
REINSTALL_FIRST = 2
CONFLICT_REMOVED = 3
def getoutput(*cmd):
    """Get the stdout+stderr of a command as text, ignoring errors.

    Similar to commands.getstatusoutput(cmd)[1] of Python 2.
    Returns '' when the command cannot be launched.
    """
    try:
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
    except OSError:
        # ignore errors like program not found
        return ''
    stdout = proc.communicate()[0]
    # Every caller in this module splits and regex-matches the output as
    # text, but on Python 3 Popen returns bytes here, so decode it.
    if isinstance(stdout, bytes):
        stdout = stdout.decode(errors='replace')
    return stdout
class PkgAdminLockError(exception.TroveError):
    """The package manager's administration directory/database is locked."""
    pass


class PkgPermissionError(exception.TroveError):
    """The package command was run with insufficient permissions."""
    pass


class PkgPackageStateError(exception.TroveError):
    """A package is in a bad/inconsistent state and cannot be processed."""
    pass


class PkgNotFoundError(exception.NotFound):
    """The requested package could not be found in the repositories."""
    pass


class PkgTimeout(exception.TroveError):
    """The package command did not produce expected output in time."""
    pass


class PkgScriptletError(exception.TroveError):
    """An RPM package scriptlet failed during installation."""
    pass


class PkgDownloadError(exception.TroveError):
    """The package could not be downloaded (HTTP error, no mirrors)."""
    pass


class PkgSignError(exception.TroveError):
    """GPG key retrieval failed while verifying a package."""
    pass


class PkgBrokenError(exception.TroveError):
    """Installation aborted because of held broken packages."""
    pass


class PkgConfigureError(exception.TroveError):
    """Post-install package configuration (debconf/dpkg) failed."""
    pass
class BasePackagerMixin(object):
    """Shared pexpect process-control plumbing for the packager mixins."""

    def pexpect_kill_proc(self, child):
        """Force-terminate a spawned child process."""
        child.delayafterclose = 1
        child.delayafterterminate = 1
        try:
            child.close(force=True)
        except pexpect.ExceptionPexpect:
            # Close fails to terminate a sudo process on some OSes.
            subprocess.call(['sudo', 'kill', str(child.pid)])

    def pexpect_wait_and_close_proc(self, child):
        """Wait for the child to reach EOF, then close it."""
        child.expect(pexpect.EOF)
        child.close()

    def pexpect_run(self, cmd, output_expects, time_out):
        """Run cmd and wait for one of the output_expects patterns.

        :returns: tuple (index of the matched pattern, match object).
        :raises PkgTimeout: if nothing matches within time_out seconds.
        """
        child = pexpect.spawn(cmd, timeout=time_out)
        try:
            i = child.expect(output_expects)
            match = child.match
            self.pexpect_wait_and_close_proc(child)
        except pexpect.TIMEOUT:
            self.pexpect_kill_proc(child)
            raise PkgTimeout(_("Process timeout after %i seconds.") % time_out)
        return (i, match)
class RPMPackagerMixin(BasePackagerMixin):
    """RPM-level helpers shared by RPM-based distro packagers."""

    def _rpm_remove_nodeps(self, package_name):
        """Remove a conflicting package without its dependencies.

        Sometimes transaction errors happen; the easy way out is to
        remove the conflicting package without dependencies and hope it
        will be replaced by another package.
        """
        try:
            utils.execute("rpm", "-e", "--nodeps", package_name,
                          run_as_root=True, root_helper="sudo")
        except ProcessExecutionError:
            # The %(package)s placeholder requires a mapping argument;
            # passing the bare string makes the log call itself fail.
            LOG.exception(_("Error removing conflict %(package)s"),
                          {'package': package_name})

    def _install(self, packages, time_out):
        """must be overridden by an RPM based PackagerMixin"""
        raise NotImplementedError()

    def _remove(self, package_name, time_out):
        """must be overridden by an RPM based PackagerMixin"""
        raise NotImplementedError()

    def pkg_install(self, packages, config_opts, time_out):
        """Install packages, retrying while conflicts are being removed."""
        result = self._install(packages, time_out)
        if result != OK:
            # Keep retrying as long as each attempt removed a conflict.
            while result == CONFLICT_REMOVED:
                result = self._install(packages, time_out)
            if result != OK:
                raise PkgPackageStateError(_("Cannot install packages."))

    def pkg_is_installed(self, packages):
        """Return True when every named package appears in `rpm -qa`."""
        packages = packages if isinstance(packages, list) else packages.split()
        std_out = getoutput("rpm", "-qa")
        for pkg in packages:
            found = False
            for line in std_out.split("\n"):
                if line.find(pkg) != -1:
                    found = True
                    break
            if not found:
                return False
        return True

    def pkg_version(self, package_name):
        """Return the installed 'version-release' string, or None."""
        std_out = getoutput("rpm", "-qa",
                            "--qf", "'%{VERSION}-%{RELEASE}\n'",
                            package_name)
        # Compile once, outside the loop over the command output.
        regex = re.compile("[0-9.]+-.*")
        for line in std_out.split("\n"):
            matches = regex.match(line)
            if matches:
                return matches.group()
        LOG.error(_("Unexpected output from rpm command. (%(output)s)"),
                  {'output': std_out})

    def pkg_remove(self, package_name, time_out):
        """Removes a package."""
        if self.pkg_version(package_name) is None:
            return
        result = self._remove(package_name, time_out)
        if result != OK:
            raise PkgPackageStateError(_("Package %s is in a bad state.")
                                       % package_name)
class RedhatPackagerMixin(RPMPackagerMixin):
    """Yum-based package management for Red Hat style guests."""

    def _install(self, packages, time_out):
        """Attempts to install packages.

        Returns OK if the packages are installed or a result code if a
        recoverable-error occurred.
        Raises an exception if a non-recoverable error or timeout occurs.
        """
        cmd = "sudo yum --color=never -y install %s" % " ".join(packages)
        # These are regular expressions for pexpect; raw strings keep
        # '\[' from being an invalid escape sequence in Python 3.
        output_expects = [r'\[sudo\] password for .*:',
                          'No package (.*) available.',
                          ('file .* from install of .* conflicts with file'
                           ' from package (.*?)\r\n'),
                          'Error: (.*?) conflicts with .*?\r\n',
                          'Processing Conflict: .* conflicts (.*?)\r\n',
                          '.*scriptlet failed*',
                          'HTTP Error',
                          'No more mirrors to try.',
                          'GPG key retrieval failed:',
                          '.*already installed and latest version',
                          'Updated:',
                          'Installed:']
        LOG.debug("Running package install command: %s", cmd)
        i, match = self.pexpect_run(cmd, output_expects, time_out)
        if i == 0:
            raise PkgPermissionError(_("Invalid permissions."))
        elif i == 1:
            raise PkgNotFoundError(_("Could not find package %s") %
                                   match.group(1))
        elif i in (2, 3, 4):
            # File/package conflict: remove the offender and report so
            # the caller retries the install.
            self._rpm_remove_nodeps(match.group(1))
            return CONFLICT_REMOVED
        elif i == 5:
            raise PkgScriptletError(_("Package scriptlet failed"))
        elif i in (6, 7):
            raise PkgDownloadError(_("Package download problem"))
        elif i == 8:
            raise PkgSignError(_("GPG key retrieval failed"))
        return OK

    def _remove(self, package_name, time_out):
        """Removes a package.

        Returns OK if the package is removed successfully or a result code
        if a recoverable-error occurs.
        Raises an exception if a non-recoverable error or timeout occurs.
        """
        cmd = "sudo yum --color=never -y remove %s" % package_name
        LOG.debug("Running package remove command: %s", cmd)
        output_expects = [r'\[sudo\] password for .*:',
                          'No Packages marked for removal',
                          'Removed:']
        i, match = self.pexpect_run(cmd, output_expects, time_out)
        if i == 0:
            raise PkgPermissionError(_("Invalid permissions."))
        elif i == 1:
            raise PkgNotFoundError(_("Could not find package %s") %
                                   package_name)
        return OK
class DebianPackagerMixin(BasePackagerMixin):
    """Apt/dpkg-based package management for Debian style guests."""

    def _fix(self, time_out):
        """Sometimes you have to run this command before a
        package will install.
        """
        try:
            utils.execute("dpkg", "--configure", "-a", run_as_root=True,
                          root_helper="sudo")
        except ProcessExecutionError:
            LOG.exception(_("Error fixing dpkg"))

    def _fix_package_selections(self, packages, config_opts):
        """Preseed debconf selections so packages configure unattended.

        Sometimes you have to run this command before a package will
        install. This command sets package selections to configure package.
        """
        selections = ""
        for package in packages:
            # Packages may be pinned as 'name=version'.
            m = re.match('(.+)=(.+)', package)
            if m:
                package_name = m.group(1)
            else:
                package_name = package
            std_out = getoutput("sudo", "debconf-show", package_name)
            for line in std_out.split("\n"):
                for selection, value in config_opts.items():
                    m = re.match(".* (.*/%s):.*" % selection, line)
                    if m:
                        selections += ("%s %s string '%s'\n" %
                                       (package_name, m.group(1), value))
        if selections:
            # Open in text mode: 'selections' is a str, and the default
            # NamedTemporaryFile mode ('w+b') accepts only bytes.
            with NamedTemporaryFile(mode='w', delete=False) as f:
                fname = f.name
                f.write(selections)
            try:
                utils.execute("debconf-set-selections", fname,
                              run_as_root=True, root_helper="sudo")
                utils.execute("dpkg", "--configure", "-a",
                              run_as_root=True, root_helper="sudo")
            except ProcessExecutionError:
                raise PkgConfigureError(_("Error configuring package."))
            finally:
                os.remove(fname)

    def _install(self, packages, time_out):
        """Attempts to install packages.

        Returns OK if the packages are installed or a result code if a
        recoverable-error occurred.
        Raises an exception if a non-recoverable error or timeout occurs.
        """
        cmd = "sudo -E DEBIAN_FRONTEND=noninteractive apt-get -y " \
              "--force-yes --allow-unauthenticated -o " \
              "DPkg::options::=--force-confmiss --reinstall " \
              "install %s" % " ".join(packages)
        output_expects = ['.*password*',
                          'E: Unable to locate package (.*)',
                          "Couldn't find package (.*)",
                          "E: Version '.*' for '(.*)' was not found",
                          ("dpkg was interrupted, you must manually run "
                           "'sudo dpkg --configure -a'"),
                          "Unable to lock the administration directory",
                          ("E: Unable to correct problems, you have held "
                           "broken packages."),
                          "Setting up (.*)",
                          "is already the newest version"]
        LOG.debug("Running package install command: %s", cmd)
        i, match = self.pexpect_run(cmd, output_expects, time_out)
        if i == 0:
            raise PkgPermissionError(_("Invalid permissions."))
        elif i in (1, 2, 3):
            raise PkgNotFoundError(_("Could not find package %s") %
                                   match.group(1))
        elif i == 4:
            return RUN_DPKG_FIRST
        elif i == 5:
            raise PkgAdminLockError()
        elif i == 6:
            raise PkgBrokenError()
        return OK

    def _remove(self, package_name, time_out):
        """Removes a package.

        Returns OK if the package is removed successfully or a result code
        if a recoverable-error occurs.
        Raises an exception if a non-recoverable error or timeout occurs.
        """
        cmd = "sudo -E apt-get -y --allow-unauthenticated remove %s" \
              % package_name
        output_expects = ['.*password*',
                          'E: Unable to locate package %s' % package_name,
                          'Package is in a very bad inconsistent state',
                          'Sub-process /usr/bin/dpkg returned an error code',
                          ("dpkg was interrupted, you must manually run "
                           "'sudo dpkg --configure -a'"),
                          "Unable to lock the administration directory",
                          "Removing %s*" % package_name]
        LOG.debug("Running remove package command %s", cmd)
        i, match = self.pexpect_run(cmd, output_expects, time_out)
        if i == 0:
            raise PkgPermissionError(_("Invalid permissions."))
        elif i == 1:
            raise PkgNotFoundError(_("Could not find package %s") %
                                   package_name)
        elif i in (2, 3):
            return REINSTALL_FIRST
        elif i == 4:
            return RUN_DPKG_FIRST
        elif i == 5:
            raise PkgAdminLockError()
        return OK

    def pkg_install(self, packages, config_opts, time_out):
        """Installs packages."""
        try:
            utils.execute("apt-get", "update", run_as_root=True,
                          root_helper="sudo")
        except ProcessExecutionError:
            LOG.exception(_("Error updating the apt sources"))
        result = self._install(packages, time_out)
        if result != OK:
            if result == RUN_DPKG_FIRST:
                self._fix(time_out)
            result = self._install(packages, time_out)
            if result != OK:
                raise PkgPackageStateError(_("Packages are in a bad state."))
        # even after successful install, packages can stay unconfigured
        # config_opts - is dict with name/value for questions asked by
        # interactive configure script
        if config_opts:
            self._fix_package_selections(packages, config_opts)

    def pkg_version(self, package_name):
        """Return the installed version of package_name, or None."""
        std_out = getoutput("apt-cache", "policy", package_name)
        for line in std_out.split("\n"):
            m = re.match(r"\s+Installed: (.*)", line)
            if m:
                version = m.group(1)
                if version == "(none)":
                    version = None
                return version

    def pkg_is_installed(self, packages):
        """Return True when every requested package (and version) matches."""
        packages = packages if isinstance(packages, list) else packages.split()
        for pkg in packages:
            m = re.match('(.+)=(.+)', pkg)
            if m:
                package_name = m.group(1)
                package_version = m.group(2)
            else:
                package_name = pkg
                package_version = None
            installed_version = self.pkg_version(package_name)
            if ((package_version and installed_version == package_version) or
                    (installed_version and not package_version)):
                LOG.debug("Package %s already installed.", package_name)
            else:
                return False
        return True

    def pkg_remove(self, package_name, time_out):
        """Removes a package."""
        if self.pkg_version(package_name) is None:
            return
        result = self._remove(package_name, time_out)
        if result != OK:
            if result == REINSTALL_FIRST:
                # _install expects a list of package names; the bare
                # string would be joined character by character.
                self._install([package_name], time_out)
            elif result == RUN_DPKG_FIRST:
                self._fix(time_out)
            result = self._remove(package_name, time_out)
            if result != OK:
                raise PkgPackageStateError(_("Package %s is in a bad state.")
                                           % package_name)
# Choose the platform-appropriate packager implementation at import time.
if operating_system.get_os() == operating_system.REDHAT:
    class Package(RedhatPackagerMixin):
        """Package manager for Red Hat based guests (yum/rpm)."""
        pass
else:
    class Package(DebianPackagerMixin):
        """Package manager for Debian based guests (apt/dpkg)."""
        pass
| |
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.encoder import Encoder
from cassandra.protocol import ColumnMetadata
from cassandra.query import (bind_params, ValueSequence, PreparedStatement,
BoundStatement, UNSET_VALUE)
from cassandra.cqltypes import Int32Type
from cassandra.util import OrderedDict
from six.moves import xrange
import six
class ParamBindingTest(unittest.TestCase):
    """Tests for bind_params: substitution of Python values into CQL text."""

    def test_bind_sequence(self):
        result = bind_params("%s %s %s", (1, "a", 2.0), Encoder())
        self.assertEqual(result, "1 'a' 2.0")

    def test_bind_map(self):
        result = bind_params("%(a)s %(b)s %(c)s", dict(a=1, b="a", c=2.0), Encoder())
        self.assertEqual(result, "1 'a' 2.0")

    def test_sequence_param(self):
        # ValueSequence renders as a parenthesized CQL tuple.
        result = bind_params("%s", (ValueSequence((1, "a", 2.0)),), Encoder())
        self.assertEqual(result, "(1, 'a', 2.0)")

    def test_generator_param(self):
        # Generators are materialized and rendered as a CQL list.
        result = bind_params("%s", ((i for i in xrange(3)),), Encoder())
        self.assertEqual(result, "[0, 1, 2]")

    def test_none_param(self):
        result = bind_params("%s", (None,), Encoder())
        self.assertEqual(result, "NULL")

    def test_list_collection(self):
        result = bind_params("%s", (['a', 'b', 'c'],), Encoder())
        self.assertEqual(result, "['a', 'b', 'c']")

    def test_set_collection(self):
        # Set ordering is not deterministic, so accept either rendering.
        result = bind_params("%s", (set(['a', 'b']),), Encoder())
        self.assertIn(result, ("{'a', 'b'}", "{'b', 'a'}"))

    def test_map_collection(self):
        # OrderedDict keeps the insertion order in the rendered map.
        vals = OrderedDict()
        vals['a'] = 'a'
        vals['b'] = 'b'
        vals['c'] = 'c'
        result = bind_params("%s", (vals,), Encoder())
        self.assertEqual(result, "{'a': 'a', 'b': 'b', 'c': 'c'}")

    def test_quote_escaping(self):
        # Single quotes are doubled; double quotes pass through.
        result = bind_params("%s", ("""'ef''ef"ef""ef'""",), Encoder())
        self.assertEqual(result, """'''ef''''ef"ef""ef'''""")

    def test_float_precision(self):
        # Round-trip of the largest single-precision float must be exact.
        f = 3.4028234663852886e+38
        self.assertEqual(float(bind_params("%s", (f,), Encoder())), f)
class BoundStatementTestV1(unittest.TestCase):
    """BoundStatement binding behavior under protocol version 1.

    The prepared statement has two routing-key columns (rk0, rk1 — in
    routing_key_indexes order [1, 0]), one clustering column (ck0) and
    one value column (v0), all Int32Type.
    """
    protocol_version=1

    @classmethod
    def setUpClass(cls):
        cls.prepared = PreparedStatement(column_metadata=[
            ColumnMetadata('keyspace', 'cf', 'rk0', Int32Type),
            ColumnMetadata('keyspace', 'cf', 'rk1', Int32Type),
            ColumnMetadata('keyspace', 'cf', 'ck0', Int32Type),
            ColumnMetadata('keyspace', 'cf', 'v0', Int32Type)
        ],
            query_id=None,
            routing_key_indexes=[1, 0],
            query=None,
            keyspace='keyspace',
            protocol_version=cls.protocol_version, result_metadata=None)
        cls.bound = BoundStatement(prepared_statement=cls.prepared)

    def test_invalid_argument_type(self):
        # The TypeError must name the offending column, its CQL type and
        # the offending Python type.
        values = (0, 0, 0, 'string not int')

        try:
            self.bound.bind(values)
        except TypeError as e:
            self.assertIn('v0', str(e))
            self.assertIn('Int32Type', str(e))
            self.assertIn('str', str(e))
        else:
            self.fail('Passed invalid type but exception was not thrown')

        values = (['1', '2'], 0, 0, 0)

        try:
            self.bound.bind(values)
        except TypeError as e:
            self.assertIn('rk0', str(e))
            self.assertIn('Int32Type', str(e))
            self.assertIn('list', str(e))
        else:
            self.fail('Passed invalid type but exception was not thrown')

    def test_inherit_fetch_size(self):
        # A BoundStatement picks up fetch_size from its PreparedStatement.
        keyspace = 'keyspace1'
        column_family = 'cf1'

        column_metadata = [
            ColumnMetadata(keyspace, column_family, 'foo1', Int32Type),
            ColumnMetadata(keyspace, column_family, 'foo2', Int32Type)
        ]

        prepared_statement = PreparedStatement(column_metadata=column_metadata,
                                               query_id=None,
                                               routing_key_indexes=[],
                                               query=None,
                                               keyspace=keyspace,
                                               protocol_version=self.protocol_version,
                                               result_metadata=None)
        prepared_statement.fetch_size = 1234
        bound_statement = BoundStatement(prepared_statement=prepared_statement)
        self.assertEqual(1234, bound_statement.fetch_size)

    def test_too_few_parameters_for_routing_key(self):
        self.assertRaises(ValueError, self.prepared.bind, (1,))

        bound = self.prepared.bind((1, 2))
        self.assertEqual(bound.keyspace, 'keyspace')

    def test_dict_missing_routing_key(self):
        self.assertRaises(KeyError, self.bound.bind, {'rk0': 0, 'ck0': 0, 'v0': 0})
        self.assertRaises(KeyError, self.bound.bind, {'rk1': 0, 'ck0': 0, 'v0': 0})

    def test_missing_value(self):
        self.assertRaises(KeyError, self.bound.bind, {'rk0': 0, 'rk1': 0, 'ck0': 0})

    def test_extra_value(self):
        self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': 0, 'should_not_be_here': 123})  # okay to have extra keys in dict
        self.assertEqual(self.bound.values, [six.b('\x00') * 4] * 4)  # four encoded zeros
        self.assertRaises(ValueError, self.bound.bind, (0, 0, 0, 0, 123))

    def test_values_none(self):
        # should have values
        self.assertRaises(ValueError, self.bound.bind, None)

        # prepared statement with no values
        prepared_statement = PreparedStatement(column_metadata=[],
                                               query_id=None,
                                               routing_key_indexes=[],
                                               query=None,
                                               keyspace='whatever',
                                               protocol_version=self.protocol_version,
                                               result_metadata=None)
        bound = prepared_statement.bind(None)
        self.assertListEqual(bound.values, [])

    def test_bind_none(self):
        # Binding None is allowed and replaces previous values list.
        self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': None})
        self.assertEqual(self.bound.values[-1], None)

        old_values = self.bound.values
        self.bound.bind((0, 0, 0, None))
        self.assertIsNot(self.bound.values, old_values)
        self.assertEqual(self.bound.values[-1], None)

    def test_unset_value(self):
        # UNSET_VALUE is not supported before protocol v4.
        self.assertRaises(ValueError, self.bound.bind, {'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': UNSET_VALUE})
        self.assertRaises(ValueError, self.bound.bind, (0, 0, 0, UNSET_VALUE))
class BoundStatementTestV2(BoundStatementTestV1):
    """Repeat the V1 binding tests under protocol version 2."""
    protocol_version=2
class BoundStatementTestV3(BoundStatementTestV1):
    """Repeat the V1 binding tests under protocol version 3."""
    protocol_version=3
class BoundStatementTestV4(BoundStatementTestV1):
    """Protocol v4 variants: missing values become UNSET_VALUE."""
    protocol_version=4

    def test_dict_missing_routing_key(self):
        # in v4 it implicitly binds UNSET_VALUE for missing items,
        # UNSET_VALUE is ValueError for routing keys
        self.assertRaises(ValueError, self.bound.bind, {'rk0': 0, 'ck0': 0, 'v0': 0})
        self.assertRaises(ValueError, self.bound.bind, {'rk1': 0, 'ck0': 0, 'v0': 0})

    def test_missing_value(self):
        # in v4 missing values are UNSET_VALUE
        self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0})
        self.assertEqual(self.bound.values[-1], UNSET_VALUE)

        old_values = self.bound.values
        self.bound.bind((0, 0, 0))
        self.assertIsNot(self.bound.values, old_values)
        self.assertEqual(self.bound.values[-1], UNSET_VALUE)

    def test_unset_value(self):
        # Explicit UNSET_VALUE is accepted for non-routing columns in v4.
        self.bound.bind({'rk0': 0, 'rk1': 0, 'ck0': 0, 'v0': UNSET_VALUE})
        self.assertEqual(self.bound.values[-1], UNSET_VALUE)

        old_values = self.bound.values
        self.bound.bind((0, 0, 0, UNSET_VALUE))
        self.assertEqual(self.bound.values[-1], UNSET_VALUE)
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NephoScale Cloud driver (http://www.nephoscale.com)
API documentation: http://docs.nephoscale.com
Created by Markos Gogoulos (https://mist.io)
"""
import base64
import time
import os
import binascii
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.utils.py3 import urlencode
from libcloud.compute.providers import Provider
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.compute.types import (NodeState, InvalidCredsError,
LibcloudError)
from libcloud.compute.base import (Node, NodeDriver, NodeImage, NodeSize,
NodeLocation)
from libcloud.utils.networking import is_private_subnet
API_HOST = 'api.nephoscale.com'
NODE_STATE_MAP = {
'on': NodeState.RUNNING,
'off': NodeState.UNKNOWN,
'unknown': NodeState.UNKNOWN,
}
VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
# used in create_node and specifies how many times to get the list of nodes and
# check if the newly created node is there. This is because when a request is
# sent to create a node, NephoScale replies with the job id, and not the node
# itself thus we don't have the ip addresses, that are required in deploy_node
CONNECT_ATTEMPTS = 10
class NodeKey(object):
    """Container for a NephoScale key (ssh or password credential)."""

    def __init__(self, id, name, public_key=None, key_group=None,
                 password=None):
        self.id = id
        self.name = name
        self.public_key = public_key
        self.password = password
        self.key_group = key_group

    def __repr__(self):
        return '<NodeKey: id={}, name={}>'.format(self.id, self.name)
class NephoscaleResponse(JsonResponse):
    """
    Nephoscale API Response
    """

    def parse_error(self):
        # Translate the two interesting HTTP statuses into exceptions;
        # anything else just surfaces the raw body.
        status = self.status
        if status == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Authorization Failed')
        if status == httplib.NOT_FOUND:
            raise Exception("The resource you are looking for is not found.")
        return self.body

    def success(self):
        return self.status in VALID_RESPONSE_CODES
class NephoscaleConnection(ConnectionUserAndKey):
    """
    Nephoscale connection class.
    Authenticates to the API through Basic Authentication
    with username/password
    """
    host = API_HOST
    responseCls = NephoscaleResponse
    allow_insecure = False

    def add_default_headers(self, headers):
        """Attach the HTTP Basic Authorization header to every request."""
        credentials = '%s:%s' % (self.user_id, self.key)
        encoded = base64.b64encode(b(credentials)).decode('utf-8')
        headers['Authorization'] = 'Basic %s' % encoded
        return headers
class NephoscaleNodeDriver(NodeDriver):
"""
Nephoscale node driver class.
>>> from libcloud.compute.providers import get_driver
>>> driver = get_driver('nephoscale')
>>> conn = driver('nepho_user','nepho_password')
>>> conn.list_nodes()
"""
type = Provider.NEPHOSCALE
api_name = 'nephoscale'
name = 'NephoScale'
website = 'http://www.nephoscale.com'
connectionCls = NephoscaleConnection
features = {'create_node': ['ssh_key']}
def list_locations(self):
"""
List available zones for deployment
:rtype: ``list`` of :class:`NodeLocation`
"""
result = self.connection.request('/datacenter/zone/').object
locations = []
for value in result.get('data', []):
location = NodeLocation(id=value.get('id'),
name=value.get('name'),
country='US',
driver=self)
locations.append(location)
return locations
    def list_images(self):
        """
        List available images for deployment
        :rtype: ``list`` of :class:`NodeImage`
        """
        result = self.connection.request('/image/server/').object
        images = []
        for value in result.get('data', []):
            # Keep the API's hardware/billing details in `extra`.
            extra = {'architecture': value.get('architecture'),
                     'disks': value.get('disks'),
                     'billable_type': value.get('billable_type'),
                     'pcpus': value.get('pcpus'),
                     'cores': value.get('cores'),
                     'uri': value.get('uri'),
                     'storage': value.get('storage'),
                     }
            image = NodeImage(id=value.get('id'),
                              name=value.get('friendly_name'),
                              driver=self,
                              extra=extra)
            images.append(image)
        return images
def list_sizes(self):
"""
List available sizes containing prices
:rtype: ``list`` of :class:`NodeSize`
"""
result = self.connection.request('/server/type/cloud/').object
sizes = []
for value in result.get('data', []):
value_id = value.get('id')
size = NodeSize(id=value_id,
name=value.get('friendly_name'),
ram=value.get('ram'),
disk=value.get('storage'),
bandwidth=None,
price=self._get_size_price(size_id=str(value_id)),
driver=self)
sizes.append(size)
return sorted(sizes, key=lambda k: k.price)
def list_nodes(self):
"""
List available nodes
:rtype: ``list`` of :class:`Node`
"""
result = self.connection.request('/server/cloud/').object
nodes = [self._to_node(value) for value in result.get('data', [])]
return nodes
    def rename_node(self, node, name, hostname=None):
        """rename a cloud server, optionally specify hostname too"""
        data = {'name': name}
        if hostname:
            data['hostname'] = hostname
        params = urlencode(data)
        result = self.connection.request('/server/cloud/%s/' % node.id,
                                         data=params, method='PUT').object
        # The API echoes an HTTP-style status code in 'response'.
        return result.get('response') in VALID_RESPONSE_CODES
def reboot_node(self, node):
    """Restart a running node; True when the API reports success."""
    response = self.connection.request(
        '/server/cloud/%s/initiator/restart/' % node.id,
        method='POST').object
    return response.get('response') in VALID_RESPONSE_CODES
def ex_start_node(self, node):
    """Start a stopped node; True when the API reports success."""
    response = self.connection.request(
        '/server/cloud/%s/initiator/start/' % node.id,
        method='POST').object
    return response.get('response') in VALID_RESPONSE_CODES
def ex_stop_node(self, node):
    """Stop a running node; True when the API reports success."""
    response = self.connection.request(
        '/server/cloud/%s/initiator/stop/' % node.id,
        method='POST').object
    return response.get('response') in VALID_RESPONSE_CODES
def destroy_node(self, node):
    """Destroy a node; True when the API reports success."""
    response = self.connection.request('/server/cloud/%s/' % node.id,
                                       method='DELETE').object
    return response.get('response') in VALID_RESPONSE_CODES
def ex_list_keypairs(self, ssh=False, password=False, key_group=None):
    """
    List console and server keys, optionally filtered.

    NephoScale has two key flavours: ssh keys and password keys.
    Password keys with key_group 4 are console keys; a freshly created
    server carries one server key (ssh or password) plus one console
    password key.  Called with no arguments this lists every key.

    :keyword ssh: list only ssh keys (optional)
    :type ssh: ``bool``
    :keyword password: list only password keys (optional)
    :type password: ``bool``
    :keyword key_group: list only keys of this group, e.g. key_group=4
                        for console password keys (optional)
    :type key_group: ``int``
    :rtype: ``list`` of :class:`NodeKey`
    """
    if ssh and password:
        raise LibcloudError('You can only supply ssh or password. To \
get all keys call with no arguments')
    if ssh:
        endpoint = '/key/sshrsa/'
    elif password:
        endpoint = '/key/password/'
    else:
        endpoint = '/key/'
    response = self.connection.request(endpoint).object
    keys = [self._to_key(entry) for entry in response.get('data', [])]
    if key_group:
        keys = [key for key in keys if key.key_group == key_group]
    return keys
def ex_create_keypair(self, name, public_key=None, password=None,
                      key_group=None):
    """Create an ssh or password key for server or console use.

    key_group is 1 for Server keys and 4 for Console keys; a sensible
    default is chosen per key flavour when it is not given.  When no
    password is supplied for a password key, a random one is generated.

    :return: the id of the newly created key
    """
    if public_key:
        payload = {
            'name': name,
            'public_key': public_key,
            'key_group': key_group if key_group else 1
        }
        result = self.connection.request('/key/sshrsa/',
                                         data=urlencode(payload),
                                         method='POST').object
    else:
        payload = {
            'name': name,
            'password': password if password else self.random_password(),
            'key_group': key_group if key_group else 4
        }
        result = self.connection.request('/key/password/',
                                         data=urlencode(payload),
                                         method='POST').object
    return result.get('data', {}).get('id', '')
def ex_delete_keypair(self, key_id, ssh=False):
    """Delete an ssh key or password key given its id."""
    endpoint = '/key/sshrsa/%s/' if ssh else '/key/password/%s/'
    result = self.connection.request(endpoint % key_id,
                                     method='DELETE').object
    return result.get('response') in VALID_RESPONSE_CODES
def create_node(self, name, size, image, server_key=None,
                console_key=None, zone=None, **kwargs):
    """Creates the node, and sets the ssh key, console key
    NephoScale will respond with a 200-200 response after sending a valid
    request. If nowait=True is specified in the args, we then ask a few
    times until the server is created and assigned a public IP address,
    so that deploy_node can be run
    >>> from libcloud.compute.providers import get_driver
    >>> driver = get_driver('nephoscale')
    >>> conn = driver('nepho_user','nepho_password')
    >>> conn.list_nodes()
    >>> name = 'staging-server'
    >>> size = conn.list_sizes()[0]
    <NodeSize: id=27, ...name=CS025 - 0.25GB, 10GB, ...>
    >>> image = conn.list_images()[9]
    <NodeImage: id=49, name=Linux Ubuntu Server 10.04 LTS 64-bit, ...>
    >>> server_keys = conn.ex_list_keypairs(key_group=1)[0]
    <NodeKey: id=71211, name=markos>
    >>> server_key = conn.ex_list_keypairs(key_group=1)[0].id
    70867
    >>> console_keys = conn.ex_list_keypairs(key_group=4)[0]
    <NodeKey: id=71213, name=mistio28434>
    >>> console_key = conn.ex_list_keypairs(key_group=4)[0].id
    70907
    >>> node = conn.create_node(name=name, size=size, image=image, \
console_key=console_key, server_key=server_key)
    We can also create an ssh key, plus a console key and
    deploy node with them
    >>> server_key = conn.ex_create_keypair(name, public_key='123')
    71211
    >>> console_key = conn.ex_create_keypair(name, key_group=4)
    71213
    We can increase the number of connect attempts to wait until
    the node is created, so that deploy_node has ip address to
    deploy the script
    We can also specify the location
    >>> location = conn.list_locations()[0]
    >>> node = conn.create_node(name=name,
    >>> ...                     size=size,
    >>> ...                     image=image,
    >>> ...                     console_key=console_key,
    >>> ...                     server_key=server_key,
    >>> ...                     connect_attempts=10,
    >>> ...                     nowait=True,
    >>> ...                     zone=location.id)
    """
    # hostname defaults to the display name when not given explicitly
    hostname = kwargs.get('hostname', name)
    service_type = size.id
    image = image.id
    connect_attempts = int(kwargs.get('connect_attempts',
                           CONNECT_ATTEMPTS))
    data = {'name': name,
            'hostname': hostname,
            'service_type': service_type,
            'image': image,
            'server_key': server_key,
            'console_key': console_key,
            'zone': zone
            }
    params = urlencode(data)
    try:
        node = self.connection.request('/server/cloud/', data=params,
                                       method='POST')
    except Exception as e:
        # NOTE(review): re-raising as a bare Exception discards the original
        # exception type and traceback; consider chaining ("raise ... from e").
        raise Exception("Failed to create node %s" % e)
    # The create response does not carry the new server's id, so return a
    # placeholder Node unless the caller asked us to wait for it (below).
    node = Node(id='', name=name, state=NodeState.UNKNOWN, public_ips=[],
                private_ips=[], driver=self)
    # NOTE(review): the docstring talks about "nowait=True", but the code
    # actually reads the 'ex_wait' kwarg — confirm which name callers use.
    nowait = kwargs.get('ex_wait', False)
    if not nowait:
        return node
    else:
        # try to get the created node public ips, for use in deploy_node
        # At this point we don't have the id of the newly created Node,
        # so search name in nodes
        created_node = False
        # poll list_nodes() once a minute until the new server shows up
        while connect_attempts > 0:
            nodes = self.list_nodes()
            created_node = [c_node for c_node in nodes if
                            c_node.name == name]
            if created_node:
                return created_node[0]
            else:
                time.sleep(60)
                connect_attempts = connect_attempts - 1
        # attempts exhausted: fall back to the placeholder Node
        return node
def _to_node(self, data):
    """Convert an API server dict into a :class:`Node` instance."""
    state = NODE_STATE_MAP.get(data.get('power_status'), '4')
    public_ips = []
    private_ips = []
    # e.g. "ipaddresses": "198.120.14.6, 10.132.60.1"
    raw_addresses = data.get('ipaddresses', '')
    if raw_addresses:
        for address in raw_addresses.split(','):
            address = address.replace(' ', '')
            if is_private_subnet(address):
                private_ips.append(address)
            else:
                public_ips.append(address)
    extra = {
        'zone_data': data.get('zone'),
        'zone': data.get('zone', {}).get('name'),
        'image': data.get('image', {}).get('friendly_name'),
        'create_time': data.get('create_time'),
        'network_ports': data.get('network_ports'),
        'is_console_enabled': data.get('is_console_enabled'),
        'service_type': data.get('service_type', {}).get('friendly_name'),
        'hostname': data.get('hostname')
    }
    return Node(id=data.get('id'), name=data.get('name'), state=state,
                public_ips=public_ips, private_ips=private_ips,
                driver=self, extra=extra)
def _to_key(self, data):
    """Convert an API key dict into a :class:`NodeKey`."""
    fields = ('id', 'name', 'password', 'key_group', 'public_key')
    return NodeKey(**{field: data.get(field) for field in fields})
def random_password(self, size=8):
    """Return a random lowercase-hex password of *size* characters."""
    raw = os.urandom(size)
    hex_text = binascii.hexlify(raw).decode('ascii')
    return hex_text[:size]
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.tap}.
"""
from __future__ import absolute_import, division
import os
import stat
from twisted.python.reflect import requireModule
from twisted.python.usage import UsageError
from twisted.python.filepath import FilePath
from twisted.internet.interfaces import IReactorUNIX
from twisted.internet import reactor, endpoints
from twisted.python.threadpool import ThreadPool
from twisted.trial.unittest import TestCase
from twisted.python.compat import _PY3
from twisted.web.server import Site
from twisted.web.static import Data, File
from twisted.web.tap import Options, makeService
from twisted.web.script import PythonScript
from twisted.web.wsgi import WSGIResource
if not _PY3:
# FIXME: https://twistedmatrix.com/trac/ticket/8009
from twisted.web.twcgi import CGIScript
# FIXME: https://twistedmatrix.com/trac/ticket/8010
# FIXME: https://twistedmatrix.com/trac/ticket/7598
from twisted.web.distrib import ResourcePublisher, UserDirectory
from twisted.spread.pb import PBServerFactory
from twisted.web.tap import makePersonalServerFactory
application = object()
class ServiceTests(TestCase):
    """
    Tests for the service creation APIs in L{twisted.web.tap}.
    """

    def _pathOption(self):
        """
        Helper for the I{--path} tests which creates a directory and creates
        an L{Options} object which uses that directory as its static
        filesystem root.

        @return: A two-tuple of a L{FilePath} referring to the directory and
            the value associated with the C{'root'} key in the L{Options}
            instance after parsing a I{--path} option.
        """
        path = FilePath(self.mktemp())
        path.makedirs()
        options = Options()
        options.parseOptions(['--path', path.path])
        root = options['root']
        return path, root

    def test_path(self):
        """
        The I{--path} option causes L{Options} to create a root resource
        which serves responses from the specified path.
        """
        path, root = self._pathOption()
        self.assertIsInstance(root, File)
        self.assertEqual(root.path, path.path)

    def test_pathServer(self):
        """
        The I{--path} option to L{makeService} causes it to return a service
        which will listen on the server address given by the I{--port} option.
        """
        path = FilePath(self.mktemp())
        path.makedirs()
        port = self.mktemp()
        options = Options()
        options.parseOptions(['--port', 'unix:' + port, '--path', path.path])
        service = makeService(options)
        service.startService()
        self.addCleanup(service.stopService)
        self.assertIsInstance(service.services[0].factory.resource, File)
        self.assertEqual(service.services[0].factory.resource.path, path.path)
        self.assertTrue(os.path.exists(port))
        self.assertTrue(stat.S_ISSOCK(os.stat(port).st_mode))

    if not IReactorUNIX.providedBy(reactor):
        test_pathServer.skip = (
            "The reactor does not support UNIX domain sockets")

    def test_cgiProcessor(self):
        """
        The I{--path} option creates a root resource which serves a
        L{CGIScript} instance for any child with the C{".cgi"} extension.
        """
        path, root = self._pathOption()
        path.child("foo.cgi").setContent(b"")
        self.assertIsInstance(root.getChild("foo.cgi", None), CGIScript)

    if _PY3:
        test_cgiProcessor.skip = (
            "Will be ported in https://twistedmatrix.com/trac/ticket/8009")

    def test_epyProcessor(self):
        """
        The I{--path} option creates a root resource which serves a
        L{PythonScript} instance for any child with the C{".epy"} extension.
        """
        path, root = self._pathOption()
        path.child("foo.epy").setContent(b"")
        self.assertIsInstance(root.getChild("foo.epy", None), PythonScript)

    def test_rpyProcessor(self):
        """
        The I{--path} option creates a root resource which serves the
        C{resource} global defined by the Python source in any child with
        the C{".rpy"} extension.
        """
        path, root = self._pathOption()
        path.child("foo.rpy").setContent(
            b"from twisted.web.static import Data\n"
            b"resource = Data('content', 'major/minor')\n")
        child = root.getChild("foo.rpy", None)
        self.assertIsInstance(child, Data)
        self.assertEqual(child.data, 'content')
        self.assertEqual(child.type, 'major/minor')

    def test_makePersonalServerFactory(self):
        """
        L{makePersonalServerFactory} returns a PB server factory which has
        as its root object a L{ResourcePublisher}.
        """
        # The fact that this pile of objects can actually be used somehow is
        # verified by twisted.web.test.test_distrib.
        site = Site(Data(b"foo bar", "text/plain"))
        serverFactory = makePersonalServerFactory(site)
        self.assertIsInstance(serverFactory, PBServerFactory)
        self.assertIsInstance(serverFactory.root, ResourcePublisher)
        self.assertIdentical(serverFactory.root.site, site)

    def test_personalServer(self):
        """
        The I{--personal} option to L{makeService} causes it to return a
        service which will listen on the server address given by the I{--port}
        option.
        """
        port = self.mktemp()
        options = Options()
        options.parseOptions(['--port', 'unix:' + port, '--personal'])
        service = makeService(options)
        service.startService()
        self.addCleanup(service.stopService)
        self.assertTrue(os.path.exists(port))
        self.assertTrue(stat.S_ISSOCK(os.stat(port).st_mode))

    if not IReactorUNIX.providedBy(reactor):
        test_personalServer.skip = (
            "The reactor does not support UNIX domain sockets")

    def test_defaultPersonalPath(self):
        """
        If the I{--port} option not specified but the I{--personal} option is,
        L{Options} defaults the port to C{UserDirectory.userSocketName} in the
        user's home directory.
        """
        options = Options()
        options.parseOptions(['--personal'])
        path = os.path.expanduser(
            os.path.join('~', UserDirectory.userSocketName))
        self.assertEqual(
            endpoints._parseServer(options['port'], None)[:2],
            ('UNIX', (path, None)))

    if not IReactorUNIX.providedBy(reactor):
        test_defaultPersonalPath.skip = (
            "The reactor does not support UNIX domain sockets")

    if _PY3:
        for i in [test_makePersonalServerFactory, test_personalServer,
                  test_defaultPersonalPath]:
            i.skip = (
                "Will be ported in https://twistedmatrix.com/trac/ticket/8010")
        del i

    def test_defaultPort(self):
        """
        If the I{--port} option is not specified, L{Options} defaults the port
        to C{8080}.
        """
        options = Options()
        options.parseOptions([])
        self.assertEqual(
            endpoints._parseServer(options['port'], None)[:2],
            ('TCP', (8080, None)))

    def test_wsgi(self):
        """
        The I{--wsgi} option takes the fully-qualifed Python name of a WSGI
        application object and creates a L{WSGIResource} at the root which
        serves that application.
        """
        options = Options()
        options.parseOptions(['--wsgi', __name__ + '.application'])
        root = options['root']
        # assertIsInstance, not assertTrue(root, WSGIResource): two-argument
        # assertTrue treats the second argument as the failure message, so the
        # original check passed for any truthy root.
        self.assertIsInstance(root, WSGIResource)
        self.assertIdentical(root._reactor, reactor)
        self.assertIsInstance(root._threadpool, ThreadPool)
        self.assertIdentical(root._application, application)

        # The threadpool should start and stop with the reactor.
        self.assertFalse(root._threadpool.started)
        reactor.fireSystemEvent('startup')
        self.assertTrue(root._threadpool.started)
        self.assertFalse(root._threadpool.joined)
        reactor.fireSystemEvent('shutdown')
        self.assertTrue(root._threadpool.joined)

    def test_invalidApplication(self):
        """
        If I{--wsgi} is given an invalid name, L{Options.parseOptions}
        raises L{UsageError}.
        """
        options = Options()
        for name in [__name__ + '.nosuchthing', 'foo.']:
            exc = self.assertRaises(
                UsageError, options.parseOptions, ['--wsgi', name])
            self.assertEqual(str(exc),
                             "No such WSGI application: %r" % (name,))

    def test_HTTPSFailureOnMissingSSL(self):
        """
        An L{UsageError} is raised when C{https} is requested but there is no
        support for SSL.
        """
        options = Options()
        exception = self.assertRaises(
            UsageError, options.parseOptions, ['--https=443'])
        self.assertEqual('SSL support not installed', exception.args[0])

    if requireModule('OpenSSL.SSL') is not None:
        test_HTTPSFailureOnMissingSSL.skip = 'SSL module is available.'

    def test_HTTPSAcceptedOnAvailableSSL(self):
        """
        When SSL support is present, it accepts the --https option.
        """
        options = Options()
        options.parseOptions(['--https=443'])
        self.assertEqual('443', options['https'])

    if requireModule('OpenSSL.SSL') is None:
        test_HTTPSAcceptedOnAvailableSSL.skip = 'SSL module is not available.'
| |
# Copyright (C) 2013-2020 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: Wlodzimierz Wencel
import time
import random
import logging
from locale import str
from scapy.all import sr
from scapy.layers import dns
from scapy.layers.inet import IP, UDP
from scapy.layers.dhcp6 import IPv6
from forge_cfg import world
log = logging.getLogger('forge')
# DNS record TYPE mnemonics -> numeric codes (RFC 1035 3.2.2/3.2.3 plus
# later additions such as AAAA, SRV, A6 and DNAME).
dnstypes = {"ANY": 0,
            "ALL": 255,
            "A": 1,
            "NS": 2,
            "MD": 3,
            # code 4 is MF (mail forwarder); the old entry was a commented-out
            # duplicate "MD": 4 typo.
            "MF": 4,
            "CNAME": 5,
            "SOA": 6,
            "MB": 7,
            "MG": 8,
            "MR": 9,
            "NULL": 10,
            "WKS": 11,
            "PTR": 12,
            "HINFO": 13,
            "MINFO": 14,
            "MX": 15,
            "TXT": 16,
            "RP": 17,
            "AFSDB": 18,
            "AAAA": 28,
            "SRV": 33,
            "A6": 38,
            "DNAME": 39,
            "IXFR": 251,
            "AXFR": 252,
            "MAILB": 253,
            "MAILA": 254}

# DNS CLASS mnemonics -> numeric codes (RFC 1035 3.2.4/3.2.5).
dnsclasses = {"IN": 1,
              "CS": 2,
              "CH": 3,
              "HS": 4,
              "ANY": 255}

# DNS header OPCODE values (RFC 1035 4.1.1).
op_codes = {"QUERY": 0,
            "IQUERY": 1,
            "STATUS": 2}

# DNS header RCODE values (RFC 1035 4.1.1).
r_codes = {"OK": 0,
           "FORMAT-ERROR": 1,
           "SERVER-FAILURE": 2,
           "NAME-ERROR": 3,
           "NOT-IMPLEMENTED": 4,
           "REFUSED": 5}
def prepare_query():
    """Reset the client message list, then build a fresh DNS query packet."""
    world.climsg = []
    build_query()
    build_msg()
def send_wait_for_query(choose_must, expect_include):
    """Send the prepared DNS query and collect the response sections.

    Retries with a growing timeout up to world.f_cfg.dns_retry times when a
    response is expected but none arrives.  On success the qd/an/ns/ar
    sections of the first reply are copied into world.dns_qd/an/ns/ar.

    :param choose_must: unused here; kept for the step-definition interface.
    :param expect_include: True when a response must arrive, False when the
        absence of any response is the expectation.
    """
    if world.f_cfg.show_packets_from in ['both', 'client']:
        world.climsg[0].show()
    timeout = world.cfg["wait_interval"] + world.dns_send_query_time_out
    log.info('sending DNS query, attempt %d/%d, timeout %.1f',
             world.dns_send_query_counter,
             world.f_cfg.dns_retry,
             timeout)
    ans, unans = sr(world.climsg,
                    iface=world.cfg["dns_iface"],
                    timeout=timeout,
                    multi=True,
                    verbose=99)
    world.dns_send_query_counter += 1
    world.dns_send_query_time_out += 0.5
    world.dns_qd = []
    world.dns_an = []
    world.dns_ns = []
    world.dns_ar = []
    world.srvmsg = []
    for x in ans:
        a, b = x
        world.srvmsg.append(b.getlayer(2))
    if world.f_cfg.show_packets_from in ['both', 'server']:
        try:  # that is temp solution until we have good respond system checking!
            world.srvmsg[0].show()
        except:
            pass
    if expect_include:
        # if message was not received but expected, resend query with higher timeout
        if len(world.srvmsg) == 0 and world.dns_send_query_counter <= world.f_cfg.dns_retry:
            time.sleep(1)
            send_wait_for_query(choose_must, expect_include)
            # BUG FIX: the retry call above already parsed the sections;
            # falling through here used to append every record a second time.
            return
        assert len(world.srvmsg) != 0, "No response received."
    else:
        assert len(world.srvmsg) == 0, "Response received, not expected"
        # BUG FIX: nothing was received (as asserted), so there is nothing to
        # parse; indexing world.srvmsg[0] below used to raise IndexError.
        return
    msg = world.srvmsg[0]
    # copy each section of the reply into the corresponding world list
    for section, bucket in (('qd', world.dns_qd), ('an', world.dns_an),
                            ('ns', world.dns_ns), ('ar', world.dns_ar)):
        assert hasattr(msg, section), '%s field not present in DNS response' % section
        records = getattr(msg, section)
        if records is not None:
            for each in records:
                bucket.append(each.copy())
def build_query():
    """Construct the DNS query message and store it in world.dns_query."""
    # TODO all those should have ability to be set from test level
    world.dns_send_query_counter = 0  # fresh retry counter for every new query
    world.dns_send_query_time_out = 0.5
    query = dns.DNS(id=1,
                    qr=0,
                    opcode="QUERY",
                    aa=0,
                    tc=0,
                    rd=0,
                    ra=0,
                    z=0,
                    rcode="ok",
                    qdcount=1,
                    ancount=0,
                    nscount=0,
                    arcount=0)
    # if there will be need we could build here answers,
    # authoritative_nameservers and additional_records.
    if hasattr(world, 'question_record'):
        query.qd = world.question_record
    world.dns_query = query
def dns_question_record(addr, my_qtype, my_qclass):
    """Build a DNS question record and store it in world.question_record.

    :param addr: the queried name (qname).
    :param my_qtype: symbolic record type, must be a key of dnstypes.
    :param my_qclass: symbolic record class, must be a key of dnsclasses.
    """
    assert my_qtype in dnstypes, "Unsupported question type " + my_qtype
    dnstype_code = dnstypes.get(my_qtype)
    # BUG FIX: the message below used to say "question type" (copy-paste
    # from the check above) even though it reports an unsupported class.
    assert my_qclass in dnsclasses, "Unsupported question class " + my_qclass
    dnsclass_code = dnsclasses.get(my_qclass)
    world.question_record = dns.DNSQR(qname=addr, qtype=dnstype_code, qclass=dnsclass_code)
def build_msg():
    """Wrap the prepared DNS query in an IP/UDP (or IPv6/UDP) datagram and
    append the full packet to world.climsg."""
    if world.proto == "v6":
        msg = IPv6(dst=world.cfg["dns6_addr"])/UDP(sport=world.cfg["dns_port"], dport=world.cfg["dns_port"])
    else:
        msg = IP(dst=world.cfg["dns4_addr"])/UDP(sport=world.cfg["dns_port"], dport=world.cfg["dns_port"])
    # NOTE(review): 'trid' is not a standard IP/UDP field name; presumably a
    # transaction id attached for matching — confirm it has the intended effect.
    msg.trid = random.randint(0, 256*256*256)
    world.climsg.append(msg/world.dns_query)
def check_dns_respond(expect, data_type, expected_data_value):
    """Verify a single field of the first received DNS answer.

    :param expect: True -> the field must equal the expected value;
        False -> the value is forbidden.
    :param data_type: header field to inspect ('opcode', 'rcode', or any
        attribute name of the scapy DNS layer).
    :param expected_data_value: expected value; numeric strings are coerced
        to int, and symbolic opcode/rcode names (e.g. 'QUERY', 'REFUSED')
        are mapped through the op_codes/r_codes tables.
    """
    # BUG FIX: the old code did "data_type = op_codes.get(data_type)", which
    # always yielded None ('opcode' is not a key of op_codes) and clobbered
    # the field name used in the failure messages.  The symbolic-name lookup
    # belongs to the expected value, not to the field name.
    if data_type == 'opcode':
        received = world.srvmsg[0].opcode
        expected_data_value = op_codes.get(expected_data_value, expected_data_value)
    elif data_type == 'rcode':
        received = world.srvmsg[0].rcode
        expected_data_value = r_codes.get(expected_data_value, expected_data_value)
    else:
        try:
            received = getattr(world.srvmsg[0], data_type.lower())
        except AttributeError:
            assert False, "There is no value named: {data_type}".format(**locals())
    if isinstance(expected_data_value, str) and expected_data_value.isnumeric():
        expected_data_value = int(expected_data_value)
    flag = 0
    if expected_data_value == received:
        flag = 1  # if we found what we were looking for change flag to 1
    if expect and flag == 0:
        assert False, "Invalid {data_type} received {received} but expected: {expected_data_value}.".format(**locals())
    if not expect and flag == 1:
        assert False, "Invalid {data_type} received {received} that" \
                      " value has been excluded from correct values.".format(**locals())
def _resend_query(exp, name):
    """Pause briefly, resend the pending DNS query, then re-check *name*."""
    time.sleep(3)
    send_wait_for_query('MUST', True)
    check_dns_option(exp, name)
def report_dns_option(flag, expect_include, name):
    """Assert that DNS section *name* matches the expectation, retrying first."""
    retries_left = world.dns_send_query_counter <= world.f_cfg.dns_retry
    if flag and not expect_include:
        if retries_left:
            _resend_query(False, name)
        else:
            assert False, 'In received DNS query part: "{name}" is NOT empty as we expected.'.format(**locals())
    elif not flag and expect_include:
        # this is where we had huge amount of failures on jenkins, let's bring here retries.
        if retries_left:
            _resend_query(True, name)
        else:
            assert False, 'In received DNS query part: "{name}" is empty.'.format(**locals())
def check_dns_option(expect_include, part_name):
    """Check whether the named DNS message section is empty or populated."""
    section_contents = {'QUESTION': world.dns_qd,
                        'ANSWER': world.dns_an,
                        'AUTHORITATIVE_NAMESERVERS': world.dns_ns,
                        'ADDITIONAL_RECORDS': world.dns_ar}
    records = section_contents.get(part_name, [])
    flag = 1 if len(records) > 0 else 0
    report_dns_option(flag, expect_include, part_name)
def parsing_received_parts(query_part_list, length, expect, value_name, value):
    """Scan the first *length* records for attribute *value_name* == *value*.

    Returns ``(1, matched_value)`` on the first hit, otherwise
    ``(0, space_joined_values_seen)``.  Fails the test outright when a
    record lacks the attribute.  Ints and bytes are normalized to str
    before comparing.  (*expect* is unused; kept for the caller interface.)
    """
    attr = value_name.lower()
    outcome = ""
    for idx in range(length):
        try:
            found = getattr(query_part_list[idx], attr)
        except AttributeError:
            assert False, "There is no value named: {value_name}".format(**locals())
        if isinstance(found, int):
            found = str(found)
        elif isinstance(found, bytes):
            found = found.decode('utf-8')
        if found == value:
            return 1, found
        outcome = outcome + found + ' '
    return 0, outcome
def dns_option_content(part_name, expect, value_name, value):
    """Search one section of the first DNS reply for a record whose
    *value_name* attribute equals *value*, asserting per *expect*."""
    sections = {'QUESTION': ('qd', 'qdcount'),
                'ANSWER': ('an', 'ancount'),
                'AUTHORITATIVE_NAMESERVERS': ('ns', 'nscount'),
                'ADDITIONAL_RECORDS': ('ar', 'arcount')}
    flag = 0
    if part_name in sections:
        records_attr, count_attr = sections[part_name]
        reply = world.srvmsg[0]
        flag, outcome = parsing_received_parts(getattr(reply, records_attr),
                                               getattr(reply, count_attr),
                                               expect, value_name, value)
    if not flag and expect:
        assert False, 'In received DNS query part: "{value_name}" there is/are values:' \
                      ' {outcome} expected was: {value}'.format(**locals())
    elif flag and not expect:
        assert False, 'In received DNS query part: "{value_name}" there is value:' \
                      ' {outcome} which was forbidden to show up.'.format(**locals())
| |
from PySide.QtCore import *
from PySide.QtGui import *
import sys
import person
import worker
# import spouse
# import attacher
import csv_handle
import re
import datetime
class MainDialog(QMainWindow, worker.Ui_MainWindow):
def __init__(self, parent=None):
    """Set up the designer-generated UI, wire every widget signal to its
    handler, and load the default family file.

    NOTE(review): *parent* is accepted but not forwarded to
    super().__init__() — confirm that is intentional.
    """
    super().__init__()
    self.setupUi(self)
    # --- selection / person-detail editors -------------------------------
    self.peopleList.currentItemChanged.connect(self.select_in_list)
    self.spouseList.currentItemChanged.connect(self.marital_info)
    self.nicknameEdit.textChanged.connect(self.update_nick_name)
    self.realnameEdit.editingFinished.connect(self.update_real_name)
    self.bdayEdit.editingFinished.connect(self.update_bday)
    self.femaleRadio.toggled.connect(self.update_gender)
    self.maleRadio.toggled.connect(self.update_gender)
    self.ddayRadio.toggled.connect(self.update_death)
    self.ddayEdit.editingFinished.connect(self.update_death)
    self.impRadio.toggled.connect(self.update_impflg)
    self.notesEdit.textChanged.connect(self.update_notes)
    # --- navigation to related people ------------------------------------
    self.fatherButton.clicked.connect(self.open_father)
    self.motherButton.clicked.connect(self.open_mother)
    self.spouseList.itemDoubleClicked.connect(self.open_spouse)
    self.childrenList.itemDoubleClicked.connect(self.open_child)
    # --- family file actions ----------------------------------------------
    self.newFamilyButton.clicked.connect(self.new_family)
    self.saveFamilyButton.clicked.connect(self.save_family)
    self.loadFamilyButton.clicked.connect(self.open_family)
    self.actionAttach.triggered.connect(self.attacher_window)
    # --- create / attach / unattach relatives -----------------------------
    self.deletePersonButton.clicked.connect(self.delete_person)
    self.createParentsButton.clicked.connect(self.create_parents)
    self.createSpouseButton.clicked.connect(self.create_spouse)
    self.createChildButton.clicked.connect(self.create_child)
    self.attachParentsButton.clicked.connect(self.attach_parents)
    self.attachSpouseButton.clicked.connect(self.attach_spouse)
    self.attachChildButton.clicked.connect(self.attach_child)
    self.unattachParentsButton.clicked.connect(self.unattach_parents)
    self.unattachSpouseButton.clicked.connect(self.unattach_spouse)
    self.unattachChildButton.clicked.connect(self.unattach_child)
    self.actionUnAll.triggered.connect(self.unattach_all)  # remove out
    self.exitButton.clicked.connect(self.exitApp)
    # --- marital status / ordering ----------------------------------------
    self.actionEdit_Spouse.triggered.connect(self.spouse_window)
    self.firstStatusRadio.toggled.connect(self.update_status)
    self.secondStatusRadio.toggled.connect(self.update_status)
    self.thirdStatusRadio.toggled.connect(self.update_status)
    self.spouseInfo.editingFinished.connect(self.update_anniversary)
    self.secondaryList.currentItemChanged.connect(self.select_in_secondary)
    self.saveBox.clicked.connect(self.save_attached)
    self.moveUpSpouse.clicked.connect(self.move_spouse_up)
    self.moveDnSpouse.clicked.connect(self.move_spouse_down)
    self.moveUpChild.clicked.connect(self.move_child_up)
    self.moveDnChild.clicked.connect(self.move_child_down)
    # --- initial widget state ---------------------------------------------
    self.fatherButton.setFlat(True)
    self.motherButton.setFlat(True)
    self.primaryList.hide()
    self.primaryListLabel.hide()
    self.secondaryList.hide()
    self.secondaryListLabel.hide()
    self.saveBox.hide()
    # self.new_family()
    # NOTE(review): the start-up file name is hard-coded — confirm 'save4.csv'
    # is meant to ship as the default data set.
    self.people = csv_handle.load('save4.csv')
    for a in sorted(self.people):
        self.peopleList.addItem(a + ' - ' + self.people[a].nick_name)
    self.peopleList.setCurrentRow(0)
def new_family(self):
    """Start a fresh family: wipe everything and seed one male person."""
    self.people = {}
    self.peopleList.clear()
    new_key = self.get_next_number()
    self.people[new_key] = person.person(new_key, gender='M')
    self.peopleList.addItem(new_key + ' - ' + self.people[new_key].nick_name)
    self.peopleList.setCurrentRow(0)
def save_family(self):
    """Ask for a target file and write the current family as CSV."""
    try:
        fname, _ = QFileDialog.getSaveFileName(self, 'Save Family File', '.', '*.csv')
        csv_handle.save(fname, self.people)
    except FileNotFoundError:
        # dialog cancelled / unusable path: keep the current state silently
        pass
def open_family(self):
    """Ask for a CSV file and replace the current family with its contents."""
    try:
        fname, _ = QFileDialog.getOpenFileName(self, 'Open Family File', '.', '*.csv')
        self.people = {}
        self.peopleList.clear()
        self.people = csv_handle.load(fname)
        for key in sorted(self.people):
            self.peopleList.addItem(key + ' - ' + self.people[key].nick_name)
    except FileNotFoundError:
        pass
def select_in_list(self, cur, prev):
    """Populate every detail widget from the newly selected person.

    Connected to peopleList.currentItemChanged; *cur*/*prev* are the new
    and previous list items.
    """
    try:
        # list entries look like "KEY - nickname"; the key is the first 3 chars
        self.key = cur.text()[:3]
        self.spouse = ''
    except AttributeError:
        # cur is None (list cleared / nothing selected): nothing to show
        print('attribute error in select_in_list')
        return
    self.keyEdit.setText(self.key)
    self.nicknameEdit.setText(self.people[self.key].nick_name)
    # show the age next to the birthday once it is meaningful
    if self.people[self.key].age > 1:
        self.bdayEdit.setText(self.people[self.key].birth + ' ({} years)'.format(self.people[self.key].age))
    else:
        self.bdayEdit.setText(self.people[self.key].birth)
    self.realnameEdit.setText(self.people[self.key].real_name)
    self.ddayEdit.setText(self.people[self.key].death)
    try:
        self.fatherButton.setText(self.people[self.key].father_id + ' - '
                                  + self.people[self.people[self.key].father_id].nick_name)
    except KeyError:
        # no father recorded (or the id is missing from self.people)
        self.fatherButton.setText('')
    except:
        print("Unexpected error:", sys.exc_info())
        raise
    try:
        self.motherButton.setText(self.people[self.key].mother_id + ' - '
                                  + self.people[self.people[self.key].mother_id].nick_name)
    except KeyError:
        # no mother recorded (or the id is missing from self.people)
        self.motherButton.setText('')
    except:
        print("Unexpected error:", sys.exc_info())
        raise
    # mirror stored flags into the radio buttons
    if self.people[self.key].dday:
        self.ddayRadio.setChecked(True)
    else:
        self.ddayRadio.setChecked(False)
    if self.people[self.key].gender == 'F':
        self.femaleRadio.setChecked(True)
    elif self.people[self.key].gender == 'M':
        self.maleRadio.setChecked(True)
    if self.people[self.key].imp_flg:
        self.impRadio.setChecked(True)
    else:
        self.impRadio.setChecked(False)
    self.notesEdit.setText(self.people[self.key].notes)
    # rebuild the spouse and children lists for the selected person
    self.spouseList.clear()
    for a in self.process_marriage(self.key):
        self.spouseList.addItem(a)
    self.spouseList.setCurrentRow(0)
    self.childrenList.clear()
    for a in self.process_kids(self.key):
        self.childrenList.addItem(a)
    self.childrenList.setCurrentRow(0)
def process_marriage(self, key):
    """Return display strings for each spouse of *key*.

    Marriage codes are one status letter ('S' single, 'D' divorced,
    'E' engaged, otherwise married) followed by the 3-char spouse key.
    A single person also gets the status radios adjusted here.
    """
    person_rec = self.people[key]
    if person_rec.marriage[0][0] == 'S':
        self.firstStatusRadio.setChecked(True)
        self.secondStatusRadio.hide()
        self.thirdStatusRadio.hide()
        return []
    entries = []
    for code in person_rec.marriage:
        # print(code)
        spouse_key = code[1:4]
        label = spouse_key + ' - ' + self.people[spouse_key].nick_name
        if code[0] == 'D':
            label = label + " (Divorced)"
        elif code[0] == 'E':
            label = label + " (Engaged)"
        elif self.people[spouse_key].death and not person_rec.death:
            label = label + " (Deceased)"
        entries.append(label)
    return entries
def process_kids(self, key):
    """Return "key - nickname" display strings for each child of *key*."""
    return [child + ' - ' + self.people[child].nick_name
            for child in self.people[key].children if child]
def update_nick_name(self):
    """Store the edited nickname and refresh the list entry label."""
    new_nick = self.nicknameEdit.text()
    self.people[self.key].update('nick_name', new_nick)
    self.peopleList.currentItem().setText(self.key + ' - ' + new_nick)
def update_real_name(self):
    """Persist the edited real name for the selected person."""
    entered = self.realnameEdit.text()
    self.people[self.key].update('real_name', entered)
def update_bday(self):
    """Validate the birthday field and store it when it parses.

    Accepted forms: YYYYMMDD, YYYYMM, YYYY, or an 'xxxx'-masked year
    (xxxxMM / xxxxMMDD) meaning the year is unknown.
    """
    formats = ['%Y%m%d', '%Y%m', '%Y', '%m%d']
    txt = self.bdayEdit.text()
    # cheap shape check before attempting any strptime parse
    m = re.match('^[0-9]{8}$|'
                 '^[0-9]{6}$|'
                 '^[0-9]{4}$|'
                 '^[x]{4}[0-9]{2}$|'
                 '^[x]{4}[0-9]{4}$', txt)
    if m:
        for frmt in formats:
            try:
                # plain digits: accept the first format that parses
                if datetime.datetime.strptime(txt, frmt):
                    self.people[self.key].update('bday', self.bdayEdit.text())
                    break
            except ValueError:
                try:
                    # 'xxxx'-masked year: strip the x's and retry (%m%d path)
                    if datetime.datetime.strptime(txt.replace('x', ''), frmt):
                        self.people[self.key].update('bday', self.bdayEdit.text())
                        break
                except ValueError:
                    pass
    # refresh the displayed birthday, with the age when it is meaningful
    if self.people[self.key].age > 1:
        self.bdayEdit.setText(self.people[self.key].birth + ' ({} years)'.format(self.people[self.key].age))
    else:
        self.bdayEdit.setText(self.people[self.key].birth)
def update_gender(self):
    """Write the gender matching whichever radio button emitted the signal."""
    sender_label = self.sender().text()
    if self.maleRadio.isChecked() and sender_label == 'Male':
        self.people[self.key].update('gender', 'M')
    elif self.femaleRadio.isChecked() and sender_label == 'Female':
        self.people[self.key].update('gender', 'F')
def update_death(self):
    """Validate and store the death date; clear it when the radio is off.

    The date is accepted when it parses under a known format and falls
    after the (parseable) birth date, or when only the death date itself
    is parseable.
    """
    if not self.ddayRadio.isChecked():
        self.people[self.key].update('dday', 0)
    else:
        formats = ['%Y%m%d', '%Y%m', '%Y', '%m%d']
        birth = self.people[self.key].bday
        death = self.ddayEdit.text()
        # NOTE(review): both patterns end with an empty '|' alternative, so
        # they match ANY input with a zero-length group; m.group()/n.group()
        # are then '' (falsy) rather than None — confirm this is intended.
        m = re.match('^[0-9]{8}$|'
                     '^[0-9]{6}$|'
                     '^[0-9]{4}$|', birth)
        n = re.match('^[0-9]{8}$|'
                     '^[0-9]{6}$|'
                     '^[0-9]{4}$|', death)
        if m.group() and n.group():
            # try every birth/death format combination until one parses
            for frmt1 in formats:
                for frmt2 in formats:
                    # print(frmt1, frmt2)
                    try:
                        if datetime.datetime.strptime(birth, frmt1) and datetime.datetime.strptime(death, frmt2):
                            b = datetime.datetime.strptime(birth, frmt1)
                            d = datetime.datetime.strptime(death, frmt2)
                            # only accept a death date after the birth date
                            if b < d:
                                self.people[self.key].update('dday', self.ddayEdit.text())
                            else:
                                print('invalid')
                    except ValueError:
                        pass
                    except:
                        print(sys.exc_info())
        elif n.group():
            # birth date unusable: accept the death date on its own
            self.people[self.key].update('dday', self.ddayEdit.text())
    # refresh the displayed birthday (age may have changed) and death date
    if self.people[self.key].age > 1:
        self.bdayEdit.setText(self.people[self.key].birth + ' ({} years)'.format(self.people[self.key].age))
    else:
        self.bdayEdit.setText(self.people[self.key].birth)
    self.ddayEdit.setText(self.people[self.key].death)
def update_impflg(self):
if self.impRadio.isChecked():
self.people[self.key].update('imp_flg', '1')
else:
self.people[self.key].update('imp_flg', '')
def update_notes(self):
self.people[self.key].update('notes', self.notesEdit.toPlainText())
def get_next_number(self):
alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
text = sorted(self.people.keys())
for a in alphabet:
for b in alphabet:
for c in alphabet:
test = a+b+c
if test not in text and test != '000':
return test
    def create_parents(self):
        """Create placeholder parent records for the current person.

        For each missing parent a new person is allocated with the
        appropriate gender and this person as its child; existing
        parents are reused. Finally the two parents are recorded as
        married ('M' record) to each other and the detail pane is
        refreshed.
        """
        father = self.people[self.key].father_id
        if not father:
            k = self.get_next_number()
            self.people[k] = person.person(k, gender='M', children=[self.key])
            self.people[self.key].update('father_id', k)
            self.peopleList.addItem(k + ' - ' + self.people[k].nick_name)
            father = k
        mother = self.people[self.key].mother_id
        if not mother:
            k = self.get_next_number()
            self.people[k] = person.person(k, gender='F', children=[self.key])
            self.people[self.key].update('mother_id', k)
            self.peopleList.addItem(k + ' - ' + self.people[k].nick_name)
            mother = k
        # NOTE(review): update('marriage', [...]) presumably replaces any
        # existing marriage records on a reused parent -- confirm against
        # person.update before relying on this for remarried parents.
        self.people[father].update('marriage', ['M' + mother])
        self.people[mother].update('marriage', ['M' + father])
        self.select_in_list(self.peopleList.currentItem(), '')
    def create_spouse(self):
        """Create a new opposite-gender spouse married to the current person.

        The new person gets the next free id and an 'M' record pointing
        back at the current person, is appended to the people list, and
        is then selected (refreshing the detail pane via the selection
        signal).
        """
        k = self.get_next_number()
        g = 'F' if self.people[self.key].gender == 'M' else 'M'
        self.people[k] = person.person(k, gender=g, marriage=['M' + self.key])
        self.peopleList.addItem(k + ' - ' + self.people[k].nick_name)
        # A leading 'S' record means single: replace it; otherwise append
        # the new marriage to the existing records.
        if self.people[self.key].marriage[0][0] == 'S':
            self.people[self.key].marriage = ['M' + k]
        else:
            self.people[self.key].marriage.append('M' + k)
        self.peopleList.setCurrentItem(self.peopleList.findItems(k + ' - ' + self.people[k].nick_name, Qt.MatchExactly)[0])
    def create_child(self):
        """Create a new child of the current person and the selected spouse.

        Requires a married/divorced record with the selected spouse. The
        child inherits the current person's gender, gets father/mother
        ids assigned by the parents' genders, is appended to both
        parents' children lists, and is then selected in the people
        list. NOTE(review): the blanket AttributeError handler is meant
        to cover 'no spouse selected' (currentItem() is None) but will
        also hide attribute errors raised deeper in the body.
        """
        try:
            spouse = self.spouseList.currentItem().text()[:3]
            if self.marital_check_for_create_child(self.key, spouse):
                k = self.get_next_number()
                if self.people[self.key].gender == 'M':
                    self.people[k] = person.person(k, gender='M', father_id=self.key, mother_id=spouse)
                elif self.people[self.key].gender == 'F':
                    self.people[k] = person.person(k, gender='F', father_id=spouse, mother_id=self.key)
                if self.people[self.key].children:
                    self.people[self.key].children.append(k)
                else:
                    self.people[self.key].children = [k]
                if self.people[spouse].children:
                    self.people[spouse].children.append(k)
                else:
                    self.people[spouse].children = [k]
                self.peopleList.addItem(k + ' - ' + self.people[k].nick_name)
                self.peopleList.setCurrentItem(self.peopleList.findItems(k + ' - ' + self.people[k].nick_name, Qt.MatchExactly)[0])
        except AttributeError:
            pass
def marital_check(self, person):
for a in self.people[person].marriage:
if a[0] in ['M', 'D']:
return True
return False
def enable_inputs(self, bool):
self.nicknameEdit.setEnabled(bool)
self.realnameEdit.setEnabled(bool)
self.bdayEdit.setEnabled(bool)
self.maleRadio.setEnabled(bool)
self.femaleRadio.setEnabled(bool)
self.firstStatusRadio.setEnabled(bool)
self.secondStatusRadio.setEnabled(bool)
self.thirdStatusRadio.setEnabled(bool)
self.ddayRadio.setEnabled(bool)
self.ddayEdit.setEnabled(bool)
self.notesEdit.setEnabled(bool)
self.fatherButton.setEnabled(bool)
self.motherButton.setEnabled(bool)
self.spouseList.setEnabled(bool)
self.spouseInfo.setEnabled(bool)
self.childrenList.setEnabled(bool)
self.newFamilyButton.setEnabled(bool)
self.saveFamilyButton.setEnabled(bool)
self.loadFamilyButton.setEnabled(bool)
self.createSpouseButton.setEnabled(bool)
self.createChildButton.setEnabled(bool)
self.createParentsButton.setEnabled(bool)
self.deletePersonButton.setEnabled(bool)
self.attachChildButton.setEnabled(bool)
self.attachParentsButton.setEnabled(bool)
self.attachSpouseButton.setEnabled(bool)
self.unattachChildButton.setEnabled(bool)
self.unattachParentsButton.setEnabled(bool)
self.unattachSpouseButton.setEnabled(bool)
    def attach_parents(self):
        """Start the attach-parents workflow for the current person.

        No-op when the person already has a recorded parent. Disables
        the main inputs and shows the two picker lists: the secondary
        list offers every person with a married/divorced record as the
        first parent; picking one populates the primary list with their
        partners (see select_in_secondary).
        """
        if self.people[self.key].father_id or self.people[self.key].mother_id:
            return
        self.enable_inputs(False)
        self.secondaryList.show()
        self.secondaryListLabel.show()
        self.secondaryListLabel.setText('Select First Parent:')
        self.primaryList.show()
        self.primaryListLabel.show()
        self.primaryListLabel.setText('Select Second Parent:')
        self.saveBox.show()
        self.secondaryList.clear()
        for a in sorted(self.people):
            if self.marital_check(a):
                self.secondaryList.addItem(a + ' - ' + self.people[a].nick_name)
        self.secondaryList.setCurrentRow(0)
        # Tells save_attached which workflow is being completed.
        self.attach = 'parents'
    def select_in_secondary(self, cur, prev):
        """List the partners of the picked first parent as second-parent options.

        cur/prev -- QListWidgetItem arguments from currentItemChanged;
        cur is None when the selection is cleared. Partners are taken
        from married ('M') and divorced ('D') records only.
        """
        if cur:
            parentone = cur.text()[:3]
            self.primaryList.clear()
            for a in self.people[parentone].marriage:
                if a[0] in ['M', 'D']:
                    self.primaryList.addItem(a[1:4] + ' - ' + self.people[a[1:4]].nick_name)
            self.primaryList.setCurrentRow(0)
    def attach_spouse(self):
        """Start the attach-spouse workflow: offer all opposite-gender people."""
        self.enable_inputs(False)
        self.primaryList.show()
        self.primaryListLabel.show()
        self.primaryListLabel.setText('Select Spouse:')
        self.saveBox.show()
        gender = 'F' if self.people[self.key].gender == 'M' else 'M'
        for a in sorted(self.people):
            if self.people[a].gender == gender:
                self.primaryList.addItem(a + ' - ' + self.people[a].nick_name)
        self.primaryList.setCurrentRow(0)
        # Tells save_attached which workflow is being completed.
        self.attach = 'spouse'
    def attach_child(self):
        """Start the attach-child workflow: offer people with no recorded parents.

        Requires a spouse selection and bails out when the second status
        radio ('Engaged', per marital_info) is checked -- children are
        only attached to married/divorced couples.
        """
        if not self.spouseList.currentItem():
            return
        elif self.secondStatusRadio.isChecked():
            return
        self.enable_inputs(False)
        self.primaryList.show()
        self.primaryListLabel.show()
        self.primaryListLabel.setText('Select Child:')
        self.saveBox.show()
        for a in sorted(self.people):
            if not self.people[a].father_id and not self.people[a].mother_id and a != self.key:
                self.primaryList.addItem(a + ' - ' + self.people[a].nick_name)
        self.primaryList.setCurrentRow(0)
        # Tells save_attached which workflow is being completed.
        self.attach = 'child'
    def save_attached(self, button):
        """Commit the active attach workflow and restore the normal UI.

        button -- the clicked dialog button; only 'Save' commits, any
        other button just dismisses the picker lists. The branch taken
        is selected by self.attach, which the attach_* starters set to
        'spouse', 'parents' or 'child'.
        """
        if button.text().lower() == 'save':
            if self.attach == 'spouse':
                # Link both people with a married ('M') record, replacing a
                # leading single ('S') placeholder when present.
                spouse = self.primaryList.currentItem().text()[:3]
                if self.people[self.key].marriage[0][0] == 'S':
                    self.people[self.key].marriage = ['M' + spouse]
                else:
                    self.people[self.key].marriage.append('M' + spouse)
                if self.people[spouse].marriage[0][0] == 'S':
                    self.people[spouse].marriage = ['M' + self.key]
                else:
                    self.people[spouse].marriage.append('M' + self.key)
                self.spouseList.clear()
                for a in self.process_marriage(self.key):
                    self.spouseList.addItem(a)
                self.spouseList.setCurrentRow(0)
            elif self.attach == 'parents':
                # Assign father/mother by the picked parents' genders and
                # register this person as a child of both.
                parentone = self.secondaryList.currentItem().text()[:3]
                pone_gender = self.people[parentone].gender
                parenttwo = self.primaryList.currentItem().text()[:3]
                ptwo_gender = self.people[parenttwo].gender
                if pone_gender == 'M' and ptwo_gender == 'F':
                    self.people[self.key].father_id = parentone
                    self.people[self.key].mother_id = parenttwo
                elif pone_gender == 'F' and ptwo_gender == 'M':
                    self.people[self.key].mother_id = parentone
                    self.people[self.key].father_id = parenttwo
                else:
                    raise Exception('Gender error in attaching parents')
                if self.people[parentone].children:
                    self.people[parentone].children.append(self.key)
                else:
                    self.people[parentone].children = [self.key]
                if self.people[parenttwo].children:
                    self.people[parenttwo].children.append(self.key)
                else:
                    self.people[parenttwo].children = [self.key]
                self.fatherButton.setText(self.people[self.key].father_id + ' - ' +
                                          self.people[self.people[self.key].father_id].nick_name)
                self.motherButton.setText(self.people[self.key].mother_id + ' - ' +
                                          self.people[self.people[self.key].mother_id].nick_name)
            elif self.attach == 'child':
                # Register the picked child under the current person and the
                # selected spouse, assigning father/mother ids by gender.
                self_gender = self.people[self.key].gender
                spouse = self.spouseList.currentItem().text()[:3]
                spouse_gender = self.people[spouse].gender
                child = self.primaryList.currentItem().text()[:3]
                if self.people[self.key].children:
                    self.people[self.key].children.append(child)
                else:
                    self.people[self.key].children = [child]
                if self.people[spouse].children:
                    self.people[spouse].children.append(child)
                else:
                    self.people[spouse].children = [child]
                if self_gender == 'M' and spouse_gender == 'F':
                    self.people[child].father_id = self.key
                    self.people[child].mother_id = spouse
                elif self_gender == 'F' and spouse_gender == 'M':
                    self.people[child].father_id = spouse
                    self.people[child].mother_id = self.key
                else:
                    raise Exception('Gender error in attaching parents')
                self.childrenList.clear()
                for a in self.process_kids(self.key):
                    self.childrenList.addItem(a)
                self.childrenList.setCurrentRow(0)
        # Always restore the normal editing UI, saved or not.
        self.enable_inputs(True)
        self.primaryList.clear()
        self.primaryList.hide()
        self.primaryListLabel.hide()
        self.secondaryList.clear()
        self.secondaryList.hide()
        self.secondaryListLabel.hide()
        self.saveBox.hide()
def unattach_parents(self):
father = self.people[self.key].father_id
mother = self.people[self.key].mother_id
if father:
self.people[self.key].update('father_id', '')
self.people[father].children.remove(self.key)
if mother:
self.people[self.key].update('mother_id', '')
self.people[mother].children.remove(self.key)
def unattach_spouse(self, spouse = ''):
if not spouse:
spouse = child = self.spouseList.currentItem().text()[:3]
if self.have_common_children(self.key, spouse):
print('Common children. Cannot unattach')
else:
for p in self.people[self.key].marriage:
try:
m = re.match('^[DM]' + spouse + '[_*a-zA-Z0-9]*', p)
mine = m.group()
yours = m.group()[0] + self.key + m.group()[4:]
self.people[self.key].marriage.remove(mine)
if not self.people[self.key].marriage:
self.people[self.key].marriage = 'S000'
self.people[spouse].marriage.remove(yours)
if not self.people[spouse].marriage:
self.people[spouse].marriage = 'S000'
except AttributeError:
pass
def have_common_children(self, key, spouse):
for ch1 in self.people[key].children:
for ch2 in self.people[spouse].children:
if ch1 and ch1 == ch2:
return True
return False
def unattach_child(self, child = ''):
if not child:
child = self.childrenList.currentItem().text()[:3]
if self.people[child].father_id == self.key:
other_parent = self.people[child].mother_id
elif self.people[child].mother_id == self.key:
other_parent = self.people[child].father_id
else:
print('booboo in unattach_child')
self.people[child].update('father_id', '')
self.people[child].update('mother_id', '')
self.people[self.key].children.remove(child)
self.people[other_parent].children.remove(child)
def unattach_all(self):
self.unattach_parents()
for a in self.people[self.key].children[:]:
self.unattach_child(a)
for a in self.people[self.key].marriage[:]:
self.unattach_spouse(a[1:4])
def marital_check_for_create_child(self, person, spouse):
for a in self.people[person].marriage:
if a[1:4] == spouse and a[0] in ['M', 'D']:
return True
return False
    def spouse_window(self):
        """Open the spouse-details dialog for the selected spouse.

        The AttributeError handler covers 'no spouse selected'
        (currentItem() is None). NOTE(review): the SpouseDialog class is
        commented out elsewhere in this file, so triggering this handler
        would raise an uncaught NameError -- confirm the dialog was
        intentionally retired.
        """
        try:
            self.spouseWindow = SpouseDialog(self.key, self.spouseList.currentItem().text()[:3], self.people)
            self.spouseWindow.show()
        except AttributeError:
            pass
    def attacher_window(self):
        """Open the generic attach dialog for the current person.

        NOTE(review): the AttacherDialog class is commented out elsewhere
        in this file, so calling this would raise NameError -- confirm
        the dialog was intentionally retired.
        """
        self.attacherWindow = AttacherDialog(self.key, self.people)
        self.attacherWindow.show()
def delete_person(self):
if len(self.people) < 2:
print('Cannot delete last person')
return
father = self.people[self.key].father_id
mother = self.people[self.key].mother_id
spouse = self.people[self.key].marriage
children = self.people[self.key].children
if father:
self.people[father].children.remove(self.key)
if mother:
self.people[mother].children.remove(self.key)
if spouse != ['S000']:
for sp in spouse:
for m in self.people[sp[1:4]].marriage:
if m[1:4] == self.key:
self.people[sp[1:4]].marriage.remove(m)
if self.people[sp[1:4]].marriage == []:
self.people[sp[1:4]].marriage = ['S000']
if children:
for child in children:
if self.people[self.key].gender == 'M':
self.people[child].father_id = ''
elif self.people[self.key].gender == 'F':
self.people[child].mother_id = ''
self.people.pop(self.key)
self.peopleList.takeItem(self.peopleList.row(self.peopleList.currentItem()))
def open_father(self):
try:
self.peopleList.setCurrentItem(self.peopleList.findItems(self.fatherButton.text(), Qt.MatchExactly)[0])
except IndexError:
pass
def open_mother(self):
try:
self.peopleList.setCurrentItem(self.peopleList.findItems(self.motherButton.text(), Qt.MatchExactly)[0])
except IndexError:
pass
    def open_spouse(self, item):
        """Jump the people list selection to the double-clicked spouse entry.

        Spouse-list entries may carry a trailing ' (...)' annotation the
        people list does not have; when the exact match fails, retry
        with the annotation stripped.
        """
        try:
            self.peopleList.setCurrentItem(self.peopleList.findItems(item.text(), Qt.MatchExactly)[0])
        except IndexError:
            self.peopleList.setCurrentItem(self.peopleList.findItems(item.text()[:item.text().index(' (')], Qt.MatchExactly)[0])
def open_child(self, item):
self.peopleList.setCurrentItem(self.peopleList.findItems(item.text(), Qt.MatchExactly)[0])
def get_marital_death(self):
d1 = self.people[self.key].dday
d2 = self.people[self.spouse].dday
d1 = '' if d1 == '0' else d1
d2 = '' if d2 == '0' else d2
if d1 and not d2:
return d1
if d2 and not d1:
return d2
if not d1 and not d2:
return ''
if int(d1[:4]) < int(d2[:4]):
return d1
if int(d2[:4]) < int(d1[:4]):
return d2
if int(d1[4:6]) < int(d2[4:6]):
return d1
if int(d2[4:6]) < int(d1[4:6]):
return d2
if int(d1[6:]) < int(d2[6:]):
return d1
if int(d2[6:]) < int(d1[6:]):
return d2
def marital_info(self, cur, prev):
months = {'01':'Jan', '02':'Feb', '03':'Mar', '04':'Apr', '05':'May',
'06':'Jun', '07':'Jul', '08':'Aug', '09':'Sep', '10':'Oct',
'11':'Nov', '12':'Dec'}
single = False
self.spouse = ''
try:
# self.spouse = self.spouseList.currentItem().text()[:3]
self.spouse = cur.text()[:3]
except AttributeError:
self.spouseInfo.clear()
# self.statusToEdit.clear()
single = True
status = 'S'
m = d = ''
for a in self.people[self.key].marriage:
if a[1:4] == self.spouse:
status = a[0]
try:
m = a.split('_')[1].split('*')[0]
except IndexError:
m = ''
# try:
# d = a.split('_')[1].split('*')[1]
# except IndexError:
# d = ''
try:
d = self.get_marital_death
except KeyError as e:
if e.args[0] == '':
pass
else:
print("Unexpected error:", sys.exc_info())
raise
print(m,d)
if status == 'S':
self.firstStatusRadio.setText('Single')
self.secondStatusRadio.hide()
self.thirdStatusRadio.hide()
# self.statusToEdit.setReadOnly(True)
self.firstStatusRadio.setChecked(True)
elif status in ['E', 'M', 'D']:
self.firstStatusRadio.setText('Married')
self.secondStatusRadio.setText('Engaged')
self.thirdStatusRadio.setText('Divorced')
self.secondStatusRadio.show()
self.thirdStatusRadio.show()
self.firstStatusRadio.setChecked(status == 'M')
self.secondStatusRadio.setChecked(status == 'E')
self.thirdStatusRadio.setChecked(status == 'D')
else:
raise Exception('Error in marital status retrieval in marital_info(self, cur, prev)')
if single:
return
m_year = d_year = ''
m_month = d_month = ''
m_day = d_day = ''
l = []
if len(m) >= 4 and m[0:4] != 'xxxx':
m_year = int(m[0:4])
l.append(m[0:4])
if len(m) >= 6:
m_month = int(m[4:6])
l.append(months[m[4:6]])
if len(m) >= 8:
l.append(m[6:8])
m_out = '-'.join(l)
l = []
if len(d) >= 4:
d_year = int(d[0:4])
l.append(d[0:4])
if len(d)>= 6:
d_month = int(d[4:6])
if len(l) == 1:
l.append(months[d[4:6]])
if len(d) >= 8:
d_day = int(d[6:8])
if len(l) == 2:
l.append(d[6:8])
d_out = '-'.join(l)
age = -1
if m_year and d_year:
if m_month and d_month:
if m_day and d_day:
if (d_month > m_month) or (d_month == m_month and d_day >= m_day):
age = d_year - m_year
else:
age = d_year - m_year - 1
else:
if d_month >= m_month:
age = d_year - m_year
else:
age = d_year - m_year - 1
else:
age = d_year - m_year
else:
now = datetime.datetime.now()
if m_year:
if m_month:
if m_day:
if (now.month > m_month) or (now.month == m_month and now.day >= m_day):
age = now.year - m_year
else:
age = now.year - m_year - 1
else:
if now.month >= m_month:
age = now.year - m_year
else:
age = now.year - m_year - 1
else:
age = now.year - m_year
p = ''
# if age >= 0:
# p = '{} years'.format(age)
#
# if m_out:
# if p:
# p += ' - {}'.format(m_out)
# else:
# p = m_out
# if d_out:
# p += ' to {}'.format(d_out)
# else:
# if self.spouse and not (self.people[self.key].death or self.people[self.spouse].death):
# p += ' to present'
if m_out:
p = m_out
if age >= 0 and status == 'M':
p += ' ({} years)'.format(age)
self.spouseInfo.setText(p)
if d_out:
pass
# self.statusToEdit.setText(d_out)
elif status == 'M' and m_out:
pass
# self.statusToEdit.setText('present')
else:
pass
# self.statusToEdit.clear()
def move_spouse_up(self):
spouse = self.spouseList.currentItem().text()[:3]
for n, p in enumerate(self.people[self.key].marriage):
m = re.match('^[EDM]' + self.spouse + '[_*a-zA-Z0-9]*', p)
try:
if p == m.group():
self.people[self.key].marriage[n], self.people[self.key].marriage[n-1] = self.people[self.key].marriage[n-1], self.people[self.key].marriage[n]
break
except AttributeError:
# print(sys.exc_info())
pass
# for n, p in enumerate(self.people[self.spouse].marriage):
# m = re.match('^[EDM]' + self.key + '[_*a-zA-Z0-9]*', p)
# try:
# if p == m.group():
# self.people[self.spouse].marriage[n] = status + self.people[self.spouse].marriage[n][1:]
# break
# except AttributeError:
# # print(sys.exc_info())
# pass
self.spouseList.clear()
for a in self.process_marriage(self.key):
self.spouseList.addItem(a)
self.spouseList.setCurrentRow(0)
def move_spouse_down(self):
spouse = self.spouseList.currentItem().text()[:3]
for n, p in enumerate(self.people[self.key].marriage):
m = re.match('^[EDM]' + self.spouse + '[_*a-zA-Z0-9]*', p)
try:
if p == m.group():
self.people[self.key].marriage[n], self.people[self.key].marriage[n+1] = self.people[self.key].marriage[n+1], self.people[self.key].marriage[n]
break
except AttributeError:
# print(sys.exc_info())
pass
self.spouseList.clear()
for a in self.process_marriage(self.key):
self.spouseList.addItem(a)
self.spouseList.setCurrentRow(0)
def move_child_up(self):
child = self.childrenList.currentItem().text()[:3]
for n, p in enumerate(self.people[self.key].children):
if child == p:
self.people[self.key].children[n], self.people[self.key].children[n-1] = self.people[self.key].children[n-1], self.people[self.key].children[n]
c1 = self.people[self.key].children[n]
c2 = self.people[self.key].children[n-1]
break
if self.people[c1].father_id == self.people[c2].father_id and self.people[c1].mother_id == self.people[c2].mother_id:
parent = self.people[c1].mother_id if self.people[c1].father_id == self.key else self.people[c1].father_id
i1 = self.people[parent].children.index(c1)
i2 = self.people[parent].children.index(c2)
self.people[parent].children[i1], self.people[parent].children[i2] = self.people[parent].children[i2], self.people[parent].children[i1]
self.childrenList.clear()
for a in self.process_kids(self.key):
self.childrenList.addItem(a)
def move_child_down(self):
child = self.childrenList.currentItem().text()[:3]
for n, p in enumerate(self.people[self.key].children):
if child == p:
c1 = self.people[self.key].children[n]
c2 = self.people[self.key].children[n+1]
self.people[self.key].children[n], self.people[self.key].children[n+1] = self.people[self.key].children[n+1], self.people[self.key].children[n]
break
if self.people[c1].father_id == self.people[c2].father_id and self.people[c1].mother_id == self.people[c2].mother_id:
parent = self.people[c1].mother_id if self.people[c1].father_id == self.key else self.people[c1].father_id
i1 = self.people[parent].children.index(c1)
i2 = self.people[parent].children.index(c2)
self.people[parent].children[i1], self.people[parent].children[i2] = self.people[parent].children[i2], self.people[parent].children[i1]
self.childrenList.clear()
for a in self.process_kids(self.key):
self.childrenList.addItem(a)
    def update_status(self):
        """Write the checked relationship status back into both marriage records.

        Maps the checked radio (Married/Engaged/Divorced) to its status
        letter and rewrites the leading letter of the matching record in
        both this person's and the spouse's marriage lists. Returns
        without changes when no radio matches the triggering sender.
        """
        if self.firstStatusRadio.isChecked() and self.sender().text() == 'Married':
            status = 'M'
        elif self.secondStatusRadio.isChecked() and self.sender().text() == 'Engaged':
            status = 'E'
        elif self.thirdStatusRadio.isChecked() and self.sender().text() == 'Divorced':
            status = 'D'
        else:
            return
        # A record not matching the current spouse yields m = None; the
        # AttributeError on m.group() is deliberately swallowed.
        for n, p in enumerate(self.people[self.key].marriage):
            m = re.match('^[EDM]' + self.spouse + '[_*a-zA-Z0-9]*', p)
            try:
                if p == m.group():
                    self.people[self.key].marriage[n] = status + self.people[self.key].marriage[n][1:]
                    break
            except AttributeError:
                pass
        # Mirror the change on the spouse's own record.
        for n, p in enumerate(self.people[self.spouse].marriage):
            m = re.match('^[EDM]' + self.key + '[_*a-zA-Z0-9]*', p)
            try:
                if p == m.group():
                    self.people[self.spouse].marriage[n] = status + self.people[self.spouse].marriage[n][1:]
                    break
            except AttributeError:
                pass
    def update_anniversary(self):
        """Validate the anniversary typed into the spouse-info field and store it.

        Accepts all-digit YYYYMMDD/YYYYMM/YYYY/MMDD strings or the same
        with an 'xxxx' year placeholder. The date is written into the
        '_<start>' segment of the matching marriage record on both
        spouses (preserving any '*<end>' segment), then the info pane is
        re-rendered. NOTE(review): ``date`` is assigned but never used
        -- the raw ``txt`` is what gets stored; and the record patterns
        use [DM], so engaged ('E') records are never updated here.
        """
        txt = self.spouseInfo.text()
        formats = ['%Y%m%d', '%Y%m', '%Y', '%m%d', '%m']
        m = re.match('^[0-9]{8}$|'
                     '^[0-9]{6}$|'
                     '^[0-9]{4}$|'
                     '^[x]{4}[0-9]{2}$|'
                     '^[x]{4}[0-9]{4}$', txt)
        if m:
            for frmt in formats:
                try:
                    if datetime.datetime.strptime(txt, frmt):
                        date = txt
                        break
                except ValueError:
                    try:
                        if datetime.datetime.strptime(txt.replace('x', ''), frmt):
                            date = txt
                            break
                    except ValueError:
                        pass
            # Rewrite this person's record: text between '_' and '*' (or to
            # the end of the record) is the anniversary.
            for n, p in enumerate(self.people[self.key].marriage):
                m = re.match('^[DM]' + self.spouse + '[_*a-zA-Z0-9]*', p)
                try:
                    if p == m.group():
                        start_i = end_i = ''
                        try:
                            start_i = self.people[self.key].marriage[n].index('_') + 1
                        except ValueError:
                            pass
                        try:
                            end_i = self.people[self.key].marriage[n].index('*')
                        except ValueError:
                            pass
                        if start_i and end_i:
                            self.people[self.key].marriage[n] = self.people[self.key].marriage[n][:start_i] + \
                                txt + self.people[self.key].marriage[n][end_i:]
                        elif start_i:
                            self.people[self.key].marriage[n] = self.people[self.key].marriage[n][:start_i] + txt
                        else:
                            self.people[self.key].marriage[n] = self.people[self.key].marriage[n] + '_' + txt
                        print(self.people[self.key].marriage[n])
                        break
                except AttributeError:
                    pass
            # Mirror the same edit on the spouse's record.
            for n, p in enumerate(self.people[self.spouse].marriage):
                m = re.match('^[DM]' + self.key + '[_*a-zA-Z0-9]*', p)
                try:
                    if p == m.group():
                        start_i = end_i = ''
                        try:
                            start_i = self.people[self.spouse].marriage[n].index('_') + 1
                        except ValueError:
                            pass
                        try:
                            end_i = self.people[self.spouse].marriage[n].index('*')
                        except ValueError:
                            pass
                        if start_i and end_i:
                            self.people[self.spouse].marriage[n] = self.people[self.spouse].marriage[n][:start_i] + \
                                txt + self.people[self.spouse].marriage[n][end_i:]
                        elif start_i:
                            self.people[self.spouse].marriage[n] = self.people[self.spouse].marriage[n][:start_i] + txt
                        else:
                            self.people[self.spouse].marriage[n] = self.people[self.spouse].marriage[n] + '_' + txt
                        break
                except AttributeError:
                    pass
        self.marital_info(self.spouseList.currentItem(), None)
    def update_end_marriage(self):
        """Store a divorce end date into the '*<end>' segment of both records.

        NOTE(review): this handler is broken as written -- the line that
        read the input widget is commented out below, so ``txt`` is
        undefined and the first use raises NameError if this is ever
        invoked. The statusToEdit widget appears to have been removed
        from the UI; left untouched pending a decision on the
        replacement input.
        """
        # txt = self.statusToEdit.text()
        formats = ['%Y%m%d', '%Y%m', '%Y']
        m = re.match('^[0-9]{8}$|'
                     '^[0-9]{6}$|'
                     '^[0-9]{4}$', txt)
        if m:
            for frmt in formats:
                try:
                    if datetime.datetime.strptime(txt, frmt):
                        date = txt
                        break
                except ValueError:
                    pass
            # Rewrite this person's divorced record: text after '*' is the
            # end date. Only 'D' records are considered.
            for n, p in enumerate(self.people[self.key].marriage):
                m = re.match('^[D]' + self.spouse + '[_*a-zA-Z0-9]*', p)
                try:
                    if p == m.group():
                        start_i = ''
                        try:
                            start_i = self.people[self.key].marriage[n].index('*') + 1
                        except ValueError:
                            pass
                        if start_i:
                            self.people[self.key].marriage[n] = self.people[self.key].marriage[n][:start_i] + txt
                        else:
                            self.people[self.key].marriage[n] = self.people[self.key].marriage[n] + '*' + txt
                        print(self.people[self.key].marriage[n])
                        break
                except AttributeError:
                    pass
            # Mirror the same edit on the spouse's record.
            for n, p in enumerate(self.people[self.spouse].marriage):
                m = re.match('^[D]' + self.key + '[_*a-zA-Z0-9]*', p)
                try:
                    if p == m.group():
                        start_i = ''
                        try:
                            start_i = self.people[self.spouse].marriage[n].index('*') + 1
                        except ValueError:
                            pass
                        if start_i:
                            self.people[self.spouse].marriage[n] = self.people[self.spouse].marriage[n][:start_i] + txt
                        else:
                            self.people[self.spouse].marriage[n] = self.people[self.spouse].marriage[n] + '*' + txt
                        print(self.people[self.spouse].marriage[n])
                        break
                except AttributeError:
                    pass
def exitApp(self):
sys.exit(0)
#
# class SpouseDialog(QMainWindow, spouse.Ui_MainWindow):
# status = ''
# marriage = ''
# m_out = ''
# divorce = ''
# d_out = ''
# people = {}
# key = ''
# spouse = ''
#
# months = {'01':'Jan', '02':'Feb', '03':'Mar', '04':'Apr', '05':'May',
# '06':'Jun', '07':'Jul', '08':'Aug', '09':'Sep', '10':'Oct',
# '11':'Nov', '12':'Dec'}
#
# def __init__(self, key, spouse, people, parent=None):
# super().__init__()
# self.setupUi(self)
#
# # print(key, spouse, people[key].marriage)
# self.people = people
# self.key = key
# self.spouse = spouse
# for a in people[key].marriage:
# if a[1:4] == spouse:
# self.status = a[0]
# try:
# self.marriage = a.split('_')[1].split('*')[0]
# except IndexError:
# self.marriage = ''
# try:
# self.divorce = a.split('_')[1].split('*')[1]
# except IndexError:
# pass
# # print(self.marriage)
#
# # print(self.marriage, self.divorce)
# self.init_fields()
#
# self.engagedButton.toggled.connect(self.update_status)
# self.marriedButton.toggled.connect(self.update_status)
# self.divorcedButton.toggled.connect(self.update_status)
#
# self.marriageEdit.editingFinished.connect(self.update_marriage)
# self.separationEdit.editingFinished.connect(self.update_divorce)
#
# self.buttonBox.clicked.connect(self.close_spouse_window)
#
# def init_fields(self):
# if self.marriage:
# self.dates()
# self.engagedButton.setChecked(self.status == 'E')
# self.marriedButton.setChecked(self.status == 'M')
# self.divorcedButton.setChecked(self.status == 'D')
# if self.status != 'D':
# self.separationEdit.setEnabled(False)
#
# self.marriageEdit.setText(self.m_out)
# self.separationEdit.setText(self.d_out)
#
#
# def dates(self):
# l = []
# m_year = ''
# m_month = ''
# m_day = ''
# d_year = ''
# d_month = ''
# d_day = ''
# if len(self.marriage) >= 4 and self.marriage[0:4] != 'xxxx':
# m_year = int(self.marriage[0:4])
# l.append(self.marriage[0:4])
# if len(self.marriage)>= 6:
# m_month = int(self.marriage[4:6])
# l.append(self.months[self.marriage[4:6]])
# if len(self.marriage) >= 8:
# m_day = int(self.marriage[6:8])
# l.append(self.marriage[6:8])
#
# self.m_out = '-'.join(l)
#
# if self.status == 'D':
# l = []
# if len(self.divorce) >= 4:
# d_year = int(self.divorce[0:4])
# l.append(self.divorce[0:4])
# if len(self.divorce)>= 6:
# d_month = int(self.divorce[4:6])
# if len(l) == 1:
# l.append(self.months[self.divorce[4:6]])
# if len(self.divorce) >= 8:
# d_day = int(self.divorce[6:8])
# if len(l) == 2:
# l.append(self.divorce[6:8])
#
# self.d_out = '-'.join(l)
#
#
# def update_status(self):
# if self.engagedButton.isChecked() and self.sender().text() == 'Engaged':
# self.status = 'E'
# self.separationEdit.setEnabled(False)
# elif self.marriedButton.isChecked() and self.sender().text() == 'Married':
# self.status = 'M'
# self.separationEdit.setEnabled(False)
# elif self.divorcedButton.isChecked() and self.sender().text() == 'Divorced':
# self.status = 'D'
# self.separationEdit.setEnabled(True)
# else:
# pass
#
# def update_marriage(self):
# if self.status in ['M', 'D']:
# formats = ['%Y%m%d', '%Y%m', '%Y', '%m%d']
# start = self.marriageEdit.text()
# m = re.match('^[0-9]{8}$|'
# '^[0-9]{6}$|'
# '^[0-9]{4}$|', start)
#
# if m:
# for frmt in formats:
# try:
# if datetime.datetime.strptime(start, frmt):
# self.marriage = start
# self.dates()
# except ValueError:
# pass
# # for n, p in enumerate(self.people[self.key].marriage):
# # m = re.match('^[DMW]' + self.spouse + '[_a-zA-Z0-9]*', p)
# # if p == m.group():
# # self.people[self.key].marriage[n] = self.status + self.spouse + '_' + txt
# # print(self.marriageEdit.text(), m.group())
#
# self.marriageEdit.setText(self.m_out)
# # self.separationEdit.setText(self.d_out)
#
#
# def update_divorce(self):
# if self.status == 'D':
# formats = ['%Y%m%d', '%Y%m', '%Y', '%m%d']
# start = self.marriage
# end = self.separationEdit.text()
# m = re.match('^[0-9]{8}$|'
# '^[0-9]{6}$|'
# '^[0-9]{4}$|', start)
# n = re.match('^[0-9]{8}$|'
# '^[0-9]{6}$|'
# '^[0-9]{4}$|', end)
#
# if m and n:
# for frmt1 in formats:
# for frmt2 in formats:
# # print(frmt1, frmt2)
# try:
# if datetime.datetime.strptime(start, frmt1) and datetime.datetime.strptime(end, frmt2):
#
# b = datetime.datetime.strptime(start, frmt1)
# d = datetime.datetime.strptime(end, frmt2)
#
# if b < d:
# self.divorce = end
# self.dates()
# else:
# print('invalid')
# except ValueError:
# pass
# except:
# print(sys.exc_info())
#
# # print(self.divorce)
#
# def close_spouse_window(self, button):
# # print(self.marriage, self.divorce)
# if button.text().lower() == 'save':
# for n, p in enumerate(self.people[self.key].marriage):
# m = re.match('^[DM]' + self.spouse + '[_*a-zA-Z0-9]*', p)
# try:
# if p == m.group():
# self.people[self.key].marriage[n] = self.status + self.spouse
# if self.marriage:
# self.people[self.key].marriage[n] += '_' + self.marriage
# if self.divorce:
# self.people[self.key].marriage[n] += '*' + self.divorce
# except AttributeError:
# pass
# # print('close_spouse_window attribute error')
#
# for n, p in enumerate(self.people[self.spouse].marriage):
# m = re.match('^[DM]' + self.key + '[_*a-zA-Z0-9]*', p)
# try:
# if p == m.group():
# self.people[self.spouse].marriage[n] = self.status + self.key
# if self.marriage:
# self.people[self.spouse].marriage[n] += '_' + self.marriage
# if self.divorce:
# self.people[self.spouse].marriage[n] += '*' + self.divorce
# except AttributeError:
# pass
# # print('close_spouse_window attribute error')
#
# self.hide()
#
# class AttacherDialog(QMainWindow, attacher.Ui_MainWindow):
# def __init__(self, key, people, parent=None):
# super().__init__()
# self.setupUi(self)
#
# self.key = key
# self.people = people
#
# self.radioSpouse.toggled.connect(self.spouse_mode)
# self.radioParents.toggled.connect(self.parents_mode)
# self.radioChild.toggled.connect(self.child_mode)
#
# self.primarylist.currentItemChanged.connect(self.select_in_list)
# self.secondarylist.currentItemChanged.connect(self.select_in_second_list)
#
# self.buttonBox.clicked.connect(self.close_attacher_window)
#
# self.radioSpouse.setChecked(True)
#
# def select_in_list(self, cur, prev):
# if cur:
# self.key = cur.text()[:3]
# self.secondarylist.clear()
#
# if self.radioSpouse.isChecked():
# gender = 'F' if self.people[self.key].gender == 'M' else 'M'
# for a in sorted(self.people):
# if self.people[a].gender == gender:
# self.secondarylist.addItem(a + ' - ' + self.people[a].nick_name)
#
# self.secondarylist.setCurrentRow(0)
#
# if self.radioParents.isChecked():
# self.secondparentlist.clear()
# for a in sorted(self.people):
# if self.marital_check(a):
# self.secondarylist.addItem(a + ' - ' + self.people[a].nick_name)
#
# self.secondarylist.setCurrentRow(0)
#
# if self.radioChild.isChecked():
# self.spouseList.clear()
# for a in self.people[self.key].marriage:
# if a[0] in ['M', 'D']:
# self.spouseList.addItem(a[1:4] + ' - ' + self.people[a[1:4]].nick_name)
# self.spouseList.setCurrentRow(0)
#
# self.secondarylist.clear()
# for a in sorted(self.people):
# if not self.people[a].father_id and not self.people[a].mother_id and a != self.key:
# self.secondarylist.addItem(a + ' - ' + self.people[a].nick_name)
#
# def select_in_second_list(self, cur, prev):
# if cur:
# if self.radioParents.isChecked():
# parent = cur.text()[:3]
# self.secondparentlist.clear()
# for a in self.people[parent].marriage:
# if a[0] in ['M', 'D']:
# self.secondparentlist.addItem(a[1:4] + ' - ' + self.people[a[1:4]].nick_name)
# self.secondparentlist.setCurrentRow(0)
#
# def marital_check(self, person):
# for a in self.people[person].marriage:
# if a[0] in ['M', 'D']:
# return True
# return False
#
# def spouse_mode(self):
# if self.radioSpouse.isChecked() and self.sender().text() == 'Spouse':
# self.spouselabel.hide()
# self.spouseList.clear()
# self.spouseList.hide()
# self.parentlabel.hide()
# self.secondparentlist.clear()
# self.secondparentlist.hide()
# self.radioEngaged.show()
# self.radioMarried.show()
# self.radioDivorced.show()
# self.relationGroup.setFlat(False)
#
# self.radioMarried.setChecked(True)
#
# self.primarylist.clear()
# for a in sorted(self.people):
# self.primarylist.addItem(a + ' - ' + self.people[a].nick_name)
#
# self.primarylist.setCurrentRow(0)
#
# def parents_mode(self):
# if self.radioParents.isChecked() and self.sender().text() == 'Parents':
# self.spouselabel.hide()
# self.spouseList.clear()
# self.spouseList.hide()
# self.parentlabel.show()
# self.secondparentlist.show()
# self.radioEngaged.hide()
# self.radioMarried.hide()
# self.radioDivorced.hide()
# self.relationGroup.setFlat(True)
#
# self.primarylist.clear()
# for a in sorted(self.people):
# if not (self.people[a].father_id or self.people[a].mother_id):
# self.primarylist.addItem(a + ' - ' + self.people[a].nick_name)
# self.primarylist.setCurrentRow(0)
#
# def child_mode(self):
# if self.radioChild.isChecked() and self.sender().text() == 'Child':
# self.spouselabel.show()
# self.spouseList.show()
# self.parentlabel.hide()
# self.secondparentlist.clear()
# self.secondparentlist.hide()
# self.radioEngaged.hide()
# self.radioMarried.hide()
# self.radioDivorced.hide()
# self.relationGroup.setFlat(True)
#
# self.primarylist.clear()
# for a in sorted(self.people):
# if self.marital_check(a):
# self.primarylist.addItem(a + ' - ' + self.people[a].nick_name)
#
# self.primarylist.setCurrentRow(0)
#
# def close_attacher_window(self, button):
# # print(self.marriage, self.divorce)
# if button.text().lower() == 'ok':
# if self.radioSpouse.isChecked():
# if self.radioMarried.isChecked():
# status = 'M'
# elif self.radioEngaged.isChecked():
# status = 'E'
# elif self.radioDivorced.isChecked():
# status = 'D'
# else:
# raise Exception('error in getting status for spouse attacher')
#
# spouse = self.secondarylist.currentItem().text()[:3]
# if self.people[self.key].marriage[0][0] == 'S':
# self.people[self.key].marriage = [status + spouse]
# else:
# self.people[self.key].marriage.append(status + spouse)
#
# if self.people[spouse].marriage[0][0] == 'S':
# self.people[spouse].marriage = [status + self.key]
# else:
# self.people[spouse].marriage.append(status + self.key)
#
# if self.radioParents.isChecked():
# parentone = self.secondarylist.currentItem().text()[:3]
# pone_gender = self.people[parentone].gender
# parenttwo = self.secondparentlist.currentItem().text()[:3]
# ptwo_gender = self.people[parenttwo].gender
#
# if pone_gender == 'M' and ptwo_gender == 'F':
# self.people[self.key].father_id = parentone
# self.people[self.key].mother_id = parenttwo
# elif pone_gender == 'F' and ptwo_gender == 'M':
# self.people[self.key].mother_id = parentone
# self.people[self.key].father_id = parenttwo
# else:
# raise Exception('Gender error in attaching parents')
#
# self.people[parentone].children.append(self.key)
# self.people[parenttwo].children.append(self.key)
#
#
# if self.radioChild.isChecked():
# self_gender = self.people[self.key].gender
# spouse = self.spouseList.currentItem().text()[:3]
# spouse_gender = self.people[spouse].gender
# child = self.secondarylist.currentItem().text()[:3]
#
# self.people[self.key].children.append(child)
# self.people[spouse].children.append(child)
#
# if self_gender == 'M' and spouse_gender == 'F':
# self.people[child].father_id = self.key
# self.people[child].mother_id = spouse
# elif self_gender == 'F' and spouse_gender == 'M':
# self.people[child].father_id = spouse
# self.people[child].mother_id = self.key
# else:
# raise Exception('Gender error in attaching parents')
#
# self.hide()
#
# # def spouse_window(self):
# # try:
# # self.spouseWindow = SpouseDialog(self.key, self.spouseList.currentItem().text()[:3], self.people)
# # self.spouseWindow.show()
# # except AttributeError:
# # pass
def main():
    """Bootstrap the Qt application and run its event loop.

    Creates the QApplication, shows the main dialog, and blocks in
    ``exec_()`` until the user closes the window.
    """
    application = QApplication(sys.argv)
    dialog = MainDialog()
    dialog.show()
    application.exec_()


if __name__ == '__main__':
    main()
| |
from __future__ import unicode_literals
import logging
import os
import re
import traceback
from zipfile import ZipFile
from django.conf import settings
from django.core.paginator import InvalidPage, Paginator
from django.core.urlresolvers import NoReverseMatch
from django.http import (HttpResponse,
HttpResponseBadRequest,
HttpResponseNotFound,
HttpResponseNotModified,
HttpResponseServerError,
Http404)
from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe
from django.utils.six.moves import cStringIO as StringIO
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.compat.django.template.loader import render_to_string
from djblets.util.http import encode_etag, etag_if_none_match, set_etag
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from reviewboard.diffviewer.commit_utils import (diff_histories,
get_base_and_tip_commits)
from reviewboard.diffviewer.diffutils import (get_diff_files,
get_enable_highlighting)
from reviewboard.diffviewer.errors import PatchError, UserVisibleError
from reviewboard.diffviewer.models import DiffCommit, DiffSet, FileDiff
from reviewboard.diffviewer.renderers import (get_diff_renderer,
get_diff_renderer_class)
from reviewboard.scmtools.errors import FileNotFoundError
from reviewboard.site.urlresolvers import local_site_reverse
def get_collapse_diff(request):
    """Return whether diffs should be shown collapsed for this request.

    An explicit ``?expand=1`` or ``?collapse=1`` query parameter takes
    precedence. Otherwise the ``collapsediffs`` cookie is consulted,
    defaulting to collapsed when no preference has been stored.
    """
    if request.GET.get('expand', False):
        return False

    if request.GET.get('collapse', False):
        return True

    cookie_value = request.COOKIES.get('collapsediffs')

    if cookie_value is not None:
        return cookie_value == 'True'

    # No explicit preference anywhere; collapse by default.
    return True
class DiffViewerView(TemplateView):
    """Renders the main diff viewer.

    This renders the diff viewer for a given DiffSet (or an interdiff
    between two DiffSets). It handles loading information on the diffs,
    generating the side-by-side view, and pagination.

    The view expects the following parameters to be provided:

    ``diffset``
        The DiffSet to render.

    The following may also be provided:

    ``interdiffset``
        A DiffSet object representing the other end of an interdiff range.

    The following query parameters can be passed in on the URL:

    ``?expand=1``
        Expands all files within the diff viewer.

    ``?collapse=1``
        Collapses all files within the diff viewer, showing only
        modifications and a few lines of context.

    ``?file=<id>``
        Renders only the FileDiff represented by the provided ID.

    ``?filenames=<pattern>[,<pattern>,...]``
        Renders files matching the given filenames or
        :py:mod:`patterns <fnmatch>`. Patterns are case-sensitive.

    ``?page=<pagenum>``
        Renders diffs found on the given page number, if the diff viewer
        is paginated.

    ``?base-commit-id=<id>``
        The ID of the base commit to use to generate the diff for diffs
        created with multiple commits.

        Only changes from after the specified commit will be included in
        the diff.

    ``?tip-commit-id=<id>``
        The ID of the tip commit to use to generate the diff for diffs
        created with history.

        No changes from beyond this commit will be included in the diff.
    """

    template_name = 'diffviewer/view_diff.html'
    fragment_error_template_name = 'diffviewer/diff_fragment_error.html'

    def get(self, request, diffset, interdiffset=None, *args, **kwargs):
        """Handles GET requests for this view.

        This will render the full diff viewer based on the provided
        parameters.

        The full rendering time will be logged.

        If there's any exception thrown during rendering, an error page
        with a traceback will be returned instead.
        """
        self.collapse_diffs = get_collapse_diff(request)

        if interdiffset:
            logging.debug('Generating diff viewer page for interdiffset '
                          'ids %s-%s',
                          diffset.id, interdiffset.id, request=request)
        else:
            logging.debug('Generating diff viewer page for filediff id %s',
                          diffset.id, request=request)

        try:
            response = super(DiffViewerView, self).get(
                request, diffset=diffset, interdiffset=interdiffset,
                *args, **kwargs)

            if interdiffset:
                logging.debug('Done generating diff viewer page for '
                              'interdiffset ids %s-%s',
                              diffset.id, interdiffset.id, request=request)
            else:
                logging.debug('Done generating diff viewer page for filediff '
                              'id %s',
                              diffset.id, request=request)

            return response
        except Exception as e:
            if interdiffset:
                interdiffset_id = interdiffset.pk
            else:
                interdiffset_id = None

            logging.exception('%s.get: Error rendering diff for diffset '
                              'ID=%s, interdiffset ID=%s: %s',
                              self.__class__.__name__,
                              diffset.pk,
                              interdiffset_id,
                              e,
                              request=request)

            return exception_traceback(request, e, self.template_name)

    def render_to_response(self, *args, **kwargs):
        """Renders the page to an HttpResponse.

        This renders the diff viewer page, based on the context data
        generated, and sets cookies before returning an HttpResponse to
        the client.
        """
        response = super(DiffViewerView, self).render_to_response(*args,
                                                                  **kwargs)
        # Persist the current collapse preference so later requests
        # without ?expand=/?collapse= keep the same behavior.
        response.set_cookie('collapsediffs', self.collapse_diffs)

        return response

    def get_context_data(self, diffset, interdiffset, extra_context=None,
                         **kwargs):
        """Calculates and returns data used for rendering the diff viewer.

        This handles all the hard work of generating the data backing the
        side-by-side diff, handling pagination, and more. The data is
        collected into a context dictionary and returned for rendering.
        """
        # Avoid a shared mutable default argument.
        if extra_context is None:
            extra_context = {}

        try:
            # Note: raw string, so that \s is a regex class and not an
            # (invalid) Python string escape.
            filename_patterns = \
                re.split(r',+\s*', self.request.GET['filenames'].strip())
        except KeyError:
            filename_patterns = []

        base_commit_id = None
        base_commit = None
        tip_commit_id = None
        tip_commit = None
        commits_by_diffset_id = {}

        if diffset.commit_count > 0:
            diffset_pks = [diffset.pk]

            if interdiffset:
                diffset_pks.append(interdiffset.pk)

            # Fetch the commits for both diffsets in a single query.
            commits = DiffCommit.objects.filter(diffset_id__in=diffset_pks)

            for commit in commits:
                commits_by_diffset_id.setdefault(commit.diffset_id, []).append(
                    commit)

            # Base and tip commit selection is not supported in interdiffs.
            if not interdiffset:
                raw_base_commit_id = self.request.GET.get('base-commit-id')
                raw_tip_commit_id = self.request.GET.get('tip-commit-id')

                # Non-integer IDs are silently ignored rather than raising.
                if raw_base_commit_id is not None:
                    try:
                        base_commit_id = int(raw_base_commit_id)
                    except ValueError:
                        pass

                if raw_tip_commit_id is not None:
                    try:
                        tip_commit_id = int(raw_tip_commit_id)
                    except ValueError:
                        pass

                base_commit, tip_commit = get_base_and_tip_commits(
                    base_commit_id,
                    tip_commit_id,
                    commits=commits_by_diffset_id[diffset.pk])

        files = get_diff_files(diffset=diffset,
                               interdiffset=interdiffset,
                               request=self.request,
                               filename_patterns=filename_patterns,
                               base_commit=base_commit,
                               tip_commit=tip_commit)

        # Break the list of files into pages
        siteconfig = SiteConfiguration.objects.get_current()

        paginator = Paginator(files,
                              siteconfig.get('diffviewer_paginate_by'),
                              siteconfig.get('diffviewer_paginate_orphans'))

        page_num = int(self.request.GET.get('page', 1))

        if self.request.GET.get('file', False):
            # Jump to the page containing the requested file, overriding
            # any ?page= value.
            file_id = int(self.request.GET['file'])

            for i, f in enumerate(files):
                if f['filediff'].pk == file_id:
                    page_num = i // paginator.per_page + 1

                    if page_num > paginator.num_pages:
                        page_num = paginator.num_pages

                    break

        try:
            page = paginator.page(page_num)
        except InvalidPage:
            page = paginator.page(paginator.num_pages)

        diff_context = {
            'commits': None,
            'commit_history_diff': None,
            'filename_patterns': list(filename_patterns),
            'revision': {
                'revision': diffset.revision,
                'is_interdiff': interdiffset is not None,
                'interdiff_revision': (interdiffset.revision
                                       if interdiffset else None),
            },
            'pagination': {
                'is_paginated': page.has_other_pages(),
                'current_page': page.number,
                'pages': paginator.num_pages,
                'page_numbers': list(paginator.page_range),
                'has_next': page.has_next(),
                'has_previous': page.has_previous(),
            },
        }

        if page.has_next():
            diff_context['pagination']['next_page'] = page.next_page_number()

        if page.has_previous():
            diff_context['pagination']['previous_page'] = \
                page.previous_page_number()

        if diffset.commit_count > 0:
            if interdiffset:
                diff_context['commit_history_diff'] = [
                    entry.serialize()
                    for entry in diff_histories(
                        commits_by_diffset_id[diffset.pk],
                        commits_by_diffset_id[interdiffset.pk])
                ]

            all_commits = [
                commit
                for pk in commits_by_diffset_id
                for commit in commits_by_diffset_id[pk]
            ]

            diff_context['commits'] = [
                commit.serialize()
                for commit in sorted(all_commits,
                                     key=lambda commit: commit.pk)
            ]

            revision_context = diff_context['revision']
            revision_context.update({
                'base_commit_id': base_commit_id,
                'tip_commit_id': tip_commit_id,
            })

        context = dict({
            'diff_context': diff_context,
            'diffset': diffset,
            'interdiffset': interdiffset,
            'diffset_pair': (diffset, interdiffset),
            'files': page.object_list,
            'collapseall': self.collapse_diffs,
        }, **extra_context)

        return context
class DiffFragmentView(View):
    """Renders a fragment from a file in the diff viewer.

    Based on the diffset data and other arguments provided, this will render
    a fragment from a file in a diff. This may be the entire file, or some
    chunk within.

    The view expects the following parameters to be provided:

    * diffset_or_id
      - A DiffSet object or the ID for one.

    * filediff_id
      - The ID of a FileDiff within the DiffSet.

    The following may also be provided:

    * interdiffset_or_id
      - A DiffSet object or the ID for one representing the other end of
        an interdiff range.

    * interfilediff_id
      - A FileDiff ID for the other end of a revision range.

    * chunk_index
      - The index (0-based) of the chunk to render. If left out, the
        entire file will be rendered.

    Both ``filediff_id`` and ``interfilediff_id`` need to be available in the
    URL (or otherwise passed to :py:meth:`get`). ``diffset_or_id`` and
    ``interdiffset_or_id`` are needed in :py:meth:`process_diff_info`, and
    so must be passed either in the URL or in a subclass's definition of
    that method.

    The following query parameters can be passed in on the URL:

    ``?lines-of-context=<count>``
        A number of lines of context to include above and below the chunk.

    ``?base-filediff-id<=id>``
        The primary key of the base FileDiff.

        This parameter is ignored if the review request was created without
        commit history support.

        This conflicts with the ``interfilediff_id``.
    """

    template_name = 'diffviewer/diff_file_fragment.html'
    error_template_name = 'diffviewer/diff_fragment_error.html'
    patch_error_template_name = 'diffviewer/diff_fragment_patch_error.html'

    def get(self, request, *args, **kwargs):
        """Handle GET requests for this view.

        This will create the renderer for the diff fragment, render it, and
        return it.

        If there's an error when rendering the diff fragment, an error page
        will be rendered and returned instead.

        Args:
            request (django.http.HttpRequest):
                The HTTP request.

            *args (tuple):
                Additional positional arguments for the view.

            **kwargs (dict):
                Additional keyword arguments for the view.

        Returns:
            django.http.HttpResponse:
            A response containing the rendered fragment.
        """
        # Pulled out up-front so the error-logging paths below can report
        # them even when the failure happens mid-processing.
        filediff_id = kwargs.get('filediff_id')
        interfilediff_id = kwargs.get('interfilediff_id')
        chunk_index = kwargs.get('chunk_index')
        base_filediff_id = request.GET.get('base-filediff-id')
        try:
            renderer_settings = self._get_renderer_settings(**kwargs)
            etag = self.make_etag(renderer_settings, **kwargs)
            # Short-circuit with a 304 if the client's cached copy is
            # still valid for these settings.
            if etag_if_none_match(request, etag):
                return HttpResponseNotModified()
            diff_info_or_response = self.process_diffset_info(
                base_filediff_id=base_filediff_id,
                **kwargs)
            # Subclasses may return an HttpResponse directly to signal an
            # error with the diffsets; pass that through untouched.
            if isinstance(diff_info_or_response, HttpResponse):
                return diff_info_or_response
        except Http404:
            raise
        except Exception as e:
            logging.exception('%s.get: Error when processing diffset info '
                              'for filediff ID=%s, interfilediff ID=%s, '
                              'chunk_index=%s: %s',
                              self.__class__.__name__,
                              filediff_id,
                              interfilediff_id,
                              chunk_index,
                              e,
                              request=request)
            return exception_traceback(self.request, e,
                                       self.error_template_name)
        # Merge the resolved diffset/filediff info into kwargs so the
        # context and renderer below receive it.
        kwargs.update(diff_info_or_response)
        try:
            context = self.get_context_data(**kwargs)
            renderer = self.create_renderer(
                context=context,
                renderer_settings=renderer_settings,
                *args, **kwargs)
            response = renderer.render_to_response(request)
        except PatchError as e:
            # The patch failed to apply. Render a dedicated error page
            # offering a downloadable debug bundle instead of a fragment.
            logging.warning(
                '%s.get: PatchError when rendering diffset for filediff '
                'ID=%s, interfilediff ID=%s, chunk_index=%s: %s',
                self.__class__.__name__,
                filediff_id,
                interfilediff_id,
                chunk_index,
                e,
                request=request)
            try:
                url_kwargs = {
                    key: kwargs[key]
                    for key in ('chunk_index', 'interfilediff_id',
                                'review_request_id', 'filediff_id',
                                'revision', 'interdiff_revision')
                    if key in kwargs and kwargs[key] is not None
                }
                bundle_url = local_site_reverse('patch-error-bundle',
                                                kwargs=url_kwargs,
                                                request=request)
            except NoReverseMatch:
                # We'll sometimes see errors about this failing to resolve when
                # web crawlers start accessing fragment URLs without the proper
                # attributes. Ignore them.
                bundle_url = ''
            if e.rejects:
                # Syntax-highlight the rejects as a diff for display.
                lexer = get_lexer_by_name('diff')
                formatter = HtmlFormatter()
                rejects = highlight(e.rejects, lexer, formatter)
            else:
                rejects = None
            return HttpResponseServerError(render_to_string(
                template_name=self.patch_error_template_name,
                context={
                    'bundle_url': bundle_url,
                    'file': diff_info_or_response['diff_file'],
                    'filename': os.path.basename(e.filename),
                    'patch_output': e.error_output,
                    'rejects': mark_safe(rejects),
                },
                request=request))
        except FileNotFoundError as e:
            return HttpResponseServerError(render_to_string(
                template_name=self.error_template_name,
                context={
                    'error': e,
                    'file': diff_info_or_response['diff_file'],
                },
                request=request))
        except Exception as e:
            logging.exception('%s.get: Error when rendering diffset for '
                              'filediff ID=%s, interfilediff ID=%s, '
                              'chunkindex=%s: %s',
                              self.__class__.__name__,
                              filediff_id,
                              interfilediff_id,
                              chunk_index,
                              e,
                              request=request)
            return exception_traceback(
                self.request, e, self.error_template_name,
                extra_context={
                    'file': diff_info_or_response['diff_file'],
                })
        # Only tag successful renders with the ETag, so error responses
        # are never cached as valid fragments.
        if response.status_code == 200:
            set_etag(response, etag)
        return response

    def make_etag(self, renderer_settings, filediff_id,
                  interfilediff_id=None, **kwargs):
        """Return an ETag identifying this render.

        Args:
            renderer_settings (dict):
                The settings determining how to render this diff.

                The following keys are required: ``collapse_all`` and
                ``highlighting``.

                The following key is optional: ``show_deleted``.

            filediff_id (int):
                The ID of the
                :py:class:`~reviewboard.diffviewer.models.filediff.FileDiff`
                being rendered.

            interfilediff_id (int):
                The ID of the
                :py:class:`~reviewboard.diffviewer.models.filediff.FileDiff` on
                the other side of the diff revision, if viewing an interdiff.

            **kwargs (dict):
                Additional keyword arguments passed to the function.

        Return:
            unicode:
            The encoded ETag identifying this render.
        """
        # TEMPLATE_SERIAL is included so cached fragments are invalidated
        # when the templates change.
        etag = '%s:%s:%s:%s:%s:%s' % (
            get_diff_renderer_class(),
            renderer_settings['collapse_all'],
            renderer_settings['highlighting'],
            filediff_id,
            interfilediff_id,
            settings.TEMPLATE_SERIAL)
        show_deleted = renderer_settings.get('show_deleted')
        if show_deleted:
            etag += ':%s' % show_deleted
        return encode_etag(etag)

    def process_diffset_info(self, diffset_or_id, filediff_id,
                             interfilediff_id=None, interdiffset_or_id=None,
                             base_filediff_id=None, **kwargs):
        """Process and return information on the desired diff.

        The diff IDs and other data passed to the view can be processed and
        converted into DiffSets. A dictionary with the DiffSet and FileDiff
        information will be returned.

        A subclass may instead return a HttpResponse to indicate an error
        with the DiffSets.
        """
        # Depending on whether we're invoked from a URL or from a wrapper
        # with precomputed diffsets, we may be working with either IDs or
        # actual objects. If they're objects, just use them as-is. Otherwise,
        # if they're IDs, we want to grab them both (if both are provided)
        # in one go, to save on an SQL query.
        diffset = None
        interdiffset = None
        diffset_ids = []
        if isinstance(diffset_or_id, DiffSet):
            diffset = diffset_or_id
        else:
            diffset_ids.append(diffset_or_id)
        if interdiffset_or_id:
            if isinstance(interdiffset_or_id, DiffSet):
                interdiffset = interdiffset_or_id
            else:
                diffset_ids.append(interdiffset_or_id)
        if diffset_ids:
            diffsets = DiffSet.objects.filter(pk__in=diffset_ids)
            # Any missing ID means the requested diffset doesn't exist.
            if len(diffsets) != len(diffset_ids):
                raise Http404
            for temp_diffset in diffsets:
                if temp_diffset.pk == diffset_or_id:
                    diffset = temp_diffset
                elif temp_diffset.pk == interdiffset_or_id:
                    interdiffset = temp_diffset
                else:
                    assert False
        filediff = get_object_or_404(FileDiff, pk=filediff_id, diffset=diffset)
        base_filediff = None
        interfilediff = None
        if interfilediff_id and base_filediff_id:
            raise UserVisibleError(_(
                'Cannot generate an interdiff when base FileDiff ID is '
                'specified.'
            ))
        elif interfilediff_id:
            interfilediff = get_object_or_404(FileDiff, pk=interfilediff_id,
                                              diffset=interdiffset)
        elif base_filediff_id:
            base_filediff = get_object_or_404(FileDiff, pk=base_filediff_id,
                                              diffset=diffset)
            # The base must be an ancestor of the requested FileDiff for
            # the commit-range diff to make sense.
            ancestors = filediff.get_ancestors(minimal=False)
            if base_filediff not in ancestors:
                raise UserVisibleError(_(
                    'The requested FileDiff (ID %s) is not a valid base '
                    'FileDiff for FileDiff %s.'
                    % (base_filediff_id, filediff_id)
                ))
        # Store this so we don't end up causing an SQL query later when looking
        # this up.
        filediff.diffset = diffset
        diff_file = self._get_requested_diff_file(
            diffset, filediff, interdiffset, interfilediff, base_filediff)
        if not diff_file:
            raise UserVisibleError(
                _('Internal error. Unable to locate file record for '
                  'filediff %s')
                % filediff.pk)
        return {
            'diffset': diffset,
            'interdiffset': interdiffset,
            'filediff': filediff,
            'diff_file': diff_file,
        }

    def create_renderer(self, context, renderer_settings, diff_file,
                        *args, **kwargs):
        """Creates the renderer for the diff.

        This calculates all the state and data needed for rendering, and
        constructs a DiffRenderer with that data. That renderer is then
        returned, ready for rendering.

        If there's an error in looking up the necessary information, this
        may raise a UserVisibleError (best case), or some other form of
        Exception.
        """
        return get_diff_renderer(
            diff_file,
            extra_context=context,
            template_name=self.template_name,
            **renderer_settings)

    def get_context_data(self, *args, **kwargs):
        """Returns context data used for rendering the view.

        This can be overridden by subclasses to provide additional data for the
        view.
        """
        return {}

    def _get_renderer_settings(self, chunk_index=None, **kwargs):
        """Calculate the render settings for the display of a diff.

        This will calculate settings based on user preferences and URL
        parameters. It does not calculate the state of any DiffSets or
        FileDiffs.
        """
        highlighting = get_enable_highlighting(self.request.user)
        try:
            # ?lines-of-context= may be "above" or "above,below"; any
            # malformed value falls back to None.
            lines_of_context = self.request.GET.get('lines-of-context', '')
            lines_of_context = [int(i) for i in lines_of_context.split(',', 1)]
        except (TypeError, ValueError):
            lines_of_context = None
        if chunk_index is not None:
            try:
                chunk_index = int(chunk_index)
            except (TypeError, ValueError):
                chunk_index = None
        if lines_of_context:
            collapse_all = True
        elif chunk_index is not None:
            # If we're currently expanding part of a chunk, we want to render
            # the entire chunk without any lines collapsed. In the case of
            # showing a range of lines, we're going to get all chunks and then
            # only show the range. This is so that we won't have separate
            # cached entries for each range.
            collapse_all = False
        else:
            collapse_all = get_collapse_diff(self.request)
        show_deleted = (self.request.GET.get('show-deleted') == '1')
        return {
            'chunk_index': chunk_index,
            'collapse_all': collapse_all,
            'highlighting': highlighting,
            'lines_of_context': lines_of_context,
            'show_deleted': show_deleted,
        }

    def _get_requested_diff_file(self, diffset, filediff, interdiffset,
                                 interfilediff, base_filediff):
        """Fetches information on the requested diff.

        This will look up information on the diff that's to be rendered
        and return it, if found. It may also augment it with additional
        data.

        The file will not contain chunk information. That must be specifically
        populated later.
        """
        files = get_diff_files(diffset=diffset,
                               interdiffset=interdiffset,
                               filediff=filediff,
                               interfilediff=interfilediff,
                               base_filediff=base_filediff,
                               request=self.request)
        if files:
            # A specific filediff was requested, so at most one file is
            # expected back.
            diff_file = files[0]
            try:
                diff_file['index'] = int(self.request.GET['index'])
            except (KeyError, ValueError):
                pass
            return diff_file
        return None
class DownloadPatchErrorBundleView(DiffFragmentView):
    """A view to download the patch error bundle.

    This view allows users to download a bundle containing data to help debug
    issues when a patch fails to apply. The bundle will contain the diff, the
    original file (as returned by the SCMTool), and the rejects file, if
    applicable.
    """

    def get(self, request, *args, **kwargs):
        """Handle GET requests for this view.

        This will create the renderer for the diff fragment and render it in
        order to get the PatchError information. It then returns a response
        with a zip file containing all the debug data.

        If no PatchError occurred, this will return a 404.

        Args:
            request (django.http.HttpRequest):
                The HTTP request.

            *args (tuple):
                Additional positional arguments for the view.

            **kwargs (dict):
                Additional keyword arguments for the view.

        Returns:
            django.http.HttpResponse:
            A response containing the data bundle.
        """
        try:
            renderer_settings = self._get_renderer_settings(**kwargs)
            etag = self.make_etag(renderer_settings, **kwargs)
            if etag_if_none_match(request, etag):
                return HttpResponseNotModified()
            diff_info_or_response = self.process_diffset_info(**kwargs)
            if isinstance(diff_info_or_response, HttpResponse):
                return diff_info_or_response
        except Http404:
            return HttpResponseNotFound()
        except Exception as e:
            logging.exception(
                '%s.get: Error when processing diffset info for filediff '
                'ID=%s, interfilediff ID=%s, chunk_index=%s: %s',
                self.__class__.__name__,
                kwargs.get('filediff_id'),
                kwargs.get('interfilediff_id'),
                kwargs.get('chunk_index'),
                e,
                request=request)
            return HttpResponseServerError()
        kwargs.update(diff_info_or_response)
        try:
            # Render purely for the side effect: a failing patch raises
            # PatchError, which carries the debug data we want to bundle.
            # The rendered output itself is discarded.
            context = self.get_context_data(**kwargs)
            renderer = self.create_renderer(
                context=context,
                renderer_settings=renderer_settings,
                *args, **kwargs)
            renderer.render_to_response(request)
        except PatchError as e:
            patch_error = e
        except Exception as e:
            logging.exception(
                '%s.get: Error when rendering diffset for filediff ID=%s, '
                'interfilediff ID=%s, chunk_index=%s: %s',
                self.__class__.__name__,
                kwargs.get('filediff_id'),
                kwargs.get('interfilediff_id'),
                kwargs.get('chunk_index'),
                e,
                request=request)
            return HttpResponseServerError()
        else:
            # The render succeeded, so there is no PatchError and thus no
            # bundle to produce; report 404 as documented above.
            return HttpResponseNotFound()
        zip_data = StringIO()
        # Bundle the original file, diff, rejects, and new file (when
        # present) into an in-memory zip archive.
        with ZipFile(zip_data, 'w') as zipfile:
            basename = os.path.basename(patch_error.filename)
            zipfile.writestr('%s.orig' % basename, patch_error.orig_file)
            zipfile.writestr('%s.diff' % basename, patch_error.diff)
            if patch_error.rejects:
                zipfile.writestr('%s.rej' % basename, patch_error.rejects)
            if patch_error.new_file:
                zipfile.writestr('%s.new' % basename, patch_error.new_file)
        rsp = HttpResponse(zip_data.getvalue(),
                           content_type='application/zip')
        rsp['Content-Disposition'] = \
            'attachment; filename=%s.zip' % basename
        return rsp
def exception_traceback_string(request, e, template_name, extra_context=None):
    """Render an exception to an error-page string.

    For exceptions that are not :py:class:`UserVisibleError`, the full
    traceback of the active exception is included so the failure can be
    debugged from the rendered page.

    Args:
        request (django.http.HttpRequest):
            The HTTP request being processed.

        e (Exception):
            The exception being reported.

        template_name (unicode):
            The name of the template used to render the error page.

        extra_context (dict, optional):
            Extra context data to pass to the template.

    Returns:
        unicode:
        The rendered error page content.
    """
    context = {'error': e}

    # Use a None sentinel instead of a mutable default argument.
    if extra_context:
        context.update(extra_context)

    if not isinstance(e, UserVisibleError):
        context['trace'] = traceback.format_exc()

    return render_to_string(template_name=template_name,
                            context=context,
                            request=request)
def exception_traceback(request, e, template_name, extra_context={}):
    """Return a 500 response whose body is the rendered exception page."""
    content = exception_traceback_string(request, e, template_name,
                                         extra_context)

    return HttpResponseServerError(content)
| |
# Note that the dataset must be already downloaded for this script to work, do:
# $ cd data/
# $ python download_dataset.py
# quoc_trinh
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import metrics
import os
import sys
import datetime
# Derive this script's base name (no directory, no extension); it is used
# as the directory for checkpoints and the timing log below.
file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
print(" File Name:")
print(file_name)
print("")
# FLAG marks whether this run is a training process.
FLAG = 'train'
# Number of hidden units for the network defined later in this script.
N_HIDDEN_CONFIG = 32
save_path_name = file_name + "/model.ckpt"
print(datetime.datetime.now())
# Append run timing info to <file_name>/time.txt.
# NOTE(review): assumes the <file_name> directory already exists, and `f`
# is left open here — presumably written to and closed further down the
# script; confirm it is closed somewhere.
f = open(file_name + '/time.txt', 'a+')
f.write("------------- \n")
f.write("This is time \n")
f.write("Started at \n")
f.write(str(datetime.datetime.now())+'\n')
if __name__ == "__main__":
# -----------------------------
# step1: load and prepare data
# -----------------------------
# Those are separate normalised input features for the neural network
INPUT_SIGNAL_TYPES = [
"body_acc_x_",
"body_acc_y_",
"body_acc_z_",
"body_gyro_x_",
"body_gyro_y_",
"body_gyro_z_",
"total_acc_x_",
"total_acc_y_",
"total_acc_z_"
]
# Output classes to learn how to classify
LABELS = [
"WALKING",
"WALKING_UPSTAIRS",
"WALKING_DOWNSTAIRS",
"SITTING",
"STANDING",
"LAYING"
]
DATA_PATH = "../data/"
DATASET_PATH = DATA_PATH + "UCI HAR Dataset/"
print("\n" + "Dataset is now located at: " + DATASET_PATH)
# Preparing data set:
TRAIN = "train/"
TEST = "test/"
# Load "X" (the neural network's training and testing inputs)
def load_X(X_signals_paths):
X_signals = []
for signal_type_path in X_signals_paths:
file = open(signal_type_path, 'rb')
# Read dataset from disk, dealing with text files' syntax
X_signals.append(
[np.array(serie, dtype=np.float32) for serie in [
row.replace(' ', ' ').strip().split(' ') for row in file
]]
)
file.close()
"""Examples
--------
>> > x = np.arange(4).reshape((2, 2))
>> > x
array([[0, 1],
[2, 3]])
>> > np.transpose(x)
array([[0, 2],
[1, 3]])
>> > x = np.ones((1, 2, 3))
>> > np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return np.transpose(np.array(X_signals), (1, 2, 0))
X_train_signals_paths = [
DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES
]
X_test_signals_paths = [
DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES
]
X_train = load_X(X_train_signals_paths) # [7352, 128, 9]
X_test = load_X(X_test_signals_paths) # [7352, 128, 9]
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 128
print(len(X_train[0][0])) # 9
print(type(X_train))
X_train = np.reshape(X_train, [-1, 32, 36])
X_test = np.reshape(X_test, [-1, 32, 36])
print("-----------------X_train---------------")
# print(X_train)
print(len(X_train)) # 7352
print(len(X_train[0])) # 32
print(len(X_train[0][0])) # 36
print(type(X_train))
# exit()
y_train_path = DATASET_PATH + TRAIN + "y_train.txt"
y_test_path = DATASET_PATH + TEST + "y_test.txt"
def one_hot(label):
"""convert label from dense to one hot
argument:
label: ndarray dense label ,shape: [sample_num,1]
return:
one_hot_label: ndarray one hot, shape: [sample_num,n_class]
"""
label_num = len(label)
new_label = label.reshape(label_num) # shape : [sample_num]
# because max is 5, and we will create 6 columns
n_values = np.max(new_label) + 1
return np.eye(n_values)[np.array(new_label, dtype=np.int32)]
# Load "y" (the neural network's training and testing outputs)
def load_y(y_path):
file = open(y_path, 'rb')
# Read dataset from disk, dealing with text file's syntax
y_ = np.array(
[elem for elem in [
row.replace(' ', ' ').strip().split(' ') for row in file
]],
dtype=np.int32
)
file.close()
# Subtract 1 to each output class for friendly 0-based indexing
return y_ - 1
y_train = one_hot(load_y(y_train_path))
y_test = one_hot(load_y(y_test_path))
print("---------y_train----------")
# print(y_train)
print(len(y_train)) # 7352
print(len(y_train[0])) # 6
# -----------------------------------
# step2: define parameters for model
# -----------------------------------
    class Config(object):
        """
        define a class to store parameters,
        the input should be feature mat of training and testing
        """
        def __init__(self, X_train, X_test):
            # Input data
            self.train_count = len(X_train)  # 7352 training series
            self.test_data_count = len(X_test)  # 2947 testing series
            # time_steps per series; 32 here, since X_train was reshaped
            # to [-1, 32, 36] above (128 before that reshape).
            self.n_steps = len(X_train[0])
            # Training
            self.learning_rate = 0.0025
            self.lambda_loss_amount = 0.0015
            self.training_epochs = 300
            self.batch_size = 1000
            # LSTM structure
            # Feature count per step; 36 after the reshape above.
            self.n_inputs = len(X_train[0][0])
            self.n_hidden = N_HIDDEN_CONFIG  # nb of neurons inside the neural network
            self.n_classes = 6  # Final output classes
            # Note: Variables are created here at graph-construction time,
            # in this order; keep the order stable so TF's default variable
            # naming stays consistent across runs.
            self.W = {
                'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])),  # [n_inputs, n_hidden]
                'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes]))  # [n_hidden, n_classes]
            }
            self.biases = {
                'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)),  # [n_hidden]
                'output': tf.Variable(tf.random_normal([self.n_classes]))  # [n_classes]
            }
config = Config(X_train, X_test)
# print("Some useful info to get an insight on dataset's shape and normalisation:")
# print("features shape, labels shape, each features mean, each features standard deviation")
# print(X_test.shape, y_test.shape,
# np.mean(X_test), np.std(X_test))
# print("the dataset is therefore properly normalised, as expected.")
#
#
# ------------------------------------------------------
# step3: Let's get serious and build the neural network
# ------------------------------------------------------
# [none, 128, 9]
X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs])
# [none, 6]
Y = tf.placeholder(tf.float32, [None, config.n_classes])
print("-------X Y----------")
print(X)
X = tf.reshape(X, shape=[-1, 32, 36])
print(X)
print(Y)
Y = tf.reshape(Y, shape=[-1, 6])
print(Y)
# Weight Initialization
def weight_variable(shape):
# tra ve 1 gia tri random theo thuat toan truncated_ normal
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32)
return tf.Variable(initial)
def bias_varibale(shape):
initial = tf.constant(0.1, shape=shape, name='Bias')
return tf.Variable(initial)
# Convolution and Pooling
def conv2d(x, W):
# Must have `strides[0] = strides[3] = 1 `.
# For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1] `.
return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d')
    def max_pool_2x2(x):
        # NOTE(review): ksize is 2x2 but strides are 1x1, so despite the
        # name this does NOT downsample — with SAME padding the output
        # keeps the input's spatial size. That appears intentional, since
        # the result is later reshaped back to [-1, 32, 36]; confirm
        # before "fixing" the strides.
        return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1],
                              strides=[1, 1, 1, 1], padding='SAME', name='max_pool')
def LSTM_Network(feature_mat, config):
    """Build the CNN + stacked-LSTM classifier graph.

    Two 3x3 conv layers (with stride-1 "pooling", so the 32x36 spatial
    size is preserved) feed a 2-layer stacked LSTM with
    config.n_hidden cells per layer; the last time step's output goes
    through a fully connected output layer.

    argument:
        feature_mat: tensor, shape=[batch_size, time_steps, n_inputs]
        config: class containing config of network
    return:
        logits tensor of shape [batch_size, n_classes]
    """
    # First convolutional layer: 1 input channel -> 64 feature maps.
    W_conv1 = weight_variable([3, 3, 1, 64])
    b_conv1 = bias_varibale([64])
    # x_image = tf.reshape(x, shape=[-1, 28, 28, 1])
    # Add a channel dimension: [batch, 32, 36, 1]
    feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1])
    print("----feature_mat_image-----")
    print(feature_mat_image.get_shape())
    h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # Second Convolutional Layer: 64 feature maps back down to 1 channel.
    W_conv2 = weight_variable([3, 3, 64, 1])
    # NOTE(review): bias built with weight_variable (random), not
    # bias_varibale (constant 0.1) — confirm this is intentional.
    b_conv2 = weight_variable([1])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = h_conv2
    # Drop the channel dimension again: back to [batch, 32, 36].
    h_pool2 = tf.reshape(h_pool2, shape=[-1, 32, 36])
    feature_mat = h_pool2
    print("----feature_mat-----")
    print(feature_mat)
    # exit()
    # W_fc1 = weight_variable([8 * 9 * 1, 1024])
    # b_fc1 = bias_varibale([1024])
    # h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1])
    # h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # print("----h_fc1_drop-----")
    # print(h_fc1)
    # exit()
    #
    # # keep_prob = tf.placeholder(tf.float32)
    # keep_prob = tf.placeholder(1.0)
    # h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob)
    # print("----h_fc1_drop-----")
    # print(h_fc1_drop)
    # exit()
    #
    # W_fc2 = weight_variable([1024, 10])
    # b_fc2 = bias_varibale([10])
    #
    # y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    # print("----y_conv-----")
    # print(y_conv)
    # exit()
    # Exchange dim 1 and dim 0 (batch-major -> time-major).
    # Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36]
    feature_mat = tf.transpose(feature_mat, [1, 0, 2])
    # New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9]
    print("----feature_mat-----")
    print(feature_mat)
    # exit()
    # Temporarily crush the feature_mat's dimensions
    feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs])  # 9
    # New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9
    # Linear activation, reshaping inputs to the LSTM's number of hidden:
    hidden = tf.nn.relu(tf.matmul(
        feature_mat, config.W['hidden']
    ) + config.biases['hidden'])
    # New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32]
    print("--n_steps--")
    print(config.n_steps)
    print("--hidden--")
    print(hidden)
    # Split the series because the rnn cell needs time_steps tensors.
    # (pre-TF-1.0 signature: tf.split(axis, num_splits, value))
    hidden = tf.split(0, config.n_steps, hidden)  # (0, 128, [128*batch_size, 32])
    # New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
    # Define LSTM cell of first hidden layer:
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0)
    # Stack two LSTM layers, both layers have the same shape.
    lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2)
    # Get LSTM outputs; the states are internal to the LSTM cells and not needed here.
    outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32)
    # outputs' shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden]
    print("------------------list-------------------")
    print(outputs)
    # Get last time step's output feature for a "many to one" style classifier,
    # as in the image describing RNNs at the top of this page
    lstm_last_output = outputs[-1]  # Get the last element of the array: [?, 32]
    print("------------------last outputs-------------------")
    print (lstm_last_output)
    # Linear activation
    return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output']
# Build the full model; logits of shape [?, 6].
pred_Y = LSTM_Network(X, config)  # shape [?, 6]
print("------------------pred_Y-------------------")
print(pred_Y)
# Loss, train_step, evaluation.
# L2 regularisation over every trainable variable, scaled by the config.
l2 = config.lambda_loss_amount * \
    sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
# Softmax cross-entropy loss plus L2 penalty.
# NOTE(review): positional (logits, labels) arguments are the pre-TF-1.0
# API; newer TF requires labels=/logits= keywords — confirm TF version.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2
train_step = tf.train.AdamOptimizer(
    learning_rate=config.learning_rate).minimize(cost)
# Accuracy: fraction of samples whose argmax class matches the label.
correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))
# --------------------------------------------
# step4: Hooray, now train the neural network
# --------------------------------------------
# Note that log_device_placement can be turned ON but will cause console spam.
# Initializing the variables
init = tf.initialize_all_variables()
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
best_accuracy = 0.0
# Checkpoint written by the training branch and read back by the
# evaluation branch (the old code referenced an undefined
# `save_path_name` when restoring — NameError on Python 3 and 2 alike).
checkpoint_path = file_name + "/model.ckpt"
if FLAG == 'train':
    # Training mode: fit the model over all epochs, then save a checkpoint.
    with tf.Session() as sess:
        sess.run(init)
        f.write("---Save model \n")
        # Start training for each batch and loop epochs
        for i in range(config.training_epochs):
            # Mini-batches: [0, batch), [batch, 2*batch), ... up to train_count.
            for start, end in zip(range(0, config.train_count, config.batch_size),
                                  range(config.batch_size, config.train_count + 1,
                                        config.batch_size)):
                print(start)
                print(end)
                sess.run(train_step, feed_dict={X: X_train[start:end],
                                                Y: y_train[start:end]})
            # Test completely at every epoch: calculate accuracy.
            pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
                X: X_test, Y: y_test})
            # (fixed log typo: "traing" -> "training")
            print("training iter: {},".format(i) +
                  " test accuracy : {},".format(accuracy_out) +
                  " loss : {}".format(loss_out))
            best_accuracy = max(best_accuracy, accuracy_out)
        # Save the model in this session.
        save_path = saver.save(sess, checkpoint_path)
        print("Model saved in file: %s" % save_path)
        print("")
        # BUG FIX: was print("final loss: {}").format(loss_out), which
        # raises AttributeError on Python 3 because print() returns None.
        print("final loss: {}".format(loss_out))
        print("final test accuracy: {}".format(accuracy_out))
        print("best epoch's test accuracy: {}".format(best_accuracy))
        print("")
        # Write all output to file.
        f.write("final loss:" + str(format(loss_out)) + " \n")
        f.write("final test accuracy:" + str(format(accuracy_out)) + " \n")
        f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
else:
    # Evaluation mode: restore the saved checkpoint and score the test set.
    print("Starting 2nd session...")
    with tf.Session() as sess:
        # Initialize variables, then overwrite them from the checkpoint.
        sess.run(init)
        f.write("---Restore model \n")
        # Restore model weights from previously saved model.
        saver.restore(sess, checkpoint_path)
        # BUG FIX: previously printed undefined `save_path_name`.
        print("Model restored from file: %s" % checkpoint_path)
        # Single full evaluation pass over the test set.
        pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={
            X: X_test, Y: y_test})
        best_accuracy = max(best_accuracy, accuracy_out)
        print("")
        # BUG FIX: same print(...).format(...) defect as above.
        print("final loss: {}".format(loss_out))
        print("final test accuracy: {}".format(accuracy_out))
        print("best epoch's test accuracy: {}".format(best_accuracy))
        print("")
        # Write all output to file.
        f.write("final loss:" + str(format(loss_out)) + " \n")
        f.write("final test accuracy:" + str(format(accuracy_out)) + " \n")
        f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n")
#
# #------------------------------------------------------------------
# # step5: Training is good, but having visual insight is even better
# #------------------------------------------------------------------
# # The code is in the .ipynb
#
# #------------------------------------------------------------------
# # step6: And finally, the multi-class confusion matrix and metrics!
# #------------------------------------------------------------------
# # The code is in the .ipynb
f.write("Ended at \n")
f.write(str(datetime.datetime.now()) + '\n')
f.write("------------- \n")
f.close()
| |
"""
Creates the config data used by services.
@author: Davandev
"""
import davan.util.helper_functions as common
import os
import imp
def create_config(secret_config, config):
    """Populate ``config`` with every davan server and service setting.

    ``config`` (a dict) is mutated in place; nothing is returned.
    Non-secret values are hard-coded below, while credentials, tokens and
    API keys are read from ``secret_config``, a module-like object (see
    ``create()`` for how it is loaded).
    """
    #------------------------------------------------------------------------------------------------
    # General server configuration
    #------------------------------------------------------------------------------------------------
    # CRITICAL=0, ERROR,WARNING,INFO,DEBUG=4
    config["LOGLEVEL"] = 3
    # Root path of server
    config["ROOT"] = '/share/davanserver/'
    # Log directory path
    config['LOGFILE_PATH'] = config["ROOT"] + "logs"
    config['TEMP_PATH'] = config["ROOT"] + "temp"
    # Service directory path
    config['SERVICE_PATH'] = config["ROOT"] + "davan/http/service/"
    # Server port
    config["SERVER_PORT"] = 8080
    # NOTE: key spelling "ADRESS" (sic) is kept — callers look it up by this name.
    config["SERVER_ADRESS"] = "192.168.2.50"
    #---------------------------------------------------------------------------------------------
    # Fibaro configuration
    #---------------------------------------------------------------------------------------------
    config['FIBARO_USER_NAME'] = secret_config.FIBARO_USER
    config['FIBARO_PASSWORD'] = secret_config.FIBARO_PASSWORD
    config['FIBARO_IP_ADDRESS'] = "192.168.2.54"
    #---------------------------------------------------------------------------------------------
    # Fibaro path for common functions
    # --------------------------------------------------------------------------------------------
    # Fibaro api access path (basic-auth credentials embedded in the URL)
    config['FIBARO_API_ADDRESS'] = "http://" + config['FIBARO_USER_NAME'] + ":" + config['FIBARO_PASSWORD'] + "@" + config['FIBARO_IP_ADDRESS'] + "/api/"
    # Set property in a virtual device
    config['UPDATE_DEVICE'] = config['FIBARO_API_ADDRESS'] + "callAction?deviceID=<DEVICEID>&name=setProperty&arg1=<LABELID>&arg2=<VALUE>"
    # Press button on virtual device
    config['VD_PRESS_BUTTON_URL'] = config['FIBARO_API_ADDRESS'] + "callAction?deviceID=<ID>&name=pressButton&arg1=<BUTTONID>"
    # Get state of a scene
    config['GET_STATE_SCENE_URL'] = config['FIBARO_API_ADDRESS'] + "scenes/<ID>"
    # Command to start a scene
    config['START_SCENE_URL'] = config['FIBARO_API_ADDRESS'] + "sceneControl?id=<ID>&action=start"
    # Command to set a value on a device (dimmer)
    config['DEVICE_SET_VALUE_WITH_ARG_URL'] = config['FIBARO_API_ADDRESS'] + "callAction?deviceID=<ID>&name=setValue&arg1=<VALUE>"
    config['DEVICE_SET_VALUE_URL'] = config['FIBARO_API_ADDRESS'] + "callAction?deviceID=<ID>&name=<VALUE>"
    #-----------------------------------------------------------------------------------------
    # Logreceiver configuration
    #-----------------------------------------------------------------------------------------
    config["LogEntryEnabled"] = True
    # Log directory path
    config['HC2LOG_PATH'] = config["LOGFILE_PATH"] + "/hc2"
    #---------------------------------------------------------------------------------------------
    # Telegram configuration
    #---------------------------------------------------------------------------------------------
    # Telegram chat id, stored in a dict
    config['CHATID'] = secret_config.TELEGRAM_CHATID
    # Telegram token
    config['TOKEN'] = secret_config.TELEGRAM_TOKEN
    # Telegram api path for sending messages
    config['TELEGRAM_PATH'] = "https://api.telegram.org/bot"+config['TOKEN']+"/sendMessage?chat_id=<CHATID>&text="
    #---------------------------------------------------------------------------------------------
    # Outdoor camera configuration
    #---------------------------------------------------------------------------------------------
    config["TakePictureEnabled"] = True
    config['CAMERAS'] = {'Balkong':'http://192.168.2.119:99/snapshot.cgi/snapshot.cgi',
                         'Uterum':'http://192.168.2.76:99/snapshot.cgi/snapshot.cgi',
                         'Farstukvist':'http://192.168.2.172/tmpfs/snap.jpg'}
    # Username used when accessing cameras
    config['CAMERA_USER'] = secret_config.CAMERA_USER
    # Password used when accessing cameras
    config['CAMERA_PASSWORD'] = secret_config.CAMERA_PASSWORD
    #---------------------------------------------------------------------------------------
    # Presence Service configuration
    #---------------------------------------------------------------------------------------
    config["presenceEnabled"] = False
    # Scene ids for each user
    config['MIA_AWAY_SCENE_ID'] = "13"
    config['MIA_HOME_SCENE_ID'] = "12"
    config['DAVID_AWAY_SCENE_ID'] = "10"
    config['DAVID_HOME_SCENE_ID'] = "9"
    config['WILMA_AWAY_SCENE_ID'] = "15"
    config['WILMA_HOME_SCENE_ID'] = "14"
    config['VIGGO_HOME_SCENE_ID'] = "16"
    config['VIGGO_AWAY_SCENE_ID'] = "17"
    #---------------------------------------------------------------------------------------
    # Asus router presence Service configuration
    #---------------------------------------------------------------------------------------
    config["DevicePresenceServiceEnabled"] = True
    config["ROUTER_ADRESS"] = "192.168.2.1"
    config["ROUTER_USER"] = secret_config.ROUTER_USER
    config["ROUTER_PASSWORD"] = secret_config.ROUTER_PASSWORD
    # Virtual device id of the virtual device in HC2 that shows the presence of the users
    config['FIBARO_VD_PRESENCE_ID'] = "75"
    # Mappings of users and the label in the presence virtual device
    config['FIBARO_VD_MAPPINGS'] = {
        # User : HC2 Virtualdevice ID
        'Wilma' : 'ui.Label3.value',
        'David' : 'ui.Label1.value',
        'Mia' : 'ui.Label2.value',
        'Viggo' : 'ui.Label4.value' }
    # Ipadresses of devices where its wifi presence should be monitored
    config['FAMILY_DEVICES'] = secret_config.DEVICES_FAMILY
    config['GUEST_DEVICES'] = secret_config.DEVICES_FRIEND
    config['HOUSE_DEVICES'] = secret_config.DEVICES_HOUSE
    config['UNKNOWN_DEVICES'] = secret_config.DEVICES_UNKNOWN
    #---------------------------------------------------------------------------------------
    # Authentication configuration 98:00:c6:32:85:30
    #---------------------------------------------------------------------------------------
    config["authenticateEnabled"] = False
    config['disarmAlarm'] = config['FIBARO_API_ADDRESS'] + "sceneControl?id=36&action=start"
    config['disarmSkalskydd'] = config['FIBARO_API_ADDRESS'] + "sceneControl?id=38&action=start"
    config['armSkalskydd'] = config['FIBARO_API_ADDRESS'] + "sceneControl?id=34&action=start"
    config['armAlarm'] = config['FIBARO_API_ADDRESS'] + "sceneControl?id=35&action=start"
    # User pin codes
    config['USER_PIN'] = secret_config.USER_PIN
    #---------------------------------------------------------------------------------------------
    # Telldus sensor configuration
    #---------------------------------------------------------------------------------------------
    config["TelldusSensorServiceEnabled"] = False
    config["telldusEnabled"] = True
    # Telldus public key
    config["TELLDUS_PUBLIC_KEY"] = secret_config.TELLDUS_PUBLIC_KEY
    # Telldus private key
    config["TELLDUS_PRIVATE_KEY"] = secret_config.TELLDUS_PRIVATE_KEY
    # Dict holding name of room and virtual device id
    config['SENSOR_MAP'] = {
        # Room name : HC2 Virtualdevice ID
        'Badrum' : '147',
        'Tvattstuga': '220',
        'Garage': '149',
        'Gillestuga': '153',
        'Farstukvist': '152',
        'Sovrum': '151',
        'Wilma': '150' }
    config['SENSOR_HUMIDITY_LIMITS'] = {'Badrum': 60, 'Tvattstuga': 60}
    config['SENSOR_TEMP_HIGH_LIMITS'] = {}
    config['SENSOR_TEMP_LOW_LIMITS'] = {}
    # Temperature virtual devices
    config['LABEL_TEMP'] = "ui.Label1.value"
    config['LABEL_HUMIDITY'] = "ui.Label2.value"
    config['LABEL_DATE'] = "ui.Label3.value"
    #---------------------------------------------------------------------------------------------
    # Keypad keep alive configuration
    #---------------------------------------------------------------------------------------------
    config["KeypadAliveServiceEnabled"] = True
    # IP address of android alarm keypad
    config['KEYPAD_IP_ADDRESSES'] = {
        # Name : IP Address
        'S4' : '192.168.2.155:8080'}
    #'S6' : '192.168.2.88:8080',
    #'P5' : '192.168.2.233:8080',
    #'lgg4': '192.168.2.100:8080'}
    config['KEYPAD_IP_ADDRESS'] = "192.168.2.155:8080"
    config["KEYPAD_LOG_FILE"] = config['TEMP_PATH'] + "/keypad_log.txt"
    # URI used to verify if keypad is alive
    config['KEYPAD_PING_URL'] = "http://%IP%/Ping"
    # URI used to fetch logfile
    config['KEYPAD_LOG_URL'] = "http://" + config['KEYPAD_IP_ADDRESS'] + "/log"
    #-----------------------------------------------------------------------------------------
    # TTS configuration
    #-----------------------------------------------------------------------------------------
    config["ttsEnabled"] = True
    # VoiceRSS token
    config['VOICERSS_TOKEN'] = secret_config.VOICERSS_TOKEN
    # VoiceRSS api path for generating mp3 from message
    config['VOICERSS_URL'] = "http://api.voicerss.org/?key=" + config['VOICERSS_TOKEN'] + "&src=REPLACE_WITH_MESSAGE&f=22khz_16bit_mono&hl=sv-se"
    # Url with key where to translate message to mp3
    config['TTS_GENERATOR_IP_ADDRESS'] = "192.168.2.155:8080"
    # Path to cached tts messages
    config['TTS_PRECOMPILED_MSG_PATH'] = config["ROOT"] + "davan/tts_mp3/"
    # Path to cached tts alarm messages
    config['TTS_PRECOMPILED_ALARM_MSG_PATH'] = config["TTS_PRECOMPILED_MSG_PATH"] + "alarm/"
    # Mp3 file to play
    config['SPEAK_FILE'] = '/dev/shm/speak.mp3'
    # Application used to play mp3 file on raspberry pi
    config['SPEAK_CMD'] = '/usr/bin/mpg123'
    # Url to fetch created mp3 file on android phone
    config['TTS_GENERATOR_FETCH_URL'] = "http://" + config['TTS_GENERATOR_IP_ADDRESS'] + "/ttsFetch"
    # Url to generate mp3 file on android phone
    config['TTS_GENERATOR_CREATE_URL'] = "http://" + config['TTS_GENERATOR_IP_ADDRESS'] + "/tts"
    # Select the speakers for playing TTS messages, currently supports
    # "RoxcoreService", "SonosService" or internal speaker
    config['SPEAKER_SERVICE'] = "RoxcoreService"
    #-----------------------------------------------------------------------------------------
    # Mp3 provider configuration
    #-----------------------------------------------------------------------------------------
    config["mp3Enabled"] = True
    config['MP3_ROOT_FOLDER'] = config['TTS_PRECOMPILED_MSG_PATH']
    #-----------------------------------------------------------------------------------------
    # DailyQuote configuration
    #-----------------------------------------------------------------------------------------
    config["DailyQuoteEnabled"] = True
    # Path to dailyquote path
    config['TTS_DAILY_QUOTE_PATH'] = config["TTS_PRECOMPILED_MSG_PATH"] + "daily_quote/"
    # Path to dailyquote file
    config['TTS_DAILY_QUOTE_FILE'] = config['TTS_DAILY_QUOTE_PATH'] + "daily_quote.mp3"
    #---------------------------------------------------------------------------------------------
    # Scenes to monitor, start if not running
    # Monitors running scens on fibaro system
    #---------------------------------------------------------------------------------------------
    config["ActiveScenesMonitorEnabled"] = True
    # List of scenes that should be monitored
    config['MONITOR_SCENES'] = {'32'} # Clock scene
    #---------------------------------------------------------------------------------------------
    # UPS Virtual device configuration
    #---------------------------------------------------------------------------------------------
    config["UpsEnabled"] = True
    # UPS virtual id on fibaro system
    config["UPS_VD_ID"] = "156"
    # Button id on the UPS virtual device
    config["UPS_BUTTON_ID"] = "6"
    #---------------------------------------------------------------------------------------------
    # Internet speed test configuration
    #---------------------------------------------------------------------------------------------
    config["speedtestEnabled"] = True
    config['SPEED_TEST_FILE'] = config["SERVICE_PATH"] + "speedtest/internet_speed_measure.sh"
    config['SPEED_TEST_RESULT'] = config['SERVICE_PATH'] + "speedtest/speedtest.txt"
    #---------------------------------------------------------------------------------------------
    # Audio service configuration
    #---------------------------------------------------------------------------------------------
    config["AudioServiceEnabled"] = True
    config['RECEIVER_TURN_ON'] = "onkyo --host 192.168.2.218 PWR01"
    config['RECEIVER_TURN_OFF'] = "onkyo --host 192.168.2.218 PWR00"
    config['RECEIVER_SELECT_INPUT'] = "onkyo --host 192.168.2.218 SLI02"
    config['RECEIVER_SET_VOLUME'] = "onkyo --host 192.168.2.218 MVL25"
    config["CHROMECAST_NAME"] = "ChromecastEntre"
    #---------------------------------------------------------------------------------------------
    # HTML Service configuration
    #---------------------------------------------------------------------------------------------
    config["HtmlServiceEnabled"] = True
    config["HTML_INDEX_FILE"] = config['SERVICE_PATH'] + "html/index_template.html"
    config["HTML_STYLE_FILE"] = config['SERVICE_PATH'] + "html/style.css"
    config["HTML_STATISTICS_FILE"] = config['SERVICE_PATH'] + "html/statistics_template.html"
    config["HTML_SELECT_LOGFILE"] = config['SERVICE_PATH'] + "html/select_logfile.html"
    #---------------------------------------------------------------------------------------------
    # Sonos Service configuration
    #---------------------------------------------------------------------------------------------
    config["SonosServiceEnabled"] = False
    # Adress to Sonos speaker
    config['SONOS_SPEAKERS'] = [
        #Id, | Slogan | Address | Default | Play Announcement msg
        '0, Livingroom, 192.168.2.108, True, False',
        '1, Hallway, 192.168.2.108, False, False',
        '2, All, , False, True',
        ]
    #---------------------------------------------------------------------------------------------
    # Weather Service configuration
    #---------------------------------------------------------------------------------------------
    config["WeatherEnabled"] = False
    config["WEATHER_API_PATH"] = "http://api.wunderground.com/api/"
    config["WEATHER_TOKEN"] = secret_config.WEATHER_TOKEN
    config["WEATHER_STATION_ID"] = secret_config.WEATHER_STATION_ID
    config["WUNDERGROUND_PATH"] = config["WEATHER_API_PATH"] + config["WEATHER_TOKEN"] + config["WEATHER_STATION_ID"]
    # Weather virtual id on fibaro system
    config["WEATHER_VD_ID"] = "79"
    # Weather button to push
    config["WEATHER_BUTTON_ID"] = "7"
    #---------------------------------------------------------------------------------------------
    # LightSchema Service configuration
    # deviceType 0=Onoff, 1=dimmer, 2=virtualdevice
    # Room, StartTime, StopTime, Interval(week/weekdays/weekend), lightLevel(0-100), deviceId, buttonId, randomTime, virtualDeviceUpdateId
    #---------------------------------------------------------------------------------------------
    config["LightSchemaServiceEnabled"] = True
    config['LIGHT_SCHEMA'] = [
        #Room | start | stop | Interval | deviceType | lightlevel | deviceId | labelid | random | virtualdevice | Only when armed
        'KitchenTak, 06:15, 08:30, weekdays, 1, 10, 65, 1, 15, 194, 0',
        'KitchenTak, 16:15, 23:45, week, 1, 10, 65, 2, 1, 194, 0',
        'Uterum, sunset, 23:45, week, 0, -1, 192, 1, 10, 195, 0',
        'Outdoor, sunset, 23:40, week, 0, -1, 191, 1, 20, 196, 0',
        'Farstukvist, sunset, 23:55, week, 0, -1, 226, 1, 1, 227, 0',
        'WilmaFonster,06:30, 08:10, week, 2, 10, 180, 1, 3, 197, 0',
        'WilmaFonster,sunset, 22:15, weekdays, 2, 10, 180, 2, 3, 197, 0',
        'WilmaFonster,sunset, 23:15, weekend, 2, 10, 180, 2, 3, 197, 0',
        'WilmaBlinds, 07:30, 22:45, weekdays, 2, 14, 180, 3, 3, 197, 0',
        'WilmaBlinds, 10:00, 22:55, weekend, 2, 14, 180, 3, 3, 197, 0',
        'WilmaTak, 06:30, 08:15, weekdays, 1, 20, 173, 4, 20, 197, 1',
        'WilmaTak, sunset, 22:00, week, 1, 20, 173, 5, 20, 197, 1',
        'ViggoFonster,06:30, 08:00, weekdays, 2, 10, 181, 1, 3, 198, 0',
        'ViggoFonster,sunset, 21:30, weekdays, 2, 10, 181, 2, 3, 198, 0',
        'ViggoFonster,sunset, 23:00, weekend, 2, 10, 181, 2, 3, 198, 0',
        'ViggoBlinds, 07:30, 21:30, weekdays, 2, 14, 181, 3, 3, 198, 0',
        'ViggoBlinds, 10:00, 22:30, weekend, 2, 14, 181, 3, 3, 198, 0',
        'ViggoTak, 06:30, 07:15, weekdays, 1, 10, 177, 4, 15, 198, 1',
        'ViggoTak , sunset, 21:00, week, 1, 10, 177, 5, 15, 198, 1',
        'Parkering, sunset, 23:55, week, 0, -1, 276, 1, 1, 281, 0',
        'Datarum, 06:15, 08:30, week, 2, 1, 182, 1, 0, 304, 0',
        'Datarum, 16:15, 23:42, week, 2, 1, 182, 2, 0, 304, 0',
        'LjusTrad, 17:15, 23:30, week, 2, 14, 184, 1, 5, 307, 0',
        'Sovrum, 09:15, 10:30, weekend, 2, 1, 185, 1, 5, 308, 0',
        'Sovrum, 06:15, 08:15, weekdays, 2, 1, 185, 2, 5, 308, 0',
        'Sovrum, 17:15, 23:30, week, 2, 1, 185, 3, 5, 308, 0',
        ]
    config['LABEL_SCHEDULE'] = "ui.Schedule<BID>.value"
    #---------------------------------------------------------------------------------------------
    # ReceiverBot configuration
    #---------------------------------------------------------------------------------------------
    config["ReceiverBotServiceEnabled"] = True
    config["RECEIVER_BOT_TOKEN"] = secret_config.RECEIVER_BOT_TOKEN
    #---------------------------------------------------------------------------------------------
    # Roxcore speaker configuration
    #---------------------------------------------------------------------------------------------
    config["RoxcoreServiceEnabled"] = True
    config['ROXCORE_PORT_NR'] = "59152"
    config['ROXCORE_SPEAKERS'] = [
        #Id, | Slogan | Address | Default | Play Announcement msg
        '0, Kitchen, 192.168.2.49, True, True',
        '1, Hallway, 192.168.2.121, False, False',
        '2, All, , False, True',
        ]
    config['MESSAGE_ANNOUNCEMENT'] = "announcement.mp3"
    #---------------------------------------------------------------------------------------------
    # Announcement service
    #---------------------------------------------------------------------------------------------
    config["AnnouncementsServiceEnabled"] = True
    config["ANNOUNCEMENT_MENU_PATH"] = config["ROOT"] + "menu.txt"
    config["ANNOUNCEMENT_THEMEDAY_PATH"] = config["ROOT"] + "tema_dagar.txt"
    config["IDIOM_ANNOUNCEMENTS"] = config["ROOT"] + "idiomatisk.txt"
    config['ANNOUNCEMENTS_SCHEMA'] = [
        #Slogan | Time, | Interval | | announcementname | speaker id | Text
        'SleepTime, 23:02, weekdays, night, 0, Mia',
        'SleepTimeViggo, 20:30, weekdays, night, 0, Viggo',
        'SleepTimeWilma, 21:30, weekdays, night, 0, Wilma',
        'Morning, 06:30, weekdays, morning, 0, -',
        'MorningWeekend, 09:00, weekend, morning, 0, -',
        'WilmaBirthDay, 08:00, 02/06, birthday, 0, -',
        'ViggoBirthDay, 08:00, 20/06, birthday, 0, -',
        'MiaBirthDay, 08:00, 30/06, birthday, 0, -',
        'DavidBirthDay, 08:00, 08/07, birthday, 0, -',
        'RadioWeekdaysOn, 06:15, weekdays, radio, 0, http://tx-bauerse.sharp-stream.com/http_live.php?i=mixmegapol_instream_se_mp3',
        # 'RadioWeekdaysOff, 08:30, weekdays, radio, 0, stop',
        # 'EveningWater, 22:00, week, water, 0',
        ]
    #---------------------------------------------------------------------------------------------
    # Calendar service
    #---------------------------------------------------------------------------------------------
    config['CalendarServiceEnabled'] = True
    config['GOOGLE_CALENDAR_TOKEN'] = secret_config.GOOGLE_CALENDAR_TOKEN
    #---------------------------------------------------------------------------------------------
    # Sun service
    #---------------------------------------------------------------------------------------------
    config['SunServiceEnabled'] = True
    #---------------------------------------------------------------------------------------------
    # Scale service
    #---------------------------------------------------------------------------------------------
    config['ScaleServiceEnabled'] = False
    config['CONSUMER_KEY'] = secret_config.NOKIA_CONSUMER_KEY
    config['CONSUMER_SECRET'] = secret_config.NOKIA_CONSUMER_SECRET
    config['OAUTH_VERIFIER'] = secret_config.NOKIA_OAUTH_VERIFIER
    config['ACCESS_TOKEN'] = secret_config.NOKIA_ACCESS_TOKEN
    config['ACCESS_TOKEN_SECRET'] = secret_config.NOKIA_ACCESS_TOKEN_SECRET
    config['NOKIA_USER_ID'] = secret_config.NOKIA_USER_ID
    #---------------------------------------------------------------------------------------------
    # TV service
    #---------------------------------------------------------------------------------------------
    config['TvServiceEnabled'] = True
    config['TvServiceTimeout'] = 300
    config['HARMONY_IP_ADRESS'] = '192.168.2.143'
    config['WATCH_TV_ACTIVITY'] = '26681450'
    #---------------------------------------------------------------------------------------------
    # Connectivity service
    # Check internet connectivity
    #---------------------------------------------------------------------------------------------
    config['ConnectivityServiceEnabled'] = True
    #---------------------------------------------------------------------------------------------
    # Fibaro service
    # Monitor Fibaro system alarm state.
    #---------------------------------------------------------------------------------------------
    config['FibaroServiceEnabled'] = True
    config['FibaroTimeout'] = 300
    config['FibaroVirtualDeviceId'] = "69"
    #---------------------------------------------------------------------------------------------
    # DishWashService
    # Determine when dishwascher is ready with a Fibaro power plug
    #---------------------------------------------------------------------------------------------
    config['DishWashServiceEnabled'] = True
    #---------------------------------------------------------------------------------------------
    # DepartureService
    # Fetch
    #---------------------------------------------------------------------------------------------
    config['DepartureServiceEnabled'] = True
    config['DEPARTURE_SETTING'] = secret_config.SL_API_KEYS
    #---------------------------------------------------------------------------------------------
    # AlarmService
    # Keep a list of active alarms
    #---------------------------------------------------------------------------------------------
    config["AlarmServiceEnabled"] = True
    #---------------------------------------------------------------------------------------------
    # PowerUsageService
    # Measure usage time of certain devices with a Fibaro power plug.
    #---------------------------------------------------------------------------------------------
    config["PowerUsageServiceEnabled"] = True
    #---------------------------------------------------------------------------------------------
    # TradfriService
    # IKEA Tradfri gateway and device configuration.
    # (header fixed: was a copy-paste of the PowerUsageService header)
    #---------------------------------------------------------------------------------------------
    config["TradfriServiceEnabled"] = True
    config['TRADFRI_GATEWAY_IP'] = "192.168.2.4"
    config['TRADFRI_ID'] = "tradfri-pi-bash"
    config['TRADFRI_ID_KEY'] = secret_config.TRADFRI_KEYS
    config['TRADFRI_DEVICE_TYPES'] = [
        # DeviceTypeName DeviceTypeId, Close/Off, Open/On
        'SocketController, 3311, 5850, 0, 1',
        'BlindController, 15015, 5536, 0, 100'
        ]
    config['TRADFRI_DEVICES'] = [
        # Name | ID, | DeviceType
        'ViggoWindow, 65539, SocketController',
        'ViggoShelf, 65540, SocketController',
        'ViggoBlind, 65549, BlindController',
        'WilmaWindow, 65541, SocketController',
        'WilmaSlinga, 65542, SocketController',
        'WilmaBlindRight, 65552, BlindController',
        'WilmaBlindLeft, 65555, BlindController',
        'Datarum, 65544, SocketController',
        'Keypad, 65545, SocketController',
        'LjusTrad, 65547, SocketController',
        'Sovrum, 65546, SocketController'
        ]
    #---------------------------------------------------------------------------------------------
    # MoistureMonitorService
    # Monitors a moisture virtual device; MoistureMaxLimit is the alert
    # threshold. (header fixed: was a copy-paste of the PowerUsageService
    # description)
    #---------------------------------------------------------------------------------------------
    config['MoistureMonitorServiceEnabled'] = True
    config['MoistureMonitorTimeout'] = 900
    config['MoistureVdId']="298"
    config['MoistureMaxLimit']=75
    config['AdaxServiceEnabled'] = True
def create(private_config_file="/home/pi/private_config.py", debugPrint=False):
    """Build and return the full server configuration dict.

    Secrets are loaded from ``private_config_file`` when it exists and is
    importable; otherwise the bundled ``no_private_config`` defaults are
    used.

    :param private_config_file: path to a Python module holding secrets
    :param debugPrint: when True, pretty-print the resulting config
    :return: dict populated by ``create_config()``
    """
    my_secrets = None
    # BUG FIX: `not x == None` replaced with a plain truthiness check
    # (covers both None and ""), combined with the existence test.
    if private_config_file and os.path.exists(private_config_file):
        try:
            filename = os.path.basename(private_config_file)
            modulename = os.path.splitext(filename)[0]
            my_secrets = imp.load_source(modulename, private_config_file)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Parenthesised print works on
            # both Python 2 and 3 (the original print statement was
            # Python-2 only).
            print("Cannot import file " + private_config_file + " using default")
    if my_secrets is None:
        # Fall back to the checked-in defaults.
        import no_private_config
        my_secrets = no_private_config
    config = dict()
    create_config(my_secrets, config)
    if debugPrint:
        common.debug_formated(config)
    return config
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from neutron.db import db_base_plugin_v2
from neutron.db import subnet_service_type_db_models
from neutron.extensions import subnet_service_types
from neutron.tests.unit.db import test_db_base_plugin_v2
class SubnetServiceTypesExtensionManager(object):
    """Minimal extension manager that contributes nothing but the
    subnet-service-types extended resources."""

    def get_resources(self):
        # This extension introduces no new top-level API resources.
        return []

    def get_actions(self):
        return []

    def get_request_extensions(self):
        return []

    def get_extended_resources(self, version):
        # Delegate straight to the extension definition.
        return subnet_service_types.Subnet_service_types().get_extended_resources(version)
class SubnetServiceTypesExtensionTestPlugin(
        db_base_plugin_v2.NeutronDbPluginV2,
        subnet_service_type_db_models.SubnetServiceTypeMixin):
    """Test plugin to mixin the subnet service_types extension.
    """
    # Advertise the alias so the API layer activates the extension.
    supported_extension_aliases = ["subnet-service-types"]
class SubnetServiceTypesExtensionTestCase(
        test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
    """Test API extension subnet_service_types attributes.
    """
    # IPv4 fixture data; the IPv6 subclass overrides both class attributes.
    CIDRS = ['10.0.0.0/8', '20.0.0.0/8', '30.0.0.0/8']
    IP_VERSION = 4

    def setUp(self):
        # Load the test plugin together with the extension manager defined
        # above so the subnet-service-types extension is active.
        plugin = ('neutron.tests.unit.extensions.test_subnet_service_types.' +
                  'SubnetServiceTypesExtensionTestPlugin')
        ext_mgr = SubnetServiceTypesExtensionManager()
        super(SubnetServiceTypesExtensionTestCase,
              self).setUp(plugin=plugin, ext_mgr=ext_mgr)

    def _create_service_subnet(self, service_types=None, cidr=None,
                               network=None, enable_dhcp=False):
        """Create a subnet, optionally tagged with *service_types*.

        A fresh network is created when none is supplied.  Returns the raw
        API response (not yet deserialized).
        """
        if not network:
            # The network fixture is a context manager; exiting it here does
            # not delete the network, it only ends the `with` scope.
            with self.network() as network:
                pass
        network = network['network']
        if not cidr:
            cidr = self.CIDRS[0]
        args = {'net_id': network['id'],
                'tenant_id': network['tenant_id'],
                'cidr': cidr,
                'ip_version': self.IP_VERSION,
                'enable_dhcp': enable_dhcp}
        if service_types:
            args['service_types'] = service_types
        return self._create_subnet(self.fmt, **args)

    def _test_create_subnet(self, service_types, expect_fail=False):
        """Create a subnet and verify either the client error or that the
        requested service types were stored verbatim."""
        res = self._create_service_subnet(service_types)
        if expect_fail:
            self.assertEqual(webob.exc.HTTPClientError.code,
                             res.status_int)
        else:
            subnet = self.deserialize('json', res)
            subnet = subnet['subnet']
            self.assertEqual(len(service_types),
                             len(subnet['service_types']))
            for service in service_types:
                self.assertIn(service, subnet['service_types'])

    def test_create_subnet_blank_type(self):
        self._test_create_subnet([])

    def test_create_subnet_bar_type(self):
        self._test_create_subnet(['network:bar'])

    def test_create_subnet_foo_type(self):
        self._test_create_subnet(['compute:foo'])

    def test_create_subnet_bar_and_foo_type(self):
        self._test_create_subnet(['network:bar', 'compute:foo'])

    def test_create_subnet_invalid_type(self):
        # Neither a prefix-less string nor a non-string is a valid type.
        self._test_create_subnet(['foo'], expect_fail=True)
        self._test_create_subnet([1], expect_fail=True)

    def test_create_subnet_no_type(self):
        res = self._create_service_subnet()
        subnet = self.deserialize('json', res)
        subnet = subnet['subnet']
        self.assertFalse(subnet['service_types'])

    def _test_update_subnet(self, subnet, service_types, fail_code=None):
        """PUT new *service_types* onto *subnet*; expect *fail_code* if given."""
        data = {'subnet': {'service_types': service_types}}
        req = self.new_update_request('subnets', data, subnet['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        if fail_code is not None:
            self.assertEqual(fail_code,
                             res['NeutronError']['type'])
        else:
            subnet = res['subnet']
            self.assertEqual(len(service_types),
                             len(subnet['service_types']))
            for service in service_types:
                self.assertIn(service, subnet['service_types'])

    def test_update_subnet_zero_to_one(self):
        service_types = ['network:foo']
        # Create a subnet with no service type
        res = self._create_service_subnet()
        subnet = self.deserialize('json', res)['subnet']
        # Update it with a single service type
        self._test_update_subnet(subnet, service_types)

    def test_update_subnet_one_to_two(self):
        service_types = ['network:foo']
        # Create a subnet with one service type
        res = self._create_service_subnet(service_types)
        subnet = self.deserialize('json', res)['subnet']
        # Update it with two service types
        service_types.append('compute:bar')
        self._test_update_subnet(subnet, service_types)

    def test_update_subnet_two_to_one(self):
        service_types = ['network:foo', 'compute:bar']
        # Create a subnet with two service types
        res = self._create_service_subnet(service_types)
        subnet = self.deserialize('json', res)['subnet']
        # Update it with one service type
        service_types = ['network:foo']
        self._test_update_subnet(subnet, service_types)

    def test_update_subnet_one_to_zero(self):
        service_types = ['network:foo']
        # Create a subnet with one service type
        res = self._create_service_subnet(service_types)
        subnet = self.deserialize('json', res)['subnet']
        # Update it with zero service types
        service_types = []
        self._test_update_subnet(subnet, service_types)

    def test_update_subnet_invalid_type(self):
        # Create a subnet with no service type
        res = self._create_service_subnet()
        subnet = self.deserialize('json', res)['subnet']
        # Update it with invalid service type(s)
        self._test_update_subnet(subnet, ['foo'],
                                 fail_code='InvalidSubnetServiceType')
        self._test_update_subnet(subnet, [2],
                                 fail_code='InvalidInputSubnetServiceType')

    def _assert_port_res(self, port, service_type, subnet, fallback,
                         error='IpAddressGenerationFailureNoMatchingSubnet'):
        """Verify the port got an IP on *subnet* (fallback=True) or that
        allocation failed with *error* (fallback=False)."""
        res = self.deserialize('json', port)
        if fallback:
            port = res['port']
            self.assertEqual(1, len(port['fixed_ips']))
            self.assertEqual(service_type, port['device_owner'])
            self.assertEqual(subnet['id'], port['fixed_ips'][0]['subnet_id'])
        else:
            self.assertEqual(error, res['NeutronError']['type'])

    def test_create_port_with_matching_service_type(self):
        with self.network() as network:
            pass
        matching_type = 'network:foo'
        non_matching_type = 'network:bar'
        # Create a subnet with no service types
        self._create_service_subnet(network=network)
        # Create a subnet with a non-matching service type
        self._create_service_subnet([non_matching_type],
                                    cidr=self.CIDRS[2],
                                    network=network)
        # Create a subnet with a service type to match the port device owner
        res = self._create_service_subnet([matching_type],
                                          cidr=self.CIDRS[1],
                                          network=network)
        service_subnet = self.deserialize('json', res)['subnet']
        # Create a port with device owner matching the correct service subnet
        network = network['network']
        port = self._create_port(self.fmt,
                                 net_id=network['id'],
                                 tenant_id=network['tenant_id'],
                                 device_owner=matching_type)
        self._assert_port_res(port, matching_type, service_subnet, True)

    def test_create_port_without_matching_service_type(self, fallback=True):
        with self.network() as network:
            pass
        subnet = ''
        matching_type = 'compute:foo'
        non_matching_type = 'network:foo'
        if fallback:
            # Create a subnet with no service types
            res = self._create_service_subnet(network=network)
            subnet = self.deserialize('json', res)['subnet']
        # Create a subnet with a non-matching service type
        self._create_service_subnet([non_matching_type],
                                    cidr=self.CIDRS[1],
                                    network=network)
        # Create a port with device owner not matching the service subnet
        network = network['network']
        port = self._create_port(self.fmt,
                                 net_id=network['id'],
                                 tenant_id=network['tenant_id'],
                                 device_owner=matching_type)
        self._assert_port_res(port, matching_type, subnet, fallback)

    def test_create_port_without_matching_service_type_no_fallback(self):
        self.test_create_port_without_matching_service_type(fallback=False)

    def test_create_port_no_device_owner(self, fallback=True):
        with self.network() as network:
            pass
        subnet = ''
        service_type = 'compute:foo'
        if fallback:
            # Create a subnet with no service types
            res = self._create_service_subnet(network=network)
            subnet = self.deserialize('json', res)['subnet']
        # Create a subnet with a service_type
        self._create_service_subnet([service_type],
                                    cidr=self.CIDRS[1],
                                    network=network)
        # Create a port without a device owner
        network = network['network']
        port = self._create_port(self.fmt,
                                 net_id=network['id'],
                                 tenant_id=network['tenant_id'])
        self._assert_port_res(port, '', subnet, fallback)

    def test_create_port_no_device_owner_no_fallback(self):
        self.test_create_port_no_device_owner(fallback=False)

    def test_create_port_exhausted_subnet(self, fallback=True):
        with self.network() as network:
            pass
        subnet = ''
        service_type = 'compute:foo'
        if fallback:
            # Create a subnet with no service types
            res = self._create_service_subnet(network=network)
            subnet = self.deserialize('json', res)['subnet']
        # Create a subnet with a service_type
        res = self._create_service_subnet([service_type],
                                          cidr=self.CIDRS[1],
                                          network=network)
        service_subnet = self.deserialize('json', res)['subnet']
        # Update the service subnet with empty allocation pools
        data = {'subnet': {'allocation_pools': []}}
        req = self.new_update_request('subnets', data, service_subnet['id'])
        res = self.deserialize(self.fmt, req.get_response(self.api))
        # Create a port with a matching device owner
        network = network['network']
        port = self._create_port(self.fmt,
                                 net_id=network['id'],
                                 tenant_id=network['tenant_id'],
                                 device_owner=service_type)
        self._assert_port_res(port, service_type, subnet, fallback,
                              error='IpAddressGenerationFailure')

    def test_create_port_exhausted_subnet_no_fallback(self):
        self.test_create_port_exhausted_subnet(fallback=False)

    def test_create_dhcp_port_compute_subnet(self, enable_dhcp=True):
        # DHCP ports may be placed on a compute service subnet only while
        # DHCP is actually enabled on it.
        with self.network() as network:
            pass
        res = self._create_service_subnet(['compute:nova'],
                                          network=network,
                                          enable_dhcp=enable_dhcp)
        subnet = self.deserialize('json', res)['subnet']
        network = network['network']
        port = self._create_port(self.fmt,
                                 net_id=network['id'],
                                 tenant_id=network['tenant_id'],
                                 fixed_ips=[{'subnet_id': subnet['id']}],
                                 device_owner='network:dhcp')
        self._assert_port_res(port, 'network:dhcp', subnet, enable_dhcp)

    def test_create_dhcp_port_compute_subnet_no_dhcp(self):
        self.test_create_dhcp_port_compute_subnet(enable_dhcp=False)

    def test_update_port_fixed_ips(self):
        with self.network() as network:
            pass
        service_type = 'compute:foo'
        # Create a subnet with a service_type
        res = self._create_service_subnet([service_type],
                                          cidr=self.CIDRS[1],
                                          network=network)
        service_subnet = self.deserialize('json', res)['subnet']
        # Create a port with a matching device owner
        network = network['network']
        port = self._create_port(self.fmt,
                                 net_id=network['id'],
                                 tenant_id=network['tenant_id'],
                                 device_owner=service_type)
        port = self.deserialize('json', port)['port']
        # Update the port's fixed_ips. It's ok to reuse the same IP it already
        # has.
        ip_address = port['fixed_ips'][0]['ip_address']
        data = {'port': {'fixed_ips': [{'subnet_id': service_subnet['id'],
                                        'ip_address': ip_address}]}}
        # self._update will fail with a MismatchError if the update cannot be
        # applied
        port = self._update('ports', port['id'], data)
class SubnetServiceTypesExtensionTestCasev6(
        SubnetServiceTypesExtensionTestCase):
    """Re-run the whole suite over IPv6 by swapping the fixture CIDRs."""
    CIDRS = ['2001:db8:2::/64', '2001:db8:3::/64', '2001:db8:4::/64']
    IP_VERSION = 6
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import warnings
from string import Template
# Point Django at the test settings before django.conf is imported below.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.template import RequestContext
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.utils import unittest
from django.utils.encoding import smart_str
from selenium.webdriver.phantomjs.webdriver import WebDriver
from utils import script_prefix
# Raise errors on DeprecationWarnings so the test run fails fast on any
# deprecated Django/selenium API usage.
warnings.simplefilter('error', DeprecationWarning)
class AbstractJSReverseTestCase(object):
    """Shared fixture: a Django test client plus a PhantomJS driver used to
    evaluate the JavaScript emitted by the /jsreverse/ view."""
    client = None
    urls = 'tests.test_urls'

    @classmethod
    def setUpClass(cls):
        if hasattr(django, 'setup'):
            # for django >= 1.7
            django.setup()
        # One browser per test class; torn down in tearDownClass.
        cls.selenium = WebDriver()
        super(AbstractJSReverseTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super(AbstractJSReverseTestCase, cls).tearDownClass()

    def setUp(self):
        self.client = Client()

    def assertEqualJSUrlEval(self, url_call, expected_url):
        # Fetch the generated JS, execute it in the browser, then evaluate
        # the given Urls.* call and compare against the expected URL string.
        response = self.client.post('/jsreverse/')
        self.assertEqual(self.selenium.execute_script('%s return %s;' % (smart_str(response.content), url_call)),
                         expected_url)
class JSReverseViewTestCaseMinified(AbstractJSReverseTestCase, TestCase):
    """Exercise the /jsreverse/ view with JS minification enabled (the
    default); the NotMinified subclass reruns everything unminified."""

    def test_view_no_url_args(self):
        self.assertEqualJSUrlEval('Urls.test_no_url_args()', '/test_no_url_args/')

    def test_view_one_url_arg(self):
        self.assertEqualJSUrlEval('Urls.test_one_url_args("arg_one")', '/test_one_url_args/arg_one/')

    def test_view_two_url_args(self):
        self.assertEqualJSUrlEval('Urls.test_two_url_args("arg_one", "arg_two")', '/test_two_url_args/arg_one-arg_two/')

    def test_view_optional_url_arg(self):
        self.assertEqualJSUrlEval('Urls.test_optional_url_arg("arg_two")',
                                  '/test_optional_url_arg/2_arg_two/')
        self.assertEqualJSUrlEval('Urls.test_optional_url_arg("arg_one", "arg_two")',
                                  '/test_optional_url_arg/1_arg_one-2_arg_two/')

    def test_unicode_url_name(self):
        self.assertEqualJSUrlEval('Urls.test_unicode_url_name()', '/test_unicode_url_name/')

    @override_settings(JS_REVERSE_JS_VAR_NAME='Foo')
    def test_js_var_name_changed_valid(self):
        self.assertEqualJSUrlEval('Foo.test_no_url_args()', '/test_no_url_args/')

    @override_settings(JS_REVERSE_JS_VAR_NAME='1test')
    def test_js_var_name_changed_to_invalid(self):
        # '1test' is not a valid JS identifier, so the view must refuse it.
        with self.assertRaises(ImproperlyConfigured):
            self.client.post('/jsreverse/')

    def test_namespaces(self):
        self.assertEqualJSUrlEval('Urls["ns1:test_two_url_args"]("arg_one", "arg_two")',
                                  '/ns1/test_two_url_args/arg_one-arg_two/')
        self.assertEqualJSUrlEval('Urls["ns2:test_two_url_args"]("arg_one", "arg_two")',
                                  '/ns2/test_two_url_args/arg_one-arg_two/')

    def test_namespaces_with_args(self):
        self.assertEqualJSUrlEval('Urls["ns_arg:test_two_url_args"]("arg_one", "arg_two", "arg_three")',
                                  '/nsarg_one/test_two_url_args/arg_two-arg_three/')

    def test_namespaces_nested(self):
        self.assertEqualJSUrlEval('Urls["nestedns:ns1:test_two_url_args"]("arg_one", "arg_two")',
                                  '/nestedns/ns1/test_two_url_args/arg_one-arg_two/')

    def test_content_type(self):
        response = self.client.post('/jsreverse/')
        self.assertEqual(response['Content-Type'], 'application/javascript')

    @override_settings(JS_REVERSE_JS_MINIFY='invalid')
    def test_js_minfiy_changed_to_invalid(self):
        # NOTE(review): 'minfiy' typo is in the discovered test name; renaming
        # would change the public test interface, so it is left as-is.
        with self.assertRaises(ImproperlyConfigured):
            self.client.post('/jsreverse/')

    def test_namespace_in_urls(self):
        response = self.client.get('/jsreverse/')
        self.assertContains(response, 'exclude_namespace', status_code=200)

    @override_settings(JS_REVERSE_EXCLUDE_NAMESPACES=['exclude_namespace'])
    def test_namespace_not_in_response(self):
        response = self.client.get('/jsreverse/')
        self.assertNotContains(response, 'exclude_namespace', status_code=200)

    def test_script_prefix(self):
        with script_prefix('/foobarlala/'):
            self.assertEqualJSUrlEval('Urls["nestedns:ns1:test_two_url_args"]("arg_one", "arg_two")',
                                      '/foobarlala/nestedns/ns1/test_two_url_args/arg_one-arg_two/')

    def test_duplicate_name(self):
        self.assertEqualJSUrlEval('Urls.test_duplicate_name("arg_one")',
                                  '/test_duplicate_name/arg_one/')
        self.assertEqualJSUrlEval('Urls.test_duplicate_name("arg_one", "arg_two")',
                                  '/test_duplicate_name/arg_one-arg_two/')

    def test_duplicate_argcount(self):
        # Keyword-object form disambiguates URLs sharing a name and arg count.
        self.assertEqualJSUrlEval('Urls.test_duplicate_argcount ({arg_one: "arg_one"})',
                                  '/test_duplicate_argcount/arg_one-/')
        self.assertEqualJSUrlEval('Urls.test_duplicate_argcount ({arg_two: "arg_two"})',
                                  '/test_duplicate_argcount/-arg_two/')
        self.assertEqualJSUrlEval('Urls.test_duplicate_argcount ({arg_one: "arg_one", arg_two: "arg_two"})',
                                  '/test_duplicate_argcount/arg_one-arg_two/')
@override_settings(JS_REVERSE_JS_MINIFY=False)
class JSReverseViewTestCaseNotMinified(JSReverseViewTestCaseMinified):
    """Re-run the whole minified suite without minification, plus check
    that minification actually shrinks the output."""

    def test_minification(self):
        js_not_minified = smart_str(self.client.post('/jsreverse/').content)
        with override_settings(JS_REVERSE_JS_MINIFY=True):
            js_minified = smart_str(self.client.post('/jsreverse/').content)
        self.assertTrue(len(js_minified) < len(js_not_minified))
class JSReverseViewTestCaseGlobalObjectName(JSReverseViewTestCaseMinified):
    """Checks for the JS_REVERSE_JS_GLOBAL_OBJECT_NAME setting."""

    def test_global_object_name_default(self):
        # Default global object is `this`.
        js_content = smart_str(self.client.post('/jsreverse/').content)
        self.assertTrue(js_content.startswith('this.'))

    @override_settings(JS_REVERSE_JS_GLOBAL_OBJECT_NAME='window')
    def test_global_object_name_change(self):
        js_content = smart_str(self.client.post('/jsreverse/').content)
        self.assertTrue(js_content.startswith('window.'))

    @override_settings(JS_REVERSE_JS_GLOBAL_OBJECT_NAME='1test')
    def test_global_object_name_change_invalid_identifier(self):
        # '1test' is not a valid JS identifier, so configuration must fail.
        with self.assertRaises(ImproperlyConfigured):
            self.client.post('/jsreverse/')
class JSReverseStaticFileSaveTest(AbstractJSReverseTestCase, TestCase):
    """Tests for the collectstatic_js_reverse management command."""

    def test_reverse_js_file_save(self):
        """The written static file must be identical to the dynamic view
        output, and the command must fail cleanly without STATIC_ROOT."""
        call_command('collectstatic_js_reverse')
        path = os.path.join(settings.STATIC_ROOT, 'django_js_reverse', 'js', 'reverse.js')
        # Context manager closes the handle even on assertion failure
        # (the original `f = open(path)` leaked it).
        with open(path) as f:
            content1 = f.read()
        if hasattr(content1, 'decode'):
            content1 = content1.decode()
        r2 = self.client.get('/jsreverse/')
        content2 = r2.content
        if hasattr(content2, 'decode'):
            content2 = content2.decode()
        self.assertEqual(len(content1), len(content2), 'Static file don\'t match http response content_1')
        self.assertEqual(content1, content2, 'Static file don\'t match http response content_2')
        # test for exception if STATIC_ROOT is not set
        with override_settings(STATIC_ROOT=None):
            with self.assertRaises(ImproperlyConfigured):
                call_command('collectstatic_js_reverse')

    def test_script_prefix(self):
        # JS_REVERSE_SCRIPT_PREFIX must be prepended to every reversed URL.
        script_prefix = '/test/foo/bar/'
        with override_settings(JS_REVERSE_SCRIPT_PREFIX=script_prefix):
            self.assertEqualJSUrlEval('Urls.test_no_url_args()', '{0}test_no_url_args/'.format(script_prefix))
class JSReverseTemplateTagTest(AbstractJSReverseTestCase, TestCase):
    """Smoke tests for the js_reverse_inline template tag.

    NOTE(review): 'contect' in the method names is a typo for 'context';
    renaming would change the discovered test names, so it is left as-is.
    """

    def test_tpl_tag_with_request_in_contect(self):
        # Render the inline tag with a request-backed context; only checks
        # that template construction does not raise.
        from django_js_reverse.templatetags.js_reverse import js_reverse_inline
        context_instance = RequestContext(self.client.request)
        Template("{%% load %s %%}{%% %s %%}" % ('js_reverse', js_reverse_inline(context_instance)))

    def test_tpl_tag_without_request_in_contect(self):
        from django_js_reverse.templatetags.js_reverse import js_reverse_inline
        context_instance = RequestContext(None)
        Template("{%% load %s %%}{%% %s %%}" % ('js_reverse', js_reverse_inline(context_instance)))
if __name__ == '__main__':
    # Make the project root importable when the file is run directly.
    sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..') + os.sep)
    unittest.main()
| |
"""Tests for tensorflow.ops.random_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class RandomNormalTest(tf.test.TestCase):

  def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
    """Return a thunk that draws a [10, num] array of N(mu, sigma) samples,
    one fresh graph/session per call."""
    def func():
      with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
        rng = tf.random_normal(
            [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
        return ret
    return func

  # Asserts that different trials (1000 samples per trial) is unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which uses the same random number seed.
  def testDistinct(self):
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu)
        x = sampler()
        y = sampler()
        # Number of different samples.
        count = (x == y).sum()
        if count >= 10:
          print("x = ", x)
          print("y = ", y)
          print("count = ", count)
        self.assertTrue(count < 10)

  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    for dt in tf.float32, tf.float64:
      results = {}
      for use_gpu in [False, True]:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)

  def testSeed(self):
    # Identical seeds must reproduce identical streams.
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        self.assertAllEqual(sx(), sy())

  def testNoCSE(self):
    # Two random ops must not be merged by common-subexpression elimination.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        shape = [2, 3, 4]
        rnd1 = tf.random_normal(shape, 0.0, 1.0, tf.float32)
        rnd2 = tf.random_normal(shape, 0.0, 1.0, tf.float32)
        diff = rnd2 - rnd1
        self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
class TruncatedNormalTest(tf.test.TestCase):

  def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
    """Return a thunk that draws a [10, num] array of truncated-normal
    samples, one fresh graph/session per call."""
    def func():
      with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
        rng = tf.truncated_normal(
            [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
        return ret
    return func

  # Asserts that different trials (1000 samples per trial) is unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which uses the same random number seed.
  def testDistinct(self):
    # NOTE: RandomParameters on GPU is not supported.
    for use_gpu in [False]:
      for dt in tf.float32, tf.float64:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu)
        x = sampler()
        y = sampler()
        # Number of different samples.
        count = (x == y).sum()
        if count >= 10:
          print("x = ", x)
          print("y = ", y)
          print("count = ", count)
        self.assertTrue(count < 10)

  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    for dt in tf.float32, tf.float64:
      results = {}
      for use_gpu in [False, True]:
        # We need a particular larger number of samples to test multiple rounds
        # on GPU
        sampler = self._Sampler(1000000, 0.0, 1.0, dt, use_gpu=use_gpu,
                                seed=12345)
        results[use_gpu] = sampler()
      self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)

  def testSeed(self):
    # Identical seeds must reproduce identical streams.
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        self.assertAllEqual(sx(), sy())

  # The effective standard deviation of truncated normal is 85% of the
  # requested one.
  def testStdDev(self):
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        stddev = 3.0
        sampler = self._Sampler(100000, 0.0, stddev, dt, use_gpu=use_gpu)
        x = sampler()
        print("std(x)", np.std(x), abs(np.std(x) / stddev - 0.85))
        self.assertTrue(abs(np.std(x) / stddev - 0.85) < 0.04)

  def testNoCSE(self):
    # Two random ops must not be merged by common-subexpression elimination.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        shape = [2, 3, 4]
        rnd1 = tf.truncated_normal(shape, 0.0, 1.0, tf.float32)
        rnd2 = tf.truncated_normal(shape, 0.0, 1.0, tf.float32)
        diff = rnd2 - rnd1
        self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
class RandomUniformTest(tf.test.TestCase):

  def _Sampler(self, num, minv, maxv, dtype, use_gpu, seed=None):
    """Return a thunk that draws a [10, num] array of U[minv, maxv) samples,
    one fresh graph/session per call."""
    def func():
      with self.test_session(use_gpu=use_gpu, graph=tf.Graph()) as sess:
        rng = tf.random_uniform(
            [num], minval=minv, maxval=maxv, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
        return ret
    return func

  def testRange(self):
    # All samples must fall within the requested [minval, maxval] bounds.
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sampler = self._Sampler(1000, -2., 8., dt, use_gpu=use_gpu)
        x = sampler()
        self.assertTrue(-2 <= np.min(x))
        self.assertTrue(np.max(x) <= 8)

  # Asserts that different trials (1000 samples per trial) is unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which uses the same random number seed.
  def testDistinct(self):
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu)
        x = sampler()
        y = sampler()
        count = (x == y).sum()
        if count >= 10:
          print("x = ", x)
          print("y = ", y)
          print("count = ", count)
        self.assertTrue(count < 10)

  # Checks that the CPU and GPU implementation returns the same results,
  # given the same random seed
  def testCPUGPUMatch(self):
    for dt in tf.float32, tf.float64:
      results = {}
      for use_gpu in [False, True]:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)

  def testSeed(self):
    # Identical seeds must reproduce identical streams.
    for use_gpu in [False, True]:
      for dt in tf.float32, tf.float64:
        sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=345)
        self.assertAllEqual(sx(), sy())

  def testNoCSE(self):
    # Two random ops must not be merged by common-subexpression elimination.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        shape = [2, 3, 4]
        rnd1 = tf.random_uniform(shape, 0.0, 1.0,
                                 dtype=tf.float32)
        rnd2 = tf.random_uniform(shape, 0.0, 1.0,
                                 dtype=tf.float32)
        diff = (rnd2 - rnd1).eval()
        self.assertTrue(np.linalg.norm(diff) > 0.1)
class RandomShapeTest(tf.test.TestCase):
  """Static shape inference for the random ops: fully known, partially
  known (rank only), and fully unknown shapes."""

  def testRandomParameters(self):
    # Fully known shape.
    rnd1 = tf.truncated_normal([1, 2, 3])
    self.assertEqual([1, 2, 3], rnd1.get_shape())
    # Partially known shape.
    rnd2 = tf.truncated_normal(tf.placeholder(tf.int32, shape=(3,)))
    self.assertEqual([None, None, None], rnd2.get_shape().as_list())
    # Unknown shape.
    rnd3 = tf.truncated_normal(tf.placeholder(tf.int32))
    self.assertIs(None, rnd3.get_shape().ndims)

  def testRandomNormal(self):
    # Fully known shape.
    rnd1 = tf.random_normal([1, 2, 3])
    self.assertEqual([1, 2, 3], rnd1.get_shape())
    # Partially known shape.
    rnd2 = tf.random_normal(tf.placeholder(tf.int32, shape=(3,)))
    self.assertEqual([None, None, None], rnd2.get_shape().as_list())
    # Unknown shape.
    rnd3 = tf.random_normal(tf.placeholder(tf.int32))
    self.assertIs(None, rnd3.get_shape().ndims)

  def testRandomUniform(self):
    # Fully known shape.
    rnd1 = tf.random_uniform([1, 2, 3])
    self.assertEqual([1, 2, 3], rnd1.get_shape())
    # Partially known shape.
    rnd2 = tf.random_uniform(
        tf.placeholder(tf.int32, shape=(3,)))
    self.assertEqual([None, None, None], rnd2.get_shape().as_list())
    # Unknown shape.
    rnd3 = tf.random_uniform(tf.placeholder(tf.int32))
    self.assertIs(None, rnd3.get_shape().ndims)
if __name__ == "__main__":
tf.test.main()
| |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import logging
from torch.autograd import Variable
from .utils import load_embeddings, AverageMeter
#from .rnn_reader import RnnDocReader
import pdb
logger = logging.getLogger('DrQA')
class DocReaderModel(object):
"""High level model that handles intializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
def __init__(self, opt, word_dict, char_dict, feature_dict, state_dict=None):
#Cudnn
#if not opt['use_cudnn']:
# torch.backends.cudnn.enabled=False
# Book-keeping.
self.opt = opt
self.word_dict = word_dict
self.char_dict = char_dict
self.feature_dict = feature_dict
self.updates = 0
self.train_loss = AverageMeter()
self.train_loss_QA = AverageMeter()
self.train_loss_sentpredict = AverageMeter()
#pdb.set_trace()
self.input_idx_bdy=5
self.target_idx_start=5
if opt['add_char2word']:
self.input_idx_bdy += 2 # x1_c, x2_c
self.target_idx_start += 2
if opt['ans_sent_predict']:
self.input_idx_bdy += 1 # x1_sent_mask
self.target_idx_start += 3
# Building network.
if opt['net'] == 'rnn_reader':
from .rnn_reader import RnnDocReader
elif opt['net'] == 'rnet_qp':
from .rnet_qp import RnnDocReader
elif opt['net'] == 'rnet':
from .rnet import RnnDocReader
self.network = RnnDocReader(opt)
if state_dict:
new_state = set(self.network.state_dict().keys())
for k in list(state_dict['network'].keys()):
if not k in new_state:
del state_dict['network'][k]
self.network.load_state_dict(state_dict['network'])
# Building optimizer.
parameters = [p for p in self.network.parameters() if p.requires_grad]
if opt['optimizer'] == 'sgd':
self.optimizer = optim.SGD(parameters, opt['learning_rate'],
momentum=opt['momentum'],
weight_decay=opt['weight_decay'])
elif opt['optimizer'] == 'adamax':
self.optimizer = optim.Adamax(parameters,
weight_decay=opt['weight_decay'],
lr=self.opt['learning_rate'])
elif self.opt['optimizer'] == 'adam':
self.optimizer = optim.Adam(parameters,
weight_decay=self.opt['weight_decay'],
lr=self.opt['learning_rate'])
else:
raise RuntimeError('Unsupported optimizer: %s' % opt['optimizer'])
def set_lrate(self, lrate):
self.optimizer.param_groups[0]['lr']=lrate
def set_embeddings(self):
# Read word embeddings.
if 'embedding_file' not in self.opt:
logger.warning('[ WARNING: No embeddings provided. '
'Keeping random initialization. ]')
return
logger.info('[ Loading pre-trained embeddings ]')
embeddings = load_embeddings(self.opt, self.word_dict)
logger.info('[ Num embeddings = %d ]' % embeddings.size(0))
# Sanity check dimensions
new_size = embeddings.size()
old_size = self.network.embedding.weight.size()
if new_size[1] != old_size[1]:
raise RuntimeError('Embedding dimensions do not match.')
if new_size[0] != old_size[0]:
logger.warning(
'[ WARNING: Number of embeddings changed (%d->%d) ]' %
(old_size[0], new_size[0])
)
# Swap weights
self.network.embedding.weight.data = embeddings
# If partially tuning the embeddings, keep the old values
if self.opt['tune_partial'] > 0:
if self.opt['tune_partial'] + 2 < embeddings.size(0):
fixed_embedding = embeddings[self.opt['tune_partial'] + 2:]
self.network.fixed_embedding = fixed_embedding
def update(self, ex):
# Train mode
self.network.train()
#pdb.set_trace()
# Transfer to GPU
if self.opt['cuda']:
#inputs = [Variable(e.cuda(async=True)) for e in ex[:5]]
#target_s = Variable(ex[5].cuda(async=True))
#target_e = Variable(ex[6].cuda(async=True))
inputs = [Variable(e.cuda(async=True)) for e in ex[:self.input_idx_bdy]]
target_s = Variable(ex[self.target_idx_start].cuda(async=True))
target_e = Variable(ex[self.target_idx_start+1].cuda(async=True))
else:
#inputs = [Variable(e) for e in ex[:5]]
#target_s = Variable(ex[5])
#target_e = Variable(ex[6]
inputs = [Variable(e) for e in ex[:self.input_idx_bdy]]
target_s = Variable(ex[self.target_idx_start])
target_e = Variable(ex[self.target_idx_start+1])
#pdb.set_trace()
if self.opt['ans_sent_predict']:
inputs = inputs + [ex[self.input_idx_bdy]]
target_sent = Variable(torch.from_numpy(np.asarray(ex[self.input_idx_bdy+1])).cuda(async=True))
"""
inputs = [Variable(e.cuda(async=True)) for e in ex[:5]]
inputs += [[e[0] for e in ex[5:7]]] # Add targets to inputs for training pointer network
target_s = Variable(ex[5].cuda(async=True))
target_e = Variable(ex[6].cuda(async=True))
else:
inputs = [Variable(e) for e in ex[:5]]
inputs += [[e[0] for e in ex[5:7]]] # Add targets to inputs for training pointer network
target_s = Variable(ex[5])
target_e = Variable(ex[6])
"""
# Run forward
#pdb.set_trace()
score_list = self.network(*inputs)
if len(score_list) == 3:
score_s = score_list[0]
score_e = score_list[1]
score_sent = score_list[2]
elif len(score_list) == 2:
score_s = score_list[0]
score_e = score_list[1]
elif len(score_list) == 1:
score_sent = score_list[0]
# Define computation graph for multi-task learning
if self.opt['task_QA'] and not self.opt['ans_sent_predict']:
loss_QA = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
loss = loss_QA
elif self.opt['ans_sent_predict'] and self.opt['task_QA']:
loss_QA = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
loss_sent = F.nll_loss(score_sent, target_sent)
loss = loss_QA + self.opt['coeff_ans_predict']*loss_sent
self.train_loss_QA.update(loss_QA.data[0], ex[0].size(0))
self.train_loss_sentpredict.update(loss_sent.data[0], ex[0].size(0))
# Update
#pdb.set_trace()
self.train_loss.update(loss.data[0], ex[0].size(0))
# Clear gradients and run backward
self.optimizer.zero_grad()
#if self.opt['ans_sent_predict']:
#loss.backward(retain_variables=True) # since we define multi-task learning graph, we don't have to do retain_variables
#else:
loss.backward()
# Clip gradients
if self.opt['grad_clipping'] > 0:
torch.nn.utils.clip_grad_norm(self.network.parameters(), self.opt['grad_clipping'])
# Update parameters
self.optimizer.step()
self.updates += 1
# Reset any partially fixed parameters (e.g. rare words)
self.reset_parameters()
def predict(self, ex):
# Eval mode
self.network.eval()
#pdb.set_trace()
# Transfer to GPU
if self.opt['cuda']:
inputs = [Variable(e.cuda(async=True)) for e in ex[:self.input_idx_bdy]]
else:
inputs = [Variable(e) for e in ex[:self.input_idx_bdy]]
# Run forward
#pdb.set_trace()
score_list = self.network(*inputs)
score_s = score_list[0]
score_e = score_list[1]
# Transfer to CPU/normal tensors for numpy ops
score_s = score_s.data.cpu()
score_e = score_e.data.cpu()
# Get argmax text spans
text = ex[-2]
spans = ex[-1]
predictions = []
max_len = self.opt['max_len'] or score_s.size(1)
for i in range(score_s.size(0)):
scores = torch.ger(score_s[i], score_e[i])
scores.triu_().tril_(max_len - 1)
scores = scores.numpy()
s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]
predictions.append(text[i][s_offset:e_offset])
return predictions
def reset_parameters(self):
# Reset fixed embeddings to original value
if self.opt['tune_partial'] > 0:
offset = self.opt['tune_partial'] + 2
if offset < self.network.embedding.weight.data.size(0):
self.network.embedding.weight.data[offset:] \
= self.network.fixed_embedding
def save(self, filename):
params = {
'state_dict': {
'network': self.network.state_dict(),
},
'word_dict': self.word_dict,
'char_dict': self.char_dict,
'feature_dict': self.feature_dict,
'config': self.opt,
}
try:
torch.save(params, filename)
except BaseException:
logger.warn('[ WARN: Saving failed... continuing anyway. ]')
    def cuda(self):
        """Move the underlying network's parameters onto the GPU in place."""
        self.network.cuda()
| |
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tasks related to Surveys.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
import logging
from google.appengine.api.labs import taskqueue
from django import http
from soc.logic import system
from soc.tasks.helper import error_handler
def getDjangoURLPatterns():
  """Returns the URL patterns for the tasks in this module.

  Each entry maps a task-queue URL regex to the fully-qualified view name.
  """
  return [
      (r'tasks/surveys/projects/send_reminder/spawn$',
       'soc.tasks.surveys.spawnRemindersForProjectSurvey'),
      (r'tasks/surveys/projects/send_reminder/send$',
       'soc.tasks.surveys.sendSurveyReminderForProject'),
      ]
def spawnRemindersForProjectSurvey(request, *args, **kwargs):
  """Spawns tasks for each StudentProject in the given Program.

  Expects the following to be present in the POST dict:
    program_key: Specifies the program key name for which to loop over all the
                 StudentProjects for
    survey_key: specifies the key name for the ProjectSurvey to send reminders
                for
    survey_type: either project or grading depending on the type of Survey
    project_key: optional to specify which project was the last for which a
                 task was spawn

  Args:
    request: Django Request object

  Returns:
    An HTTP 200 response even on bad input so the task queue does not
    retry the task; errors are logged instead.
  """
  from google.appengine.ext import db
  from soc.modules.gsoc.logic.models.program import logic as program_logic
  from soc.modules.gsoc.logic.models.student_project import logic as \
      student_project_logic
  # set default batch size; each task run handles at most this many projects
  batch_size = 10
  post_dict = request.POST
  # retrieve the program_key and survey_key from POST data
  program_key = post_dict.get('program_key')
  survey_key = post_dict.get('survey_key')
  survey_type = post_dict.get('survey_type')
  if not (program_key and survey_key and survey_type):
    # invalid task data, log and return OK (no retry)
    return error_handler.logErrorAndReturnOK(
        'Invalid sendRemindersForProjectSurvey data: %s' % post_dict)
  # get the program for the given keyname
  program_entity = program_logic.getFromKeyName(program_key)
  if not program_entity:
    # invalid program specified, log and return OK
    return error_handler.logErrorAndReturnOK(
        'Invalid program specified: %s' % program_key)
  # check and retrieve the project_key that has been done last
  if 'project_key' in post_dict:
    project_start_key = post_dict['project_key']
  else:
    project_start_key = None
  # get all valid StudentProjects from starting key
  fields = {'program': program_entity,
            'status': 'accepted'}
  if project_start_key:
    # retrieve the last project that was done
    project_start = student_project_logic.getFromKeyName(project_start_key)
    if not project_start:
      # invalid starting project key specified, log and return OK
      return error_handler.logErrorAndReturnOK(
          'Invalid Student Project Key specified: %s' %(project_start_key))
    # only fetch projects whose datastore key sorts after the last handled one
    fields['__key__ >'] = project_start.key()
  project_entities = student_project_logic.getForFields(fields,
                                                        limit=batch_size)
  for project_entity in project_entities:
    # pass along these params as POST to the new task
    task_params = {'survey_key': survey_key,
                   'survey_type': survey_type,
                   'project_key': project_entity.key().id_or_name()}
    task_url = '/tasks/surveys/projects/send_reminder/send'
    new_task = taskqueue.Task(params=task_params, url=task_url)
    new_task.add('mail')
  if len(project_entities) == batch_size:
    # a full batch means there may be more projects; spawn a new task
    # starting from the last project handled in this batch
    new_project_start = project_entities[batch_size-1].key().id_or_name()
    # pass along these params as POST to the new task
    task_params = {'program_key': program_key,
                   'survey_key': survey_key,
                   'survey_type': survey_type,
                   'project_key': new_project_start}
    task_url = '/tasks/surveys/projects/send_reminder/spawn'
    new_task = taskqueue.Task(params=task_params, url=task_url)
    new_task.add()
  # return OK
  return http.HttpResponse()
def sendSurveyReminderForProject(request, *args, **kwargs):
  """Sends a reminder mail for a given StudentProject and Survey.

  A reminder is only send if no record is on file for the given Survey and
  StudentProject.

  Expects the following to be present in the POST dict:
    survey_key: specifies the key name for the ProjectSurvey to send reminders
                for
    survey_type: either project or grading depending on the type of Survey
    project_key: key which specifies the project to send a reminder for

  Args:
    request: Django Request object

  Returns:
    An HTTP 200 response even on bad input so the task queue does not
    retry the task; errors are logged instead.
  """
  from soc.logic import mail_dispatcher
  from soc.logic.models.site import logic as site_logic
  from soc.views.helper import redirects
  from soc.modules.gsoc.logic.models.org_admin import logic as org_admin_logic
  from soc.modules.gsoc.logic.models.student_project import logic as \
      student_project_logic
  from soc.modules.gsoc.logic.models.survey import grading_logic
  from soc.modules.gsoc.logic.models.survey import project_logic
  post_dict = request.POST
  project_key = post_dict.get('project_key')
  survey_key = post_dict.get('survey_key')
  survey_type = post_dict.get('survey_type')
  if not (project_key and survey_key and survey_type):
    # invalid task data, log and return OK
    return error_handler.logErrorAndReturnOK(
        'Invalid sendSurveyReminderForProject data: %s' % post_dict)
  # set logic depending on survey type specified in POST
  if survey_type == 'project':
    survey_logic = project_logic
  elif survey_type == 'grading':
    survey_logic = grading_logic
  else:
    # an unknown type previously fell through and crashed with a NameError
    # on survey_logic below; log it and return OK instead
    return error_handler.logErrorAndReturnOK(
        'Invalid survey_type specified: %s' % survey_type)
  # retrieve the project and survey
  student_project = student_project_logic.getFromKeyName(project_key)
  if not student_project:
    # no existing project found, log and return OK
    return error_handler.logErrorAndReturnOK(
        'Invalid project specified %s:' % project_key)
  survey = survey_logic.getFromKeyName(survey_key)
  if not survey:
    # no existing survey found, log and return OK
    return error_handler.logErrorAndReturnOK(
        'Invalid survey specified %s:' % survey_key)
  # try to retrieve an existing record; its presence means the survey was
  # already taken and no reminder is needed
  record_logic = survey_logic.getRecordLogic()
  fields = {'project': student_project,
            'survey': survey}
  record_entity = record_logic.getForFields(fields, unique=True)
  if not record_entity:
    # send reminder email because we found no record
    student_entity = student_project.student
    site_entity = site_logic.getSingleton()
    if survey_type == 'project':
      survey_redirect = redirects.getTakeSurveyRedirect(
          survey,{'url_name': 'gsoc/project_survey'})
      to_role = student_entity
      mail_template = 'soc/project_survey/mail/reminder_gsoc.html'
    elif survey_type == 'grading':
      survey_redirect = redirects.getTakeSurveyRedirect(
          survey,{'url_name': 'gsoc/grading_project_survey'})
      to_role = student_project.mentor
      mail_template = 'soc/grading_project_survey/mail/reminder_gsoc.html'
    survey_url = "http://%(host)s%(redirect)s" % {
        'redirect': survey_redirect,
        'host': system.getHostname(),
        }
    # set the context for the mail template
    mail_context = {
        'student_name': student_entity.name(),
        'project_title': student_project.title,
        'survey_url': survey_url,
        'survey_end': survey.survey_end,
        'to_name': to_role.name(),
        'site_name': site_entity.site_name,
        }
    # set the sender
    (sender, sender_address) = mail_dispatcher.getDefaultMailSender()
    mail_context['sender'] = sender_address
    # set the receiver and subject
    mail_context['to'] = to_role.email
    mail_context['subject'] = 'Evaluation Survey "%s" Reminder' %(survey.title)
    # CC all active org admins of the project's organization
    org_entity = student_project.scope
    fields = {'scope': org_entity,
              'status': 'active'}
    org_admin_entities = org_admin_logic.getForFields(fields)
    # collect email addresses for all found org admins
    org_admin_addresses = []
    for org_admin_entity in org_admin_entities:
      org_admin_addresses.append(org_admin_entity.email)
    if org_admin_addresses:
      mail_context['cc'] = org_admin_addresses
    # send out the email
    mail_dispatcher.sendMailFromTemplate(mail_template, mail_context)
  # return OK
  return http.HttpResponse()
| |
# -*- coding: UTF-8 -*-
from abc import ABCMeta
from abc import abstractmethod
from . import common
import datetime
import hashlib
import logging
import tweepy
import dbm
import tempfile
import collections
# Module logger
logger = logging.getLogger(__name__)
class ExecutorFactory(common.loggable):
    """
    Factory responsible to create
    an :class:`twitter_monitor.core.Executor` instance.

    :param routines: A list of :class:`twitter_monitor.core.Routine`
        subclasses (**not instances**)
    :param twitter_keys: A dictionary with api twitter keys. It must have these
        keys (you can manage all of them on https://apps.twitter.com/).

        - consumer_key
        - consumer_secret
        - access_token_key
        - access_token_secret

    :param setup_default_logger: If True (default) it'll setup the root logger.
    """

    def __init__(self, routines, twitter_keys, setup_default_logger=True):
        self.routines = routines
        self.twitter_keys = twitter_keys
        self.setup_default_logger = setup_default_logger

    def create_default(self):
        """
        Create a default Executor and setup a default logger.
        """
        self._setup_logger()
        self.logger.debug("Creating a default Executor")
        api = self._create_twitter_api()
        return Executor(self._create_notifier(api), self.routines,
                        self._create_key_value_store())

    def _setup_logger(self):
        """
        Setup module logger to show processing messages.
        """
        if not self.setup_default_logger:
            return
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s'))
        root = logging.getLogger("")
        root.addHandler(handler)
        root.setLevel(logging.DEBUG)

    def _create_twitter_api(self):
        # Build an authenticated tweepy API client from the stored keys.
        self.logger.debug("Creating a twitter api")
        keys = self.twitter_keys
        self.logger.debug("Consumer_key: " + keys["consumer_key"])
        self.logger.debug("Consumer_secret: " + keys["consumer_secret"])
        self.logger.debug("Access_token_key: " + keys["access_token_key"])
        self.logger.debug("Access_token_secret: " + keys["access_token_secret"])
        auth = tweepy.OAuthHandler(keys['consumer_key'],
                                   keys['consumer_secret'])
        auth.set_access_token(keys['access_token_key'],
                              keys['access_token_secret'])
        return tweepy.API(auth)

    def _create_notifier(self, twitter_api):
        return Notifier(twitter_api)

    def _create_key_value_store(self):
        # Best effort: open a dbm file in the temp dir; fall back to None
        # when it cannot be opened (Executor then uses a plain dict).
        try:
            path = "{}/{}".format(tempfile.gettempdir(),
                                  ".twitter-monitor-info")
            return dbm.open(path, "c")
        except Exception:
            return None
class Executor(common.loggable):
    """
    Responsible to run routines. This class must receive
    an instance of :class:`twitter_monitor.core.Notifier` class
    and an array of routines.

    :param notifier: An instance of :class:`twitter_monitor.core.Notifier`
    :param routines: A list of :class:`twitter_monitor.core.Routine`
        subclasses (**not instances**)
    :param key_value_store: A dictionary like storage.
        It is used to store info about last
        executions (like last execution time).
        It is usual to use a simple key store like *anydbm*.
    """

    def __init__(self, notifier, routines, key_value_store=None):
        self.notifier = notifier
        self.routines = routines
        # NOTE: the default used to be a mutable `{}` literal, which is
        # shared by every Executor created without an explicit store; use
        # a fresh dict per instance instead.
        self.key_value_store = ({} if key_value_store is None
                                else key_value_store)
        self._routines_instances = None

    def run(self):
        """
        Execute all routines. It returns True if all routines are
        executed with success.
        """
        success = True
        try:
            for rt in self.routines_instances():
                self.logger.info("Running \"{}\"".format(str(rt)))
                if not rt.run():
                    self.logger.error(
                        "Error on running routine \"{}\"".format(str(rt)))
                    success = False
                self.logger.info("Finished \"{}\"".format(str(rt)))
        except Exception as e:
            # Close a dbm-backed store on failure so data is flushed.
            # `collections.Callable` was removed in Python 3.10; the
            # builtin callable() performs the same check.
            if callable(getattr(self.key_value_store, "close", None)):
                self.key_value_store.close()
            self.logger.error("Error: " + str(e))
            success = False
        return success

    def routines_instances(self):
        """
        Instantiate and return all routines instances (created lazily,
        then cached for subsequent calls).
        """
        if self._routines_instances is None:
            self._routines_instances = [
                class_ref(self.notifier, self.key_value_store)
                for class_ref in self.routines]
        return self._routines_instances
class Notifier(common.loggable):
    """
    It sends a message to destinations (followers) with twitter API.

    :param api: An API instance (for now, we are using Tweepy)
    """

    def __init__(self, api):
        self._api = api
        # Followers are fetched once and cached for this notifier's lifetime.
        self._followers = None

    def send(self, message):
        """
        Send a message to all destinations

        :param message: A message to send to all followers.
        """
        if not isinstance(message, str):
            message = str(message, errors="ignore")
        self.logger.debug("Message to send: \"{}\"".format(message))
        if not message.strip():
            # @todo - Change this to exception
            self.logger.warn("Empty message")
            return
        for account in self._get_followers():
            self.logger.info("Sending message to \"{}\": \"{}\"".format(
                account.screen_name, message))
            self._api.send_direct_message(user_id=account.id, text=message)

    def _get_followers(self):
        if self._followers is None:
            self._followers = self._api.followers()
        return self._followers
class Routine(common.loggable, metaclass=ABCMeta):
    """
    Routine representation

    :param notifier: An instance of :class:`twitter_monitor.core.Notifier`
    :param key_value_store: A dictionary like storage.
        It is used to store info about last
        executions (like last execution time).
        It is usual to use a simple key store like *anydbm*.
    """

    name = None  #: Routine full name.
    short_name = None  #: Routine short name (it'll be used in the message).
    interval_minutes = None  #: Interval (in minutes) to execute routine

    _file_last_execution = None  #: Cache of last execution file content

    def __init__(self, notifier, key_value_store=None):
        self.notifier = notifier
        # NOTE: the default used to be a mutable `{}` literal shared by all
        # routines constructed without a store; use a fresh dict instead.
        self.key_value_store = ({} if key_value_store is None
                                else key_value_store)
        if self.name is None:
            self.name = self.__class__.__name__
        if self.short_name is None:
            self.short_name = self.__class__.__name__

    def run(self):
        """
        Run this routine

        Returns True when the routine was skipped (interval not reached)
        or executed successfully; False on execution failure.
        """
        if self._skip_execution():
            self.logger.info("Skipping execution")
            return True
        if self._execute():
            self._set_last_execution()
            return True
        return False

    def _skip_execution(self):
        # Skip when the configured interval has not yet elapsed since the
        # last successful execution.
        if self.interval_minutes is None or self.last_execution is None:
            return False
        timedelta_compare = datetime.timedelta(minutes=self.interval_minutes)
        diff = datetime.datetime.now() - self.last_execution
        if diff < timedelta_compare:
            message = "Interval not reached. Elapsed {} minutes"
            # total_seconds() keeps the log correct even when the elapsed
            # time spans more than one day (.seconds wraps at 24h).
            self.logger.info(message.format(diff.total_seconds() / 60))
            return True
        return False

    @abstractmethod
    def _execute(self):
        """
        Put your code here in your subclasses.
        Must be implemented by subclasses.
        Use self.notify('Some message') to send a message to recepients.
        """
        return NotImplemented

    def clear_last_execution(self):
        """
        Clear last execution time of this routine
        """
        self._set_last_execution("")

    @property
    def last_execution(self):
        """
        Returns a datetime object with last execution of routine
        (or None when unknown/unparseable).
        """
        self.logger.debug("Finding last execution time")
        try:
            if self.uid in self.key_value_store:
                val = self.key_value_store[self.uid]
                if isinstance(val, bytes):
                    # dbm-backed stores return bytes; decode before parsing.
                    val = val.decode("utf-8")
                return datetime.datetime.strptime(val, "%Y-%m-%d %H:%M:%S.%f")
        except Exception as e:
            self.logger.debug(
                "Exception - Method/property 'last_execution': " + str(e))
        return None

    def _set_last_execution(self, val=None):
        # strftime with an explicit %f guarantees the stored value always
        # matches the format parsed by last_execution (isoformat() drops
        # the fractional part when microseconds happen to be zero).
        now_str = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
        now_str = now_str if val is None else val
        self.logger.debug(
            "Setting last execution file content to: '{}'".format(now_str))
        self.key_value_store[self.uid] = now_str

    @property
    def uid(self):
        """
        Routine unique id (md5 format)
        """
        name = "{} {} {}".format(
            self.__class__.__name__, self.name, self.short_name)
        m = hashlib.md5()
        m.update(name.encode("ascii", errors="ignore"))
        return m.hexdigest()

    def notify(self, message):
        """
        Send the message
        """
        if not isinstance(message, str):
            message = str(message)
        if len(message.strip()) == 0:
            self.logger.debug("Empty message")
            return
        new_message = "{}: {}".format(
            self.short_name, message)
        self.notifier.send(new_message)

    def __str__(self):
        return "Routine '{}'".format(self.name)
| |
import json
from datetime import datetime, timedelta
from django.contrib.contenttypes.models import ContentType
from nose.tools import eq_
from kitsune import search as constants
from kitsune.access.tests import permission
from kitsune.forums.tests import forum, post, restricted_forum, thread
from kitsune.products.tests import product, topic
from kitsune.questions.tests import question, answer, answervote, questionvote
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.sumo.tests import LocalizingClient
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.tests import group, user
from kitsune.wiki.tests import document, revision, helpful_vote
class AdvancedSearchTests(ElasticTestCase):
client_class = LocalizingClient
def test_json_format(self):
"""JSON without callback should return application/json"""
response = self.client.get(reverse('search'), {
'q': 'bookmarks',
'a': '1',
'format': 'json',
}, follow=True)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/json')
def test_json_callback_validation(self):
"""Various json callbacks -- validation"""
response = self.client.get(reverse('search'), {
'q': 'bookmarks',
'a': '1',
'format': 'json',
'callback': 'callback',
}, follow=True)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/x-javascript')
def test_json_empty_query_a_1(self):
"""Empty query returns JSON format"""
response = self.client.get(reverse('search'), {
'format': 'json', 'a': 1,
}, follow=True)
eq_(response.status_code, 200)
eq_(response['Content-Type'], 'application/json')
def test_json_empty_query_a_2(self):
"""Empty query asking for form returns 400"""
# Test with flags for advanced search or not
response = self.client.get(reverse('search'), {
'format': 'json', 'a': 2,
}, follow=True)
eq_(response.status_code, 400)
eq_(response['Content-Type'], 'application/json')
def test_search_products(self):
p = product(title=u'Product One', slug='product', save=True)
doc1 = document(title=u'cookies', locale='en-US', category=10,
save=True)
revision(document=doc1, is_approved=True, save=True)
doc1.products.add(p)
doc1.save()
self.refresh()
response = self.client.get(
reverse('search.advanced'),
{'a': '1', 'product': 'product', 'q': 'cookies', 'w': '1'})
assert "We couldn't find any results for" not in response.content
eq_(200, response.status_code)
assert 'Product One' in response.content
def test_search_multiple_products(self):
p = product(title=u'Product One', slug='product-one', save=True)
p2 = product(title=u'Product Two', slug='product-two', save=True)
doc1 = document(title=u'cookies', locale='en-US', category=10,
save=True)
revision(document=doc1, is_approved=True, save=True)
doc1.products.add(p)
doc1.products.add(p2)
doc1.save()
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'a': '1',
'product': ['product-one', 'product-two'],
'q': 'cookies',
'w': '1',
})
assert "We couldn't find any results for" not in response.content
eq_(200, response.status_code)
assert 'Product One, Product Two' in response.content
def test_wiki_no_query(self):
"""Tests advanced search with no query"""
doc = document(locale=u'en-US', category=10, save=True)
doc.tags.add(u'desktop')
revision(document=doc, is_approved=True, save=True)
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'q': '', 'tags': 'desktop', 'w': '1', 'a': '1',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
def test_questions_sortby(self):
"""Tests advanced search for questions with a sortby"""
question(title=u'tags tags tags', save=True)
self.refresh()
# Advanced search for questions with sortby set to 3 which is
# '-replies' which is different between Sphinx and ES.
response = self.client.get(reverse('search.advanced'), {
'q': 'tags', 'tags': 'desktop', 'w': '2', 'a': '1', 'sortby': '3',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
def test_sortby_documents_helpful(self):
"""Tests advanced search with a sortby_documents by helpful"""
r1 = revision(is_approved=True, save=True)
r2 = revision(is_approved=True, save=True)
helpful_vote(revision=r2, helpful=True, save=True)
# Note: We have to wipe and rebuild the index because new
# helpful_votes don't update the index data.
self.setup_indexes()
self.reindex_and_refresh()
# r2.document should come first with 1 vote.
response = self.client.get(reverse('search.advanced'), {
'w': '1', 'a': '1', 'sortby_documents': 'helpful',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(r2.document.title, content['results'][0]['title'])
# Vote twice on r1, now it should come first.
helpful_vote(revision=r1, helpful=True, save=True)
helpful_vote(revision=r1, helpful=True, save=True)
self.setup_indexes()
self.reindex_and_refresh()
response = self.client.get(reverse('search.advanced'), {
'w': '1', 'a': '1', 'sortby_documents': 'helpful',
'format': 'json'})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(r1.document.title, content['results'][0]['title'])
def test_questions_num_votes(self):
"""Tests advanced search for questions num_votes filter"""
q = question(title=u'tags tags tags', save=True)
# Add two question votes
questionvote(question=q, save=True)
questionvote(question=q, save=True)
self.refresh()
# Advanced search for questions with num_votes > 5. The above
# question should be not in this set.
response = self.client.get(reverse('search.advanced'), {
'q': '', 'tags': 'desktop', 'w': '2', 'a': '1',
'num_voted': 2, 'num_votes': 5,
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 0)
# Advanced search for questions with num_votes < 1. The above
# question should be not in this set.
response = self.client.get(reverse('search.advanced'), {
'q': '', 'tags': 'desktop', 'w': '2', 'a': '1',
'num_voted': 1, 'num_votes': 1,
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 0)
def test_num_votes_none(self):
"""Tests num_voted filtering where num_votes is ''"""
q = question(save=True)
questionvote(question=q, save=True)
self.refresh()
qs = {'q': '', 'w': 2, 'a': 1, 'num_voted': 2, 'num_votes': ''}
response = self.client.get(reverse('search.advanced'), qs)
eq_(200, response.status_code)
def test_forums_search(self):
"""This tests whether forum posts show up in searches"""
thread1 = thread(title=u'crash', save=True)
post(thread=thread1, save=True)
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '0', 'created_date': '',
'updated': '0', 'updated_date': '', 'sortby': '0',
'a': '1', 'w': '4', 'q': 'crash',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
def test_forums_search_authorized_forums(self):
"""Only authorized people can search certain forums"""
# Create two threads: one in a restricted forum and one not.
forum1 = forum(name=u'ou812forum', save=True)
thread1 = thread(forum=forum1, save=True)
post(thread=thread1, content=u'audio', save=True)
forum2 = restricted_forum(name=u'restrictedkeepout', save=True)
thread2 = thread(forum=forum2, save=True)
post(thread=thread2, content=u'audio restricted', save=True)
self.refresh()
# Do a search as an anonymous user but don't specify the
# forums to filter on. Should only see one of the posts.
response = self.client.get(reverse('search.advanced'), {
'author': '',
'created': '0',
'created_date': '',
'updated': '0',
'updated_date': '',
'sortby': '0',
'a': '1',
'w': '4',
'q': 'audio',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
# Do a search as an authorized user but don't specify the
# forums to filter on. Should see both posts.
u = user(save=True)
g = group(save=True)
g.user_set.add(u)
ct = ContentType.objects.get_for_model(forum2)
permission(codename='forums_forum.view_in_forum', content_type=ct,
object_id=forum2.id, group=g, save=True)
self.client.login(username=u.username, password='testpass')
response = self.client.get(reverse('search.advanced'), {
'author': '',
'created': '0',
'created_date': '',
'updated': '0',
'updated_date': '',
'sortby': '0',
'a': '1',
'w': '4',
'q': 'audio',
'format': 'json'
})
# Sees both results
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 2)
def test_forums_search_authorized_forums_specifying_forums(self):
"""Only authorized people can search certain forums they specified"""
# Create two threads: one in a restricted forum and one not.
forum1 = forum(name=u'ou812forum', save=True)
thread1 = thread(forum=forum1, save=True)
post(thread=thread1, content=u'audio', save=True)
forum2 = restricted_forum(name=u'restrictedkeepout', save=True)
thread2 = thread(forum=forum2, save=True)
post(thread=thread2, content=u'audio restricted', save=True)
self.refresh()
# Do a search as an anonymous user and specify both
# forums. Should only see the post from the unrestricted
# forum.
response = self.client.get(reverse('search.advanced'), {
'author': '',
'created': '0',
'created_date': '',
'updated': '0',
'updated_date': '',
'sortby': '0',
'forum': [forum1.id, forum2.id],
'a': '1',
'w': '4',
'q': 'audio',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
# Do a search as an authorized user and specify both
# forums. Should see both posts.
u = user(save=True)
g = group(save=True)
g.user_set.add(u)
ct = ContentType.objects.get_for_model(forum2)
permission(codename='forums_forum.view_in_forum', content_type=ct,
object_id=forum2.id, group=g, save=True)
self.client.login(username=u.username, password='testpass')
response = self.client.get(reverse('search.advanced'), {
'author': '',
'created': '0',
'created_date': '',
'updated': '0',
'updated_date': '',
'sortby': '0',
'forum': [forum1.id, forum2.id],
'a': '1',
'w': '4',
'q': 'audio',
'format': 'json'
})
# Sees both results
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 2)
def test_forums_thread_created(self):
"""Tests created/created_date filtering for forums"""
post_created_ds = datetime(2010, 1, 1, 12, 00)
thread1 = thread(title=u'crash', created=post_created_ds, save=True)
post(thread=thread1,
created=(post_created_ds + timedelta(hours=1)),
save=True)
self.refresh()
# The thread/post should not show up in results for items
# created AFTER 1/12/2010.
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '2', 'created_date': '01/12/2010',
'updated': '0', 'updated_date': '', 'sortby': '0',
'a': '1', 'w': '4', 'q': 'crash',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 0)
# The thread/post should show up in results for items created
# AFTER 1/1/2010.
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '2', 'created_date': '01/01/2010',
'updated': '0', 'updated_date': '', 'sortby': '0',
'a': '1', 'w': '4', 'q': 'crash',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
# The thread/post should show up in results for items created
# BEFORE 1/12/2010.
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '1', 'created_date': '01/12/2010',
'updated': '0', 'updated_date': '', 'sortby': '0',
'a': '1', 'w': '4', 'q': 'crash',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
# The thread/post should NOT show up in results for items
# created BEFORE 12/31/2009.
response = self.client.get(reverse('search.advanced'), {
'author': '', 'created': '1', 'created_date': '12/31/2009',
'updated': '0', 'updated_date': '', 'sortby': '0',
'a': '1', 'w': '4', 'q': 'crash',
'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 0)
def test_multi_word_tag_search(self):
"""Tests searching for tags with spaces in them"""
ques = question(title=u'audio', save=True)
ques.tags.add(u'Windows 7')
self.refresh()
response = self.client.get(reverse('search.advanced'), {
'q': 'audio', 'q_tags': 'Windows 7', 'w': '2', 'a': '1',
'sortby': '0', 'format': 'json'
})
eq_(200, response.status_code)
content = json.loads(response.content)
eq_(content['total'], 1)
def test_category_invalid(self):
"""Tests passing an invalid category"""
# wiki and questions
ques = question(title=u'q1 audio', save=True)
ques.tags.add(u'desktop')
ans = answer(question=ques, save=True)
answervote(answer=ans, helpful=True, save=True)
d1 = document(title=u'd1 audio', locale=u'en-US', category=10,
is_archived=False, save=True)
d1.tags.add(u'desktop')
revision(document=d1, is_approved=True, save=True)
self.refresh()
qs = {'a': 1, 'w': 3, 'format': 'json', 'category': 'invalid'}
response = self.client.get(reverse('search.advanced'), qs)
eq_(2, json.loads(response.content)['total'])
def test_created(self):
"""Basic functionality of created filter."""
created_ds = datetime(2010, 6, 19, 12, 00)
# on 6/19/2010
q1 = question(title=u'q1 audio', created=created_ds, save=True)
q1.tags.add(u'desktop')
ans = answer(question=q1, save=True)
answervote(answer=ans, helpful=True, save=True)
# on 6/21/2010
q2 = question(title=u'q2 audio',
created=(created_ds + timedelta(days=2)),
save=True)
q2.tags.add(u'desktop')
ans = answer(question=q2, save=True)
answervote(answer=ans, helpful=True, save=True)
self.refresh()
qs = {'a': 1, 'w': 2, 'format': 'json',
'sortby': 2, 'created_date': '06/20/2010'}
qs['created'] = constants.INTERVAL_BEFORE
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_([q1.get_absolute_url()], [r['url'] for r in results])
qs['created'] = constants.INTERVAL_AFTER
response = self.client.get(reverse('search.advanced'), qs)
results = json.loads(response.content)['results']
eq_([q2.get_absolute_url()], [r['url'] for r in results])
def test_sortby_invalid(self):
"""Invalid created_date is ignored."""
qs = {'a': 1, 'w': 4, 'format': 'json', 'sortby': ''}
response = self.client.get(reverse('search.advanced'), qs)
eq_(200, response.status_code)
def test_created_date_invalid(self):
    """Invalid created_date is ignored."""
    # One indexed forum post; an unparseable date must not filter it out.
    thread1 = thread(save=True)
    post(thread=thread1, save=True)
    self.refresh()
    qs = {'a': 1, 'w': 4, 'format': 'json',
          'created': constants.INTERVAL_AFTER,
          'created_date': 'invalid'}
    response = self.client.get(reverse('search.advanced'), qs)
    eq_(1, json.loads(response.content)['total'])
def test_created_date_nonexistent(self):
    """created is set while created_date is left out of the query."""
    # The view must tolerate the half-specified filter and still return 200.
    qs = {'a': 1, 'w': 2, 'format': 'json', 'created': 1}
    response = self.client.get(reverse('search.advanced'), qs)
    eq_(200, response.status_code)
def test_updated_invalid(self):
    """Invalid updated_date is ignored."""
    # One indexed forum post; an unparseable date must not filter it out.
    thread1 = thread(save=True)
    post(thread=thread1, save=True)
    self.refresh()
    qs = {'a': 1, 'w': 4, 'format': 'json',
          'updated': 1, 'updated_date': 'invalid'}
    response = self.client.get(reverse('search.advanced'), qs)
    eq_(1, json.loads(response.content)['total'])
def test_updated_nonexistent(self):
    """updated is set while updated_date is left out of the query."""
    thread1 = thread(save=True)
    post(thread=thread1, save=True)
    self.refresh()
    # Half-specified filter should be ignored, not raise a 500.
    qs = {'a': 1, 'w': 2, 'format': 'json', 'updated': 1}
    response = self.client.get(reverse('search.advanced'), qs)
    eq_(response.status_code, 200)
def test_asked_by(self):
    """Check several author values, including test for (anon)"""
    # (author username, expected number of matching questions)
    author_vals = (
        ('DoesNotExist', 0),
        ('jsocol', 2),
        ('pcraciunoiu', 2),
    )

    # Set up all the question data---creates users, creates the
    # questions, shove it all in the index, then query it and see
    # what happens.
    for name, number in author_vals:
        u = user(username=name, save=True)
        for i in range(number):
            ques = question(title=u'audio', creator=u, save=True)
            ques.tags.add(u'desktop')
            ans = answer(question=ques, save=True)
            answervote(answer=ans, helpful=True, save=True)

    self.refresh()

    qs = {'a': 1, 'w': 2, 'format': 'json'}
    for author, total in author_vals:
        qs.update({'asked_by': author})
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(total, json.loads(response.content)['total'])
def test_question_topics(self):
    """Search questions for topics."""
    p = product(save=True)
    t1 = topic(slug='doesnotexist', product=p, save=True)
    t2 = topic(slug='cookies', product=p, save=True)
    t3 = topic(slug='sync', product=p, save=True)
    # Two questions tagged t2, one tagged t3, none tagged t1.
    question(topic=t2, save=True)
    question(topic=t2, save=True)
    question(topic=t3, save=True)

    self.refresh()

    # (topic slug, expected total)
    topic_vals = (
        (t1.slug, 0),
        (t2.slug, 2),
        (t3.slug, 1),
    )

    qs = {'a': 1, 'w': 2, 'format': 'json'}
    for topics, number in topic_vals:
        qs.update({'topics': topics})
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(number, json.loads(response.content)['total'])
def test_wiki_topics(self):
    """Search wiki for topics, includes multiple."""
    t1 = topic(slug='doesnotexist', save=True)
    t2 = topic(slug='extant', save=True)
    t3 = topic(slug='tagged', save=True)

    # First doc: topic t2 only.
    doc = document(locale=u'en-US', category=10, save=True)
    doc.topics.add(t2)
    revision(document=doc, is_approved=True, save=True)

    # Second doc: topics t2 and t3.
    doc = document(locale=u'en-US', category=10, save=True)
    doc.topics.add(t2)
    doc.topics.add(t3)
    revision(document=doc, is_approved=True, save=True)

    self.refresh()

    # (topic slug(s), expected total); a list of slugs is an AND filter,
    # so [t2, t3] matches only the second doc.
    topic_vals = (
        (t1.slug, 0),
        (t2.slug, 2),
        (t3.slug, 1),
        ([t2.slug, t3.slug], 1),
    )

    qs = {'a': 1, 'w': 1, 'format': 'json'}
    for topics, number in topic_vals:
        qs.update({'topics': topics})
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(number, json.loads(response.content)['total'])
def test_wiki_topics_inherit(self):
    """Translations inherit topics from their parents."""
    # Parent (en-US) doc carries the topic; the es translation does not
    # set any topics itself.
    doc = document(locale=u'en-US', category=10, save=True)
    doc.topics.add(topic(slug='extant', save=True))
    revision(document=doc, is_approved=True, save=True)

    translated = document(locale=u'es', parent=doc, category=10,
                          save=True)
    revision(document=translated, is_approved=True, save=True)

    self.refresh()

    # Searching es wiki by the parent's topic should find the translation.
    qs = {'a': 1, 'w': 1, 'format': 'json', 'topics': 'extant'}
    response = self.client.get(reverse('search.advanced', locale='es'), qs)
    eq_(1, json.loads(response.content)['total'])
def test_question_products(self):
    """Search questions for products."""
    p1 = product(slug='b2g', save=True)
    p2 = product(slug='mobile', save=True)
    p3 = product(slug='desktop', save=True)
    # Two questions for p2, one for p3, none for p1.
    question(product=p2, save=True)
    question(product=p2, save=True)
    question(product=p3, save=True)

    self.refresh()

    # (product slug, expected total)
    product_vals = (
        (p1.slug, 0),
        (p2.slug, 2),
        (p3.slug, 1),
    )

    qs = {'a': 1, 'w': 2, 'format': 'json'}
    for products, number in product_vals:
        qs.update({'product': products})
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(number, json.loads(response.content)['total'])
def test_wiki_products(self):
    """Search wiki for products."""
    # (product, number of docs to create for it / expected total)
    prod_vals = (
        (product(slug='b2g', save=True), 0),
        (product(slug='mobile', save=True), 1),
        (product(slug='desktop', save=True), 2),
    )

    for prod, total in prod_vals:
        for i in range(total):
            doc = document(locale=u'en-US', category=10, save=True)
            doc.products.add(prod)
            revision(document=doc, is_approved=True, save=True)

    self.refresh()

    qs = {'a': 1, 'w': 1, 'format': 'json'}
    for prod, total in prod_vals:
        qs.update({'product': prod.slug})
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(total, json.loads(response.content)['total'])
def test_wiki_products_inherit(self):
    """Translations inherit products from their parents."""
    # Parent (en-US) doc carries the product; the fr translation does not.
    doc = document(locale=u'en-US', category=10, save=True)
    p = product(title=u'Firefox', slug=u'desktop', save=True)
    doc.products.add(p)
    revision(document=doc, is_approved=True, save=True)

    translated = document(locale=u'fr', parent=doc, category=10,
                          save=True)
    revision(document=translated, is_approved=True, save=True)

    self.refresh()

    # Searching fr wiki by the parent's product should find the translation.
    qs = {'a': 1, 'w': 1, 'format': 'json', 'product': p.slug}
    response = self.client.get(reverse('search.advanced', locale='fr'), qs)
    eq_(1, json.loads(response.content)['total'])
def test_discussion_filter_author(self):
    """Filter by author in discussion forums."""
    # (author username, number of posts to create / expected total)
    author_vals = (
        ('DoesNotExist', 0),
        ('admin', 1),
        ('jsocol', 4),
    )

    for name, number in author_vals:
        u = user(username=name, save=True)
        for i in range(number):
            thread1 = thread(title=u'audio', save=True)
            post(thread=thread1, author=u, save=True)

    self.refresh()

    qs = {'a': 1, 'w': 4, 'format': 'json'}
    for author, total in author_vals:
        qs.update({'author': author})
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(total, json.loads(response.content)['total'])
def test_discussion_filter_sticky(self):
    """Filter for sticky threads."""
    # Thread is both locked and sticky; thread_type=1 selects sticky.
    thread1 = thread(title=u'audio', is_locked=True, is_sticky=True,
                     save=True)
    post(thread=thread1, save=True)

    self.refresh()

    qs = {'a': 1, 'w': 4, 'format': 'json', 'thread_type': 1, 'forum': 1}
    response = self.client.get(reverse('search.advanced'), qs)
    results = json.loads(response.content)['results']
    eq_(len(results), 1)
def test_discussion_filter_locked(self):
    """Filter for locked threads."""
    # thread_type=2 selects locked threads.
    thread1 = thread(title=u'audio', is_locked=True,
                     save=True)
    post(thread=thread1, save=True)

    self.refresh()

    qs = {'a': 1, 'w': 4, 'format': 'json', 'thread_type': 2}
    response = self.client.get(reverse('search.advanced'), qs)
    results = json.loads(response.content)['results']
    eq_(len(results), 1)
def test_discussion_filter_sticky_locked(self):
    """Filter for locked and sticky threads."""
    thread1 = thread(title=u'audio', is_locked=True, is_sticky=True,
                     save=True)
    post(thread=thread1, save=True)

    self.refresh()

    # Both thread_type values at once; the thread satisfies both.
    qs = {'a': 1, 'w': 4, 'format': 'json', 'thread_type': (1, 2)}
    response = self.client.get(reverse('search.advanced'), qs)
    result = json.loads(response.content)['results'][0]
    eq_(thread1.get_absolute_url(), result['url'])
def test_forums_filter_updated(self):
    """Filter for updated date."""
    post_updated_ds = datetime(2010, 5, 3, 12, 00)
    # thread1's post predates the filter date; thread2's postdates it.
    thread1 = thread(title=u't1 audio', save=True)
    post(thread=thread1, created=post_updated_ds, save=True)

    thread2 = thread(title=u't2 audio', save=True)
    post(thread=thread2,
         created=(post_updated_ds + timedelta(days=2)),
         save=True)

    self.refresh()

    qs = {'a': 1, 'w': 4, 'format': 'json',
          'sortby': 1, 'updated_date': '05/04/2010'}
    # "Before 05/04" should match only thread1.
    qs['updated'] = constants.INTERVAL_BEFORE
    response = self.client.get(reverse('search.advanced'), qs)
    results = json.loads(response.content)['results']
    eq_([thread1.get_absolute_url()], [r['url'] for r in results])
    # "After 05/04" should match only thread2.
    qs['updated'] = constants.INTERVAL_AFTER
    response = self.client.get(reverse('search.advanced'), qs)
    results = json.loads(response.content)['results']
    eq_([thread2.get_absolute_url()], [r['url'] for r in results])
def test_archived(self):
    """Ensure archived articles show only when requested."""
    doc = document(title=u'impalas', locale=u'en-US',
                   is_archived=True, save=True)
    revision(document=doc, summary=u'impalas',
             is_approved=True, save=True)

    self.refresh()

    # include_archived gets the above document
    qs = {'q': 'impalas', 'a': 1, 'w': 1, 'format': 'json',
          'include_archived': 'on'}
    response = self.client.get(reverse('search.advanced'), qs)
    results = json.loads(response.content)['results']
    eq_(1, len(results))

    # no include_archived gets you nothing since the only
    # document in the index is archived
    # NOTE(review): this query also switches 'a' from 1 to 0 -- presumably
    # deliberate (non-advanced mode); confirm against the view's handling.
    qs = {'q': 'impalas', 'a': 0, 'w': 1, 'format': 'json'}
    response = self.client.get(reverse('search.advanced'), qs)
    results = json.loads(response.content)['results']
    eq_(0, len(results))
def test_discussion_filter_forum(self):
    """Filter by forum in discussion forums."""
    # One matching thread in each of two forums.
    forum1 = forum(name=u'Forum 1', save=True)
    thread1 = thread(forum=forum1, title=u'audio 1', save=True)
    post(thread=thread1, save=True)

    forum2 = forum(name=u'Forum 2', save=True)
    thread2 = thread(forum=forum2, title=u'audio 2', save=True)
    post(thread=thread2, save=True)

    self.refresh()

    # Filtering by either forum id should yield exactly its one thread.
    qs = {'a': 1, 'w': 4, 'format': 'json'}
    for forum_id in (forum1.id, forum2.id):
        qs['forum'] = int(forum_id)
        response = self.client.get(reverse('search.advanced'), qs)
        eq_(json.loads(response.content)['total'], 1)
def test_discussion_forum_with_restricted_forums(self):
    """Tests who can see restricted forums in search form."""
    # This is a long test, but it saves us from doing the setup
    # twice.
    forum1 = forum(name=u'ou812forum', save=True)
    thread1 = thread(forum=forum1, title=u'audio 2', save=True)
    post(thread=thread1, save=True)

    forum2 = restricted_forum(name=u'restrictedkeepout', save=True)
    thread2 = thread(forum=forum2, title=u'audio 2', save=True)
    post(thread=thread2, save=True)

    self.refresh()

    # Get the Advanced Search Form as an anonymous user
    response = self.client.get(reverse('search.advanced'), {'a': '2'})
    eq_(200, response.status_code)

    # Regular forum should show up
    assert 'ou812forum' in response.content
    # Restricted forum should not show up
    assert 'restrictedkeepout' not in response.content

    # Grant a fresh user per-object view permission on the restricted forum.
    u = user(save=True)
    g = group(save=True)
    g.user_set.add(u)
    ct = ContentType.objects.get_for_model(forum2)
    permission(codename='forums_forum.view_in_forum', content_type=ct,
               object_id=forum2.id, group=g, save=True)

    # Get the Advanced Search Form as a logged in user
    self.client.login(username=u.username, password='testpass')
    response = self.client.get(reverse('search.advanced'), {'a': '2'})
    eq_(200, response.status_code)

    # Both forums should show up for authorized user
    assert 'ou812forum' in response.content
    assert 'restrictedkeepout' in response.content
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon May 8 03:25:33 2017
@author: alex
"""
#==============================================================================
# This is the algorithm described in the documentation
# But because of our own problems, we have to split up the process
# and use another offline version
# see train_gpu.py
#==============================================================================
import sys, os
sys.path.append('../')
sys.path.append('/home/share/minghan/malmo-challenge/')
sys.path.append('/home/share/minghan/keras/lib/python2.7/site-packages')
import msgpack
import tensorflow as tf
import numpy as np
import random
from malmopy.agent import BaseAgent
from collections import deque, namedtuple
from common import ENV_ACTIONS
from heapq import heapify, heappop, heappush
BATCH_SIZE = 32  # default minibatch size for critic updates
MEM_SIZE = 1000000  # maximum size of the experience replay buffer
REPLAY_START = 10000  # transitions required in the buffer before training begins
GAMMA = 0.99  # reward discounting factor
EPSILON = 0.25  # probability of the agent taking a random action (exploration)
GOAL_DIMS = 27  # goal locations: the 25 central blocks plus the 2 lapis blocks
STATE_DIMS = 9  # dimensions of the state feature vector
class BayesAgent(BaseAgent):
def __init__(self, name, target, collaborator, is_training, sess, visualizer=None):
self.actions = [0, 1, 2]#the available actions
self.goal = np.array([[0,0]])# the location of the goal
self.obs = None # store the last obs, use to update the collaborator
self.collaborator = 20*np.ones((1,3))#encode the collaborator's behavior
self.c_prob = 1.*self.collaborator/(np.sum(self.collaborator, axis = 1)[0])#normalized collaborator vector
super(BayesAgent, self).__init__(str(name), len(self.actions), visualizer)
self._target = str(target)
self._collaborator = str(collaborator)
self.sess = sess
self.state_dims = STATE_DIMS
self.goal_dims = GOAL_DIMS
self.replay_buffer = deque()
self.restore_replay_buffer(is_training)
self.state_buffer = []# use to temporarily store the state
self.actor = Actor(str(name))
self.critic = Critic(self.state_dims, self.goal_dims, sess)
self.save = self.critic.save# check if there's a saved model
def reset(self, obs):
self.state_buffer = []
self.collaborator = self.particle_filtering() # resampling
self.c_prob = 1.*self.collaborator/(np.sum(self.collaborator, axis = 1)[0])
print "Replay buffer length:", len(self.replay_buffer)
def act(self, obs, reward, done, is_training = False):
state = self.state_shaping(obs)
a_state = np.concatenate([state, self.c_prob], 1)
self.update_collaborator(obs, reward)
if is_training:
self.goal = np.array([[state[0][3], state[0][4]]])
rand = random.random()
if rand > EPSILON:#epsilon greedy
action = self.actor.get_action(obs, reward, done, self.goal, is_training)
else:
action = random.choice([0,1,2])
else:
if self.c_prob[0][2] > 0.5:# use particle filter
if self.manhattan_dist((state[0][0], state[0][1]), (1,4)) > self.manhattan_dist((state[0][0], state[0][1]), (7,4)):
return self.actor.get_action(obs, reward, done, [[7,4]], is_training)
else:
return self.actor.get_action(obs, reward, done, [[1,4]], is_training)
me = self.actor.Neighbour(1, state[0][0], state[0][1], 0, "")
target = self.actor.Neighbour(1, state[0][2], state[0][3], 0, "")
path, _ = self.actor._find_shortest_path(me, target, state=obs[0])
if 2 <= len(path) <= 4:
n = path[-2]
if self.manhattan_dist((int(state[0][2]), int(state[0][3])), (n.x,n.z)) == 1 \
and self.manhattan_dist((int(state[0][4]), int(state[0][5])), (n.x,n.z)) == 2:
action = self.actor.get_action(obs, reward, done, [[n.x, n.z]], is_training)
#print "Action:", action
return action
self.goal = self.goal_shaping(self.critic.get_goal(a_state))
action = self.actor.get_action(obs, reward, done, self.goal, is_training)
return action
def training(self, obs, action, reward, next_obs, done, step, s):
state = self.state_shaping(obs)
next_state = self.state_shaping(next_obs)
r = self.reward_shaping(next_state, reward)
a_state = np.concatenate([state, self.c_prob], 1)
a_next_state = np.concatenate([next_state, self.c_prob], 1)
# Goal Swapping
goal_swap = self.reverse_goal_shaping(np.array([[next_state[0][0], next_state[0][1]]]))#use the current location as the goal
self.state_buffer.append(a_state)
for i in range(step):
r_sum = 0
for j in range(0, step-i-1):
r_sum += -1.0*(GAMMA**j) # summing the discounted reward from the start to the current state for every state in the state buffer
self.store_transition([self.state_buffer[i].tolist(), goal_swap, \
r_sum+(GAMMA**(step-i-1))*r, a_next_state.tolist(), GAMMA**step])
if len(self.replay_buffer) > REPLAY_START:
batch = self.sampling()
l = self.update_model(batch)
if s%1 == 0:
print "Epoch:%d, loss: %d"%(s, l)
if s % 1000 == 0:# update the target network every 10000 steps
print "Update Target Network"
self.critic.update_target()
if s % 50 == 0:
self.critic.save_model()
def matches(self, a, b):
return a.x == b.x and a.z == b.z # victory!!
#==============================================================================
# Particle filter: update beliefs
# Collaborator's Update Method
# we categorize the type of agent as three classes: [Focused, Random, Bad], in which Bad means tends to move to the lapis block
# and at every step, we increase the corresponding one based on the collaborator's behavior
# It's hard to specify what's the behavior of an agent
# so we do a lot of handcraft to specify the rules
# it's noisy but still can be a prior
# for example, a focused agent might look like [ 27, 15, 8 ]
#==============================================================================
def reset_collaborator(self):# the particle filter is very sensitive to initial value, so we set it to [20, 20, 20] to change slowly over time
self.collaborator = 20*np.ones((1,3))
self.c_prob = 1.*self.collaborator/(np.sum(self.collaborator, axis = 1)[0])
def update_collaborator(self, next_obs, reward):
if self.obs is None:
self.obs = next_obs
return
state = self.state_shaping(self.obs)
next_state = self.state_shaping(next_obs)
target_pos = (state[0][2], state[0][3])
collaborator_pos = (state[0][4], state[0][5])
new_collaborator_pos = (next_state[0][4], next_state[0][5])
ori_pig_dist = self.manhattan_dist(target_pos, collaborator_pos)# distance between pig and collaborator at last state
new_pig_dist = self.manhattan_dist(target_pos, new_collaborator_pos)# distance between pig and collaborator at this state
ori_leave1_dist = self.manhattan_dist((1, 4), collaborator_pos)# distance between lapis blocks and collaborator
new_leave1_dist = self.manhattan_dist((1, 4), new_collaborator_pos)
ori_leave2_dist = self.manhattan_dist((7, 4), collaborator_pos)
new_leave2_dist = self.manhattan_dist((7, 4), new_collaborator_pos)
#complicated rules, did a lot of handcrafting and tuning
if ori_leave1_dist < ori_leave2_dist and ori_pig_dist < new_pig_dist:
if ori_leave1_dist >= new_leave1_dist:
self.collaborator[0][2] += 1
elif ori_leave1_dist <= new_leave1_dist:
self.collaborator[0][1] += 1
elif ori_leave1_dist > ori_leave2_dist and ori_pig_dist < new_pig_dist:
if ori_leave2_dist >= new_leave2_dist:
self.collaborator[0][2] += 1
elif ori_leave2_dist <= new_leave2_dist:
self.collaborator[0][1] += 1
elif ori_pig_dist > new_pig_dist:
self.collaborator[0][0] += 1
if new_collaborator_pos == (1, 4) or new_collaborator_pos == (7, 4):
self.collaborator[0][2] += 10
#normalize the collaborator behavior vector to get probability
self.c_prob = 1.*self.collaborator/(np.sum(self.collaborator, axis = 1)[0])
self.obs = next_obs
#==============================================================================
# Particle filter: Resampling
# making it more easier for our agent to adapt to the changes of the collaborator
#==============================================================================
def particle_filtering(self):# the particle filter tends to be unstable if samples are not enough
temp = np.zeros((1,3))
#print self.c_prob[0]
for i in range(50):
index = np.random.choice([0,1,2], 1, p=self.c_prob[0])[0]
temp[0][index] += 1
print "Collaborator: ", temp
return temp
#==============================================================================
def update_model(self, batch):
start_batch = batch[0]
goal_batch = batch[1]
reward_batch = batch[2]
end_batch = batch[3]
step_batch = batch[4]
result = self.critic.update(start_batch, goal_batch, reward_batch, end_batch, step_batch)
return result
def save_replay_buffer(self):
with open('replay_buffer.msg','w') as f:
f.truncate()
replay_buffer = {'replay_buffer': list(self.replay_buffer)}
print "Replay Buffer Saved"
msgpack.pack(replay_buffer, f)
def restore_replay_buffer(self, is_training):
if is_training:
if os.path.exists('replay_buffer.msg'):
with open('replay_buffer.msg', 'r') as f:
data = msgpack.unpack(f)
print len(data['replay_buffer'])
print "Loading Replay Buffer"
self.replay_buffer = deque(data['replay_buffer'])
else:
print "Initialize Replay Buffer"
self.replay_buffer = deque()
def store_transition(self, sample):#store the transition in the replay buffer
self.replay_buffer.append(sample)
if len(self.replay_buffer) > MEM_SIZE:
self.replay_buffer.popleft()
return len(self.replay_buffer)
def sampling(self):# sample minibatch from the experience replay buffer
if len(self.replay_buffer) >= BATCH_SIZE:
batch = random.sample(self.replay_buffer,BATCH_SIZE)
start_batch = []
goal_batch = []
reward_batch = []
end_batch = []
step_batch = []
for data in batch:
start_batch.append(data[0])
goal = np.zeros((1, GOAL_DIMS))
goal[0,data[1]] = 1
goal_batch.append(goal)
reward_batch.append(data[2])
end_batch.append(data[3])
step_batch.append(data[4])
return [np.array(start_batch).reshape(BATCH_SIZE, STATE_DIMS), np.array(goal_batch).reshape(BATCH_SIZE, GOAL_DIMS), \
np.array(reward_batch).reshape(BATCH_SIZE, 1), np.array(end_batch).reshape(BATCH_SIZE, STATE_DIMS),\
np.array(step_batch).reshape(BATCH_SIZE, 1)]
def state_shaping(self, obs):#flatten the state to input shape, modified from the sample code of FocusedAgent
state = obs[0]
#My agent position
me = [(j, i) for i, v in enumerate(state) for j, k in enumerate(v) if self.name in k]
#target position
target = [(j, i) for i, v in enumerate(state) for j, k in enumerate(v) if self._target in k]
#opponet postion
collaborator = [(j, i) for i, v in enumerate(state) for j, k in enumerate(v) if self._collaborator in k]
#flatten
if me is [] or target is [] or collaborator is []:
me = [(j, i) for i, v in enumerate(self.obs[0]) for j, k in enumerate(v) if self.name in k]
#target position
target = [(j, i) for i, v in enumerate(self.obs[0]) for j, k in enumerate(v) if self._target in k]
#opponet postion
collaborator = [(j, i) for i, v in enumerate(self.obs[0]) for j, k in enumerate(v) if self._collaborator in k]
inputs = np.array([[me[0][0], me[0][1], target[0][0], target[0][1], \
collaborator[0][0], collaborator[0][1]]])
return inputs
def goal_shaping(self, goal):# shape the index of goal to the coordinate form
if goal < 25:
x = goal % 5 + 2
z = goal / 5 + 2
else:
if goal == 25:
x = 1
z = 4
if goal == 26:
x = 7
z = 4
return np.array([[x,z]])
def reverse_goal_shaping(self, pos):# shape the coordinate of goal to the index form
goal = (pos[0][1]-2)*5 +(pos[0][0]-2)
if goal < 25:
return goal
elif (pos[0][0], pos[0][1]) is (1,4):
return 25
elif (pos[0][0], pos[0][1]) is (7,4):
return 26
def reward_shaping(self, state, reward):# shape the reward
collaborator = (state[0][4], state[0][5])
target = (state[0][2], state[0][3])
if reward > 10: # if the collaborator catch the pig in the lapis block, then our agent will not be rewarded
if collaborator == (2,4) and target == (1, 4):
reward = 0
elif collaborator == (6,4) and target == (7, 4):
reward = 0
return reward
def manhattan_dist(self, a, b):
return abs(a[0] - b[0]) + abs(a[1] - b[1])
class Critic(object):
    """DQN-style critic that scores each of the 27 candidate goals for a
    given (state + collaborator-belief) input.

    Maintains an online ('new_network') and a target ('target_network')
    copy of a 5-layer MLP, trained with Double Q-learning via RMSProp.
    """

    def __init__(self, state_dims, goal_dims, sess):
        self.sess = sess
        self.state_dims = state_dims
        self.goal_dims = goal_dims
        self.build_model()
        # Variable handles for syncing online -> target network.
        self.new_pars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='new_network')
        self.tar_pars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_network')
        self.saver = tf.train.Saver()
        # Restore the latest checkpoint if one exists; `save` records
        # whether a trained model was found.
        checkpoint = tf.train.get_checkpoint_state("saved_critic")
        if checkpoint and checkpoint.model_checkpoint_path:
            self.saver.restore(sess, checkpoint.model_checkpoint_path)
            print "Successfully loaded:", checkpoint.model_checkpoint_path
            self.save = True
        else:
            print "Could not find old Critic network weights"
            self.save = False

    def build_model(self):
        """Build both networks: 4 hidden layers of 1024 ReLU units each,
        plus a linear output layer of size goal_dims."""
        def weight_variable(shape):
            # Xavier-initialized weight matrix.
            W = tf.get_variable('W', shape=shape, initializer=tf.contrib.layers.xavier_initializer())
            return W
        def bias_variable(shape):
            # Small positive bias to keep ReLUs initially active.
            initial = tf.constant(0.01, shape=shape)
            return tf.Variable(initial)
        with tf.variable_scope('inputs'):
            self.state_input = tf.placeholder('float32', [None, self.state_dims])
            self.y_input = tf.placeholder('float32', [None, ])  # TD targets
            self.goal_input = tf.placeholder('float32', [None, self.goal_dims])  # one-hot goals
        with tf.variable_scope('new_network'):
            with tf.variable_scope('layer_1'):
                self.W_fc1 = weight_variable([self.state_dims, 1024])
                self.b_fc1 = bias_variable([1024])
                self.h_fc1 = tf.nn.relu(tf.matmul(self.state_input, self.W_fc1) + self.b_fc1)
            with tf.variable_scope('layer_2'):
                self.W_fc2 = weight_variable([1024, 1024])
                self.b_fc2 = bias_variable([1024])
                self.h_fc2 = tf.nn.relu(tf.matmul(self.h_fc1, self.W_fc2) + self.b_fc2)
            with tf.variable_scope('layer_3'):
                self.W_fc3 = weight_variable([1024, 1024])
                self.b_fc3 = bias_variable([1024])
                self.h_fc3 = tf.nn.relu(tf.matmul(self.h_fc2, self.W_fc3) + self.b_fc3)
            with tf.variable_scope('layer_4'):
                self.W_fc4 = weight_variable([1024, 1024])
                self.b_fc4 = bias_variable([1024])
                self.h_fc4 = tf.nn.relu(tf.matmul(self.h_fc3, self.W_fc4) + self.b_fc4)
            with tf.variable_scope('layer_5'):
                self.W_fc5 = weight_variable([1024, self.goal_dims])
                self.b_fc5 = bias_variable([self.goal_dims])
                self.qvalue = tf.matmul(self.h_fc4, self.W_fc5) + self.b_fc5
                # Q-value of the selected (one-hot) goal only.
                self.prediction = tf.reduce_sum(self.qvalue * self.goal_input, axis=1)
        with tf.variable_scope('target_network'):
            # Mirror of the online network; weights synced in update_target().
            with tf.variable_scope('layer_1'):
                self._W_fc1 = weight_variable([self.state_dims, 1024])
                self._b_fc1 = bias_variable([1024])
                self._h_fc1 = tf.nn.relu(tf.matmul(self.state_input, self._W_fc1) + self._b_fc1)
            with tf.variable_scope('layer_2'):
                self._W_fc2 = weight_variable([1024, 1024])
                self._b_fc2 = bias_variable([1024])
                self._h_fc2 = tf.nn.relu(tf.matmul(self._h_fc1, self._W_fc2) + self._b_fc2)
            with tf.variable_scope('layer_3'):
                self._W_fc3 = weight_variable([1024, 1024])
                self._b_fc3 = bias_variable([1024])
                self._h_fc3 = tf.nn.relu(tf.matmul(self._h_fc2, self._W_fc3) + self._b_fc3)
            with tf.variable_scope('layer_4'):
                self._W_fc4 = weight_variable([1024, 1024])
                self._b_fc4 = bias_variable([1024])
                self._h_fc4 = tf.nn.relu(tf.matmul(self._h_fc3, self._W_fc4) + self._b_fc4)
            with tf.variable_scope('layer_5'):
                self._W_fc5 = weight_variable([1024, self.goal_dims])
                self._b_fc5 = bias_variable([self.goal_dims])
                self._qvalue = tf.matmul(self._h_fc4, self._W_fc5) + self._b_fc5
                self._prediction = tf.reduce_sum(self._qvalue * self.goal_input, axis=1)
        with tf.variable_scope('train'):
            self.delta = self.y_input - self.prediction
            self.loss = tf.reduce_mean(tf.square(self.delta))  # mean squared TD error
            self.train_step = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(self.loss)

    def get_qvalue(self, state):
        """Return the online network's Q-values for all goals."""
        qvalue = self.sess.run(self.qvalue, feed_dict={self.state_input: state})
        return qvalue

    def get_goal(self, state):
        """Return the index of the highest-valued goal for a single state."""
        qvalue = self.sess.run(self.qvalue, feed_dict={self.state_input: state})
        #print "Goal:", np.argmax(qvalue, axis = 1)
        return np.argmax(qvalue, axis=1)[0]

    def update(self, state, goal, reward, next_state, step):
        """One Double-Q update: greedy goal from the online net, value from
        the target net. `step` holds the per-sample discount (GAMMA**n)."""
        nqvalue = self.sess.run(self.qvalue, feed_dict={self.state_input: next_state})
        ngoal = np.zeros((BATCH_SIZE, self.goal_dims))
        index = np.argmax(nqvalue, axis=1)
        for i in xrange(BATCH_SIZE):
            ngoal[i][index[i]] = 1  # one-hot of the online net's argmax
        double_q = self.sess.run(self._prediction, feed_dict={self.state_input: next_state, self.goal_input: ngoal})
        target = reward[:, 0] + step[:, 0] * double_q
        train, l, qvalue = self.sess.run([self.train_step, self.loss, self.qvalue], \
                feed_dict={self.state_input: state, self.goal_input: goal, \
                           self.y_input: target})
        return l

    def update_target(self):
        """Copy online-network weights into the target network."""
        self.new_pars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='new_network')
        self.tar_pars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_network')
        self.sess.run([tf.assign(tar, new) for tar, new in zip(self.tar_pars, self.new_pars)])

    def restore_model(self):
        """Restore weights from the fixed checkpoint path."""
        self.saver.restore(self.sess, 'saved_critic/critic.ckpt')
        print "Critic Model Restored"

    def save_model(self):
        """Save weights to the fixed checkpoint path."""
        self.saver.save(self.sess, 'saved_critic/critic.ckpt')
        print "Critic Model Saved"
class Actor(object):
    """Low-level navigator: A* pathfinding over the grid observation.
    Modified from the sample FocusedAgent code provided by Malmo."""

    def __init__(self, name):
        self.name = name
        self._previous_target_pos = None  # last goal; path is re-planned when it moves
        self._action_list = []  # queued actions along the current path
        self.ACTIONS = ENV_ACTIONS
        # Search node: edge cost, grid position, facing direction, action taken.
        self.Neighbour = namedtuple('Neighbour', ['cost', 'x', 'z', 'direction', 'action'])

    def get_action(self, state, reward, done, goal, is_training):
        """Return the index of the next action moving toward `goal`."""
        if done:
            # New mission: drop any stale plan.
            self._action_list = []
            self._previous_target_pos = None
        entities = state[1]
        state = state[0]
        me = [(j, i) for i, v in enumerate(state) for j, k in enumerate(v) if self.name in k]
        if len(me) < 1:
            # Agent not found on the board (can't plan).
            return None
        me_details = [e for e in entities if e['name'] == self.name][0]
        yaw = int(me_details['yaw'])
        direction = ((((yaw - 45) % 360) // 90) - 1) % 4  # convert Minecraft yaw to 0=north, 1=east etc.
        target = [(goal[0][0], goal[0][1])]
        # Get agent and target nodes
        me = self.Neighbour(1, me[0][0], me[0][1], direction, "")
        target = self.Neighbour(1, target[0][0], target[0][1], 0, "")
        if not self._previous_target_pos == target:
            # Target has moved, or this is the first action of a new mission -
            # calculate a new action list.
            self._previous_target_pos = target
            path, costs = self._find_shortest_path(me, target, state=state)
            self._action_list = []
            for point in path:
                self._action_list.append(point.action)
        if self._action_list is not None and len(self._action_list) > 0:
            action = self._action_list.pop(0)
            return self.ACTIONS.index(action)
        # Reached end of action list - turn on the spot
        # (substitutes for a no-op command).
        return self.ACTIONS.index("turn 1")

    def _find_shortest_path(self, start, end, **kwargs):
        """A* search from `start` to `end`; returns (path deque, cost map)."""
        came_from, cost_so_far = {}, {}
        explorer = []  # priority queue of (f-score, node)
        heapify(explorer)
        heappush(explorer, (0, start))
        came_from[start] = None
        cost_so_far[start] = 0
        current = None
        while len(explorer) > 0:
            _, current = heappop(explorer)
            if self.matches(current, end):
                break
            for nb in self.neighbors(current, **kwargs):
                cost = nb.cost if hasattr(nb, "cost") else 1
                new_cost = cost_so_far[current] + cost
                if nb not in cost_so_far or new_cost < cost_so_far[nb]:
                    cost_so_far[nb] = new_cost
                    priority = new_cost + self.heuristic(end, nb, **kwargs)
                    heappush(explorer, (priority, nb))
                    came_from[nb] = current
        # Build path by walking the predecessor chain back to the start.
        path = deque()
        while current is not start:
            path.appendleft(current)
            current = came_from[current]
        return path, cost_so_far

    def neighbors(self, pos, state=None):
        """Enumerate reachable successor nodes of `pos` on the board."""
        state_width = state.shape[1]
        state_height = state.shape[0]
        dir_north, dir_east, dir_south, dir_west = range(4)
        neighbors = []
        # Coordinate deltas as a function of facing direction.
        inc_x = lambda x, dir, delta: x + delta if dir == dir_east else x - delta if dir == dir_west else x
        inc_z = lambda z, dir, delta: z + delta if dir == dir_south else z - delta if dir == dir_north else z
        # Add a neighbour for each potential action; prune out the
        # disallowed states afterwards.
        for action in self.ACTIONS:
            if action.startswith("turn"):
                neighbors.append(
                    self.Neighbour(1, pos.x, pos.z, (pos.direction + int(action.split(' ')[1])) % 4, action))
            if action.startswith("move "):  # note the space to distinguish from movenorth etc
                sign = int(action.split(' ')[1])
                weight = 1 if sign == 1 else 1.5  # moving backwards costs extra
                neighbors.append(
                    self.Neighbour(weight, inc_x(pos.x, pos.direction, sign), inc_z(pos.z, pos.direction, sign),
                                   pos.direction, action))
            if action == "movenorth":
                neighbors.append(self.Neighbour(1, pos.x, pos.z - 1, pos.direction, action))
            elif action == "moveeast":
                neighbors.append(self.Neighbour(1, pos.x + 1, pos.z, pos.direction, action))
            elif action == "movesouth":
                neighbors.append(self.Neighbour(1, pos.x, pos.z + 1, pos.direction, action))
            elif action == "movewest":
                neighbors.append(self.Neighbour(1, pos.x - 1, pos.z, pos.direction, action))
        # Now prune: keep in-bounds cells that are not sand (impassable).
        valid_neighbours = [n for n in neighbors if
                            n.x >= 0 and n.x < state_width and n.z >= 0 and n.z < state_height and state[
                                n.z, n.x] != 'sand']
        return valid_neighbours

    def heuristic(self, a, b, state=None):
        """Admissible A* heuristic: Manhattan distance between two nodes."""
        (x1, y1) = (a.x, a.z)
        (x2, y2) = (b.x, b.z)
        return abs(x1 - x2) + abs(y1 - y2)

    def matches(self, a, b):
        # Nodes match on position only; facing direction is ignored.
        return a.x == b.x and a.z == b.z
| |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2020 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import array
import asyncio
import collections.abc
import unicodedata
from base64 import b64encode
from bisect import bisect_left
import datetime
from email.utils import parsedate_to_datetime
import functools
from inspect import isawaitable as _isawaitable
from operator import attrgetter
import json
import re
import warnings
from .errors import InvalidArgument
from .object import Object
# Milliseconds since the Unix epoch at Discord's epoch (2015-01-01T00:00:00 UTC).
# Snowflake timestamps (see snowflake_time/time_snowflake) are relative to this.
DISCORD_EPOCH = 1420070400000
# Largest interval (seconds, = 40 days) passed to a single asyncio.sleep() call;
# sleep_until() chunks longer waits into pieces of this size.
MAX_ASYNCIO_SECONDS = 3456000
class cached_property:
    """Descriptor that computes a value once, then replaces itself.

    On first access the wrapped function runs and its result is written to
    the instance under the same attribute name; later lookups hit that
    instance attribute directly and never reach this descriptor again.
    """

    def __init__(self, function):
        self.function = function
        self.__doc__ = getattr(function, '__doc__')

    def __get__(self, instance, owner):
        # Class-level access yields the descriptor itself.
        if instance is None:
            return self
        result = self.function(instance)
        # Shadow the descriptor so subsequent reads are plain attribute hits.
        setattr(instance, self.function.__name__, result)
        return result
class CachedSlotProperty:
    """Compute-once property that caches under an explicit attribute name.

    Unlike a self-replacing cached property, the result is stored under
    ``name`` (typically a slot), so it works on classes using ``__slots__``.
    """

    def __init__(self, name, function):
        self.name = name
        self.function = function
        self.__doc__ = getattr(function, '__doc__')

    def __get__(self, instance, owner):
        # Class-level access yields the descriptor itself.
        if instance is None:
            return self
        try:
            # Fast path: value already computed and stored on the instance.
            return getattr(instance, self.name)
        except AttributeError:
            result = self.function(instance)
            setattr(instance, self.name, result)
            return result
def cached_slot_property(name):
    """Decorator factory: cache the decorated method's result under *name*."""
    def wrap(func):
        return CachedSlotProperty(name, func)
    return wrap
class SequenceProxy(collections.abc.Sequence):
    """Read-only view over an existing sequence.

    Every read operation is delegated to the wrapped sequence; no mutating
    methods are exposed, so callers cannot alter the underlying data
    through this proxy.
    """

    def __init__(self, proxied):
        self.__seq = proxied

    def __getitem__(self, idx):
        return self.__seq[idx]

    def __len__(self):
        return len(self.__seq)

    def __contains__(self, item):
        return item in self.__seq

    def __iter__(self):
        return iter(self.__seq)

    def __reversed__(self):
        return reversed(self.__seq)

    def index(self, value, *args, **kwargs):
        return self.__seq.index(value, *args, **kwargs)

    def count(self, value):
        return self.__seq.count(value)
def parse_time(timestamp):
    """Convert an ISO-8601-style timestamp string into a naive datetime.

    The ``+00:00`` UTC suffix is stripped and each run of digits becomes a
    positional ``datetime`` argument. Falsy input yields ``None``.
    """
    if not timestamp:
        return None
    fields = re.split(r'[^\d]', timestamp.replace('+00:00', ''))
    return datetime.datetime(*(int(part) for part in fields))
def deprecated(instead=None):
    """Decorator factory marking a callable as deprecated.

    Every call to the wrapped function emits a DeprecationWarning (naming
    *instead* as the replacement when given) and then executes the function
    normally.
    """
    def actual_decorator(func):
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            # Force the warning through even under default filtering.
            warnings.simplefilter('always', DeprecationWarning)
            if instead:
                message = "{0.__name__} is deprecated, use {1} instead.".format(func, instead)
            else:
                message = '{0.__name__} is deprecated.'.format(func)
            warnings.warn(message, stacklevel=3, category=DeprecationWarning)
            # Restore the default filter behaviour afterwards.
            warnings.simplefilter('default', DeprecationWarning)
            return func(*args, **kwargs)
        return decorated
    return actual_decorator
def oauth_url(client_id, permissions=None, guild=None, redirect_uri=None):
    """A helper function that returns the OAuth2 URL for inviting the bot
    into guilds.

    Parameters
    -----------
    client_id: :class:`str`
        The client ID for your bot.
    permissions: :class:`~discord.Permissions`
        The permissions you're requesting. If not given then you won't be
        requesting any permissions.
    guild: :class:`~discord.Guild`
        The guild to pre-select in the authorization screen, if available.
    redirect_uri: :class:`str`
        An optional valid redirect URI.

    Returns
    --------
    :class:`str`
        The OAuth2 URL for inviting the bot into guilds.
    """
    parts = ['https://discord.com/oauth2/authorize?client_id={}&scope=bot'.format(client_id)]
    if permissions is not None:
        parts.append('&permissions=' + str(permissions.value))
    if guild is not None:
        parts.append('&guild_id=' + str(guild.id))
    if redirect_uri is not None:
        from urllib.parse import urlencode
        parts.append('&response_type=code&' + urlencode({'redirect_uri': redirect_uri}))
    return ''.join(parts)
def snowflake_time(id):
    """
    Parameters
    -----------
    id: :class:`int`
        The snowflake ID.

    Returns
    --------
    :class:`datetime.datetime`
        The creation date in UTC of a Discord snowflake ID.
    """
    # The top 42 bits of a snowflake hold milliseconds since DISCORD_EPOCH.
    millis = (id >> 22) + DISCORD_EPOCH
    return datetime.datetime.utcfromtimestamp(millis / 1000)
def time_snowflake(datetime_obj, high=False):
    """Returns a numeric snowflake pretending to be created at the given date.

    With ``high=False`` the low 22 bits are all zero (smallest snowflake for
    that instant); with ``high=True`` they are all ones (largest). Use
    ``time_snowflake(high=False) - 1`` / ``time_snowflake(high=True) + 1``
    for inclusive range endpoints.

    Parameters
    -----------
    datetime_obj: :class:`datetime.datetime`
        A timezone-naive datetime object representing UTC time.
    high: :class:`bool`
        Whether or not to set the lower 22 bit to high or low.
    """
    epoch = type(datetime_obj)(1970, 1, 1)
    unix_seconds = (datetime_obj - epoch).total_seconds()
    millis_since_discord_epoch = int(unix_seconds * 1000 - DISCORD_EPOCH)
    low_bits = 2 ** 22 - 1 if high else 0
    return (millis_since_discord_epoch << 22) + low_bits
def find(predicate, seq):
    """Return the first element of *seq* for which *predicate* is truthy.

    Stops at the first match, unlike :func:`py:filter`, and returns
    ``None`` when no element satisfies the predicate.

    Parameters
    -----------
    predicate
        A function that returns a boolean-like result.
    seq: iterable
        The iterable to search through.
    """
    return next((element for element in seq if predicate(element)), None)
def get(iterable, **attrs):
    r"""Return the first element of *iterable* whose attributes match ``attrs``.

    Each keyword is an attribute name (use ``x__y`` to address the nested
    attribute ``x.y``) and its value the required attribute value. All
    pairs must match (logical AND). Returns ``None`` when nothing matches.

    Examples
    ---------
    .. code-block:: python3

        member = discord.utils.get(message.guild.members, name='Foo')
        channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)
        channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')

    Parameters
    -----------
    iterable
        An iterable to search through.
    \*\*attrs
        Keyword arguments that denote attributes to search with.
    """
    # Single-attribute lookups skip the per-element all() machinery.
    if len(attrs) == 1:
        name, expected = attrs.popitem()
        getter = attrgetter(name.replace('__', '.'))
        return next((elem for elem in iterable if getter(elem) == expected), None)

    checks = [(attrgetter(name.replace('__', '.')), expected)
              for name, expected in attrs.items()]
    for candidate in iterable:
        if all(getter(candidate) == expected for getter, expected in checks):
            return candidate
    return None
def _unique(iterable):
seen = set()
adder = seen.add
return [x for x in iterable if not (x in seen or adder(x))]
def _get_as_snowflake(data, key):
try:
value = data[key]
except KeyError:
return None
else:
return value and int(value)
def _get_mime_type_for_image(data):
if data.startswith(b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A'):
return 'image/png'
elif data[0:3] == b'\xff\xd8\xff' or data[6:10] in (b'JFIF', b'Exif'):
return 'image/jpeg'
elif data.startswith((b'\x47\x49\x46\x38\x37\x61', b'\x47\x49\x46\x38\x39\x61')):
return 'image/gif'
elif data.startswith(b'RIFF') and data[8:12] == b'WEBP':
return 'image/webp'
else:
raise InvalidArgument('Unsupported image type given')
def _bytes_to_base64_data(data):
    """Encode raw image bytes as a ``data:`` URI, sniffing the MIME type."""
    mime = _get_mime_type_for_image(data)
    payload = b64encode(data).decode('ascii')
    return 'data:{mime};base64,{data}'.format(mime=mime, data=payload)
def to_json(obj):
    """Serialize *obj* to a compact (no whitespace), ASCII-escaped JSON string."""
    return json.dumps(obj, ensure_ascii=True, separators=(',', ':'))
def _parse_ratelimit_header(request, *, use_clock=False):
reset_after = request.headers.get('X-Ratelimit-Reset-After')
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers['X-Ratelimit-Reset']), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
async def maybe_coroutine(f, *args, **kwargs):
    """Call *f* with the given arguments, awaiting the result if it turns
    out to be awaitable; otherwise return it directly."""
    result = f(*args, **kwargs)
    return await result if _isawaitable(result) else result
async def async_all(gen, *, check=_isawaitable):
    """Like :func:`all`, but items for which *check* is true are awaited
    before being truth-tested. Short-circuits on the first falsy item."""
    for item in gen:
        if check(item):
            item = await item
        if not item:
            return False
    return True
async def sane_wait_for(futures, *, timeout):
    """``asyncio.wait`` wrapper that raises ``asyncio.TimeoutError`` unless
    *every* future finishes within *timeout* seconds.

    Returns the set of completed futures.
    """
    tasks = [asyncio.ensure_future(fut) for fut in futures]
    done, pending = await asyncio.wait(tasks, timeout=timeout,
                                       return_when=asyncio.ALL_COMPLETED)
    if pending:
        raise asyncio.TimeoutError()
    return done
async def sleep_until(when, result=None):
    """|coro|

    Sleep until a specified time.

    If the time supplied is in the past this function will yield instantly.

    .. versionadded:: 1.3

    Parameters
    -----------
    when: :class:`datetime.datetime`
        The timestamp in which to sleep until. If the datetime is naive then
        it is assumed to be in UTC.
    result: Any
        If provided is returned to the caller when the coroutine completes.
    """
    if when.tzinfo is None:
        when = when.replace(tzinfo=datetime.timezone.utc)
    remaining = (when - datetime.datetime.now(datetime.timezone.utc)).total_seconds()
    # Waits longer than MAX_ASYNCIO_SECONDS are chunked (presumably to dodge
    # platform limits on very large sleep timeouts — see the constant).
    while remaining > MAX_ASYNCIO_SECONDS:
        await asyncio.sleep(MAX_ASYNCIO_SECONDS)
        remaining -= MAX_ASYNCIO_SECONDS
    return await asyncio.sleep(max(remaining, 0), result)
def valid_icon_size(size):
    """True when *size* is a power of two in the inclusive range [16, 4096]."""
    # size & (size - 1) is zero exactly for powers of two (and zero itself,
    # which the range check then rejects).
    return not size & (size - 1) and 16 <= size <= 4096
class SnowflakeList(array.array):
    """Sorted array of 64-bit unsigned snowflake IDs.

    Backed by ``array('Q')`` for low memory usage; kept sorted so lookups
    and membership tests run in O(log n) via bisection, at the cost of
    O(n) insertion.
    """

    __slots__ = ()

    def __new__(cls, data, *, is_sorted=False):
        # Sort up front unless the caller vouches for the ordering.
        return array.array.__new__(cls, 'Q', data if is_sorted else sorted(data))

    def add(self, element):
        """Insert *element*, keeping the array sorted."""
        self.insert(bisect_left(self, element), element)

    def get(self, element):
        """Return *element* if it is stored, otherwise ``None``."""
        i = bisect_left(self, element)
        if i != len(self) and self[i] == element:
            return self[i]
        return None

    def has(self, element):
        """Membership test via bisection."""
        i = bisect_left(self, element)
        return i != len(self) and self[i] == element
_IS_ASCII = re.compile(r'^[\x00-\x7f]+$')
def _string_width(string, *, _IS_ASCII=_IS_ASCII):
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = 'WFA'
width = 0
func = unicodedata.east_asian_width
for char in string:
width += 2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1
return width
def resolve_invite(invite):
    """
    Resolves an invite from a :class:`~discord.Invite`, URL or code.

    Parameters
    -----------
    invite: Union[:class:`~discord.Invite`, :class:`str`]
        The invite.

    Returns
    --------
    :class:`str`
        The invite code.
    """
    from .invite import Invite  # circular import
    if isinstance(invite, Invite):
        return invite.code
    # Strip a discord.gg / discord(app).com/invite URL down to its code;
    # anything that doesn't look like a URL is assumed to already be a code.
    match = re.match(r'(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)', invite)
    return match.group(1) if match else invite
def resolve_template(code):
    """
    Resolves a template code from a :class:`~discord.Template`, URL or code.

    .. versionadded:: 1.4

    Parameters
    -----------
    code: Union[:class:`~discord.Template`, :class:`str`]
        The code.

    Returns
    --------
    :class:`str`
        The template code.
    """
    from .template import Template  # circular import
    if isinstance(code, Template):
        return code.code
    # Strip a discord.new / discord(app).com/template URL down to its code;
    # anything that doesn't look like a URL is assumed to already be a code.
    match = re.match(r'(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)', code)
    return match.group(1) if match else code
# One alternative per markdown character, each with a lookahead ensuring the
# character actually closes later (i.e. would render as markdown).
_MARKDOWN_ESCAPE_SUBREGEX = '|'.join(r'\{0}(?=([\s\S]*((?<!\{0})\{0})))'.format(c)
                                     for c in ('*', '`', '_', '~', '|'))
# Block quotes and [label](link) constructs.
_MARKDOWN_ESCAPE_COMMON = r'^>(?:>>)?\s|\[.+\]\(.+\)'
_MARKDOWN_ESCAPE_REGEX = re.compile(r'(?P<markdown>%s|%s)' % (_MARKDOWN_ESCAPE_SUBREGEX, _MARKDOWN_ESCAPE_COMMON))

def escape_markdown(text, *, as_needed=False, ignore_links=True):
    r"""A helper function that escapes Discord's markdown.

    Parameters
    -----------
    text: :class:`str`
        The text to escape markdown from.
    as_needed: :class:`bool`
        Whether to escape the markdown characters as needed. This
        means that it does not escape extraneous characters if it's
        not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
        instead of ``\*\*hello\*\*``. Note however that this can open
        you up to some clever syntax abuse. Defaults to ``False``.
    ignore_links: :class:`bool`
        Whether to leave links alone when escaping markdown. For example,
        if a URL in the text contains characters such as ``_`` then it will
        be left alone. This option is not supported with ``as_needed``.
        Defaults to ``True``.

    Returns
    --------
    :class:`str`
        The text with the markdown special characters escaped with a slash.
    """
    if as_needed:
        # Escape literal backslashes first, then prefix detected markdown.
        text = re.sub(r'\\', r'\\\\', text)
        return _MARKDOWN_ESCAPE_REGEX.sub(r'\\\1', text)

    def substitute(match):
        # URLs are passed through untouched; everything else gets a backslash.
        url = match.groupdict().get('url')
        if url:
            return url
        return '\\' + match.groupdict()['markdown']

    pattern = r'(?P<markdown>[_\\~|\*`]|%s)' % _MARKDOWN_ESCAPE_COMMON
    if ignore_links:
        url_pattern = r'(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])'
        pattern = '(?:%s|%s)' % (url_pattern, pattern)
    return re.sub(pattern, substitute, text)
def escape_mentions(text):
    """A helper function that escapes everyone, here, role, and user mentions.

    A zero-width space is inserted after the ``@`` so the mention no longer
    triggers.

    .. note::
        This does not include channel mentions.

    Parameters
    -----------
    text: :class:`str`
        The text to escape mentions from.

    Returns
    --------
    :class:`str`
        The text with the mentions removed.
    """
    return re.sub(r'@(everyone|here|[!&]?[0-9]{17,21})',
                  lambda m: '@\u200b' + m.group(1), text)
| |
# -*- coding: utf-8 -*-
import uuid
from . import ABCParticles
from .particles_items import Particle, Bond
from ..core import CUBA
from ..core.data_container import DataContainer
class Particles(ABCParticles):
    """Class that represents a container of particles and bonds.

    Class provides methods to add particles and bonds, remove them and
    update them. Stored items are cloned on the way in and out, so callers
    can never mutate internal state through shared references.

    Attributes
    ----------
    name : str
        name of the particle container
    _particles : dict
        data structure for particles storage (uid -> Particle)
    _bonds : dict
        data structure for bonds storage (uid -> Bond)
    data : DataContainer
        data attributes of the element
    """
    cuba_key = CUBA.PARTICLES

    def __init__(self, name):
        """ Constructor

        Parameters
        ----------
        name : str
            name of the particle container
        """
        self._particles = {}
        self._bonds = {}
        self._data = DataContainer()
        self._name = name
        # Maps each supported item type to a callable returning its backing
        # store; count_of() relies on this table.
        self._items_count = {
            CUBA.PARTICLE: lambda: self._particles,
            CUBA.BOND: lambda: self._bonds
        }
        self._uid = uuid.uuid4()

    @property
    def name(self):
        """Name of the container."""
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def uid(self):
        """Unique id of the container (read-only)."""
        return self._uid

    @property
    def data(self):
        """Data attributes of the element."""
        return self._data

    @data.setter
    def data(self, new_data):
        # Copy into a fresh DataContainer so external mutations don't leak in.
        self._data = DataContainer(new_data)

    def count_of(self, item_type):
        """ Return the count of item_type in the container.

        Parameters
        ----------
        item_type : CUBA
            The CUBA enum of the type of the items to return the count of.

        Returns
        -------
        count : int
            The number of items of item_type in the container.

        Raises
        ------
        ValueError :
            If the type of the item is not supported in the current
            container.
        """
        try:
            return len(self._items_count[item_type]())
        except KeyError:
            error_str = "Trying to obtain count of a non-supported item: {}"
            raise ValueError(error_str.format(item_type))

    # Subtype specific methods ###############################################

    def _add_particles(self, iterable):
        """Adds a set of particles from the provided iterable
        to the container.

        Particles without a uid are assigned a freshly generated one. A
        particle whose uid already exists in the container raises; use
        'update_particles' to replace existing particles.

        Parameters
        ----------
        iterable : iterable of Particle objects
            the new set of particles that will be included in the container.

        Returns
        -------
        uids : list of uuid.UUID
            The uids of the added particles.

        Raises
        ------
        ValueError :
            when there is a particle with a uid that already exists
            in the container.

        Examples
        --------
        >>> particle_list = [Particle(), Particle()]
        >>> particles = Particles(name="foo")
        >>> uids = particles.add_particles(particle_list)
        """
        return [self._add_element(self._particles, particle,
                                  clone=Particle.from_particle)
                for particle in iterable]

    def _add_bonds(self, iterable):
        """Adds a set of bonds to the container.

        Same uid semantics as particles: a missing uid is generated, a
        duplicate uid raises; use 'update_bonds' to replace existing bonds.

        Parameters
        ----------
        iterable : iterable of Bond objects
            the new bonds that will be included in the container.

        Returns
        -------
        uids : list of uuid.UUID
            The uids of the added bonds.

        Raises
        ------
        ValueError :
            when there is a bond with a uid that already exists
            in the container.
        """
        return [self._add_element(self._bonds, bond, clone=Bond.from_bond)
                for bond in iterable]

    def _update_particles(self, iterable):
        """Updates a set of particles from the provided iterable.

        Each particle is matched by uid and replaced in the container.

        Parameters
        ----------
        iterable : iterable of Particle objects
            the particles that will be replaced.

        Raises
        ------
        ValueError :
            If any particle inside the iterable does not exist.
        """
        for particle in iterable:
            self._update_element(
                self._particles, particle, clone=Particle.from_particle)

    def _update_bonds(self, iterable):
        """Updates a set of bonds from the provided iterable.

        Each bond is matched by uid and replaced in the container.

        Parameters
        ----------
        iterable : iterable of Bond objects
            the bonds that will be replaced.

        Raises
        ------
        ValueError :
            If any bond doesn't exist.
        """
        for bond in iterable:
            self._update_element(self._bonds, bond, clone=Bond.from_bond)

    def _get_particle(self, uid):
        """Returns a copy of the particle with the given uid.

        Parameters
        ----------
        uid : uuid.UUID
            the uid of the particle

        Raises
        ------
        KeyError :
            when the particle is not in the container.

        Returns
        -------
        particle : Particle
            A copy of the internally stored particle info.
        """
        return Particle.from_particle(self._particles[uid])

    def _get_bond(self, uid):
        """Returns a copy of the bond with the given uid.

        Parameters
        ----------
        uid : uuid.UUID
            the uid of the bond

        Raises
        ------
        KeyError :
            when the bond is not in the container.

        Returns
        -------
        bond : Bond
            A copy of the internally stored bond info.
        """
        return Bond.from_bond(self._bonds[uid])

    def _remove_particles(self, uids):
        """Remove the particles with the provided uids from the container.

        Parameters
        ----------
        uids : iterable of uuid.UUID
            the uids of the particles to be removed.

        Raises
        ------
        KeyError :
            If any uid doesn't exist in the container.
        """
        for uid in uids:
            del self._particles[uid]

    def _remove_bonds(self, uids):
        """Remove the bonds with the provided uids.

        Parameters
        ----------
        uids : iterable of uuid.UUID
            the uids of the bonds to be removed.

        Raises
        ------
        KeyError :
            If any uid doesn't exist in the container.
        """
        for uid in uids:
            del self._bonds[uid]

    def _iter_particles(self, uids=None):
        """Generator method for iterating over the particles of the container.

        Parameters
        ----------
        uids : iterable of uuid.UUID, optional
            When given, the particles are yielded in the same order as the
            uids; when None, all particles are yielded in unspecified order.

        Yields
        ------
        particle : Particle
            A copy of each stored Particle item.

        Raises
        ------
        KeyError :
            if any of the ids passed as parameters are not in the container.
        """
        if uids is None:
            return self._iter_all(
                self._particles, clone=Particle.from_particle)
        return self._iter_elements(
            self._particles, uids, clone=Particle.from_particle)

    def _iter_bonds(self, uids=None):
        """Generator method for iterating over the bonds of the container.

        Parameters
        ----------
        uids : iterable of uuid.UUID, optional
            When given, the bonds are yielded in the same order as the
            uids; when None, all bonds are yielded in unspecified order.

        Yields
        ------
        bond : Bond
            A copy of each stored Bond item.

        Raises
        ------
        KeyError :
            if any of the ids passed as parameters are not in the container.
        """
        if uids is None:
            return self._iter_all(self._bonds, clone=Bond.from_bond)
        return self._iter_elements(self._bonds, uids, clone=Bond.from_bond)

    def _has_particle(self, uid):
        """Checks if a particle with the given uid already exists
        in the container."""
        return uid in self._particles

    def _has_bond(self, uid):
        """Checks if a bond with the given uid already exists
        in the container."""
        return uid in self._bonds

    # Utility methods ########################################################

    def _iter_elements(self, cur_dict, cur_ids, clone):
        """Yield clones of the items with ids `cur_ids`, in that order."""
        for cur_id in cur_ids:
            yield clone(cur_dict[cur_id])

    def _iter_all(self, cur_dict, clone):
        """Yield clones of every stored item.

        Note: uses values() instead of the Python-2-only itervalues(),
        which raised AttributeError under Python 3.
        """
        for cur_element in cur_dict.values():
            yield clone(cur_element)

    def _add_element(self, cur_dict, element, clone):
        """Store a clone of `element` in `cur_dict`.

        Generates a uid (and writes it back onto `element`) when the element
        has none; raises ValueError on a duplicate uid. Returns the uid used.
        """
        cur_id = element.uid
        if cur_id is None:
            cur_id = uuid.uuid4()
            element.uid = cur_id
            cur_dict[cur_id] = clone(element)
        elif cur_id not in cur_dict:
            # Element is not yet in the dict - hence we can add it.
            cur_dict[cur_id] = clone(element)
        else:
            # Report the conflicting uid (previously the whole element was
            # formatted into a message that promised an id).
            raise ValueError("Item with id:{} already exists".format(cur_id))
        return cur_id

    def _update_element(self, cur_dict, element, clone):
        """Replace the stored item matching `element.uid`; raise if absent."""
        uid = element.uid
        if uid in cur_dict:
            cur_dict[uid] = clone(element)
        else:
            raise ValueError('id: {} does not exist'.format(uid))
| |
"""
Some algorithms related to lattices.
Here we are concerned with determining random variable induced sigma-algebras.
That is, we want to know the subsigma-algebra (of the underlying sigma-algebra)
that corresponds to a random variable. However, we consider only those random
variables that map outcomes in the underlying sigma-algebra to a projection of
that outcome. For example, if an outcome in the underlying sigma-algebra is
(1,2,3), then random variable `X_1` would take on the value (2,) and the joint
random variable `X_0,X_2` has the value (1,2). By considering all possible
induced sigma-algebras, we are equivalently considering all possible partitions
of the sample space.
Relationship to Intersection Information based on Common Randomness
--------------------------------------------------------------------
In general, the meet and join operators are defined with respect to information
equivalence, which in turn, derives from the notions of informationally
richer/poorer. In this module, richer and poorer correspond to refinements and
coarsenings of the sample space and thus, depend explicitly on the structure
of the underlying sigma-algebra.
As an example, consider the r.v.s. X and Y, where X = 1 and Y = X with p=1 and
Y = 0 with p=0. There is a sense in which X and Y have the same distribution. However,
X and Y are not informationally equivalent as there is a function f such that
X = f(Y) where f maps 0 and 1 to 1, but not the other way around. One imagines
X as the coarsest possible partition of the sample space, whereas Y is a
refinement. To wit, the sample space is {10,11} with p(10) = 0 and p(11) = 1.
Then X corresponds to the partition: {(10, 11)} while Y is {(10,), (11,)}.
In [1], richer and poorer are defined in terms of probability almost surely.
That is, X is informationally poorer than Y if X = f(Y) almost surely. This
means that the definition is not as sensitive to the structure of the
underlying sigma-algebra. In the above example, X and Y are now informationally
equivalent, even though Y is a refinement of the partition corresponding to X.
In general, there is a trend that coarser partitions correspond to
informationally poorer random variables (but due to the partial ordering, not
all partitions are comparable to one another). In [1], a (comparable) coarser
partition is only poorer if the coarsening involves outcomes with nonzero
probability. Said another way, a r.v. that corresponds to a refinement of
another r.v. is informationally richer only if it refines outcomes which have
nonzero probability.
If the behavior of [1] is desired, one must make sure to prune the sample space
of any outcomes that have zero probability. Then, the implementation here will
give the same results as [1].
[1] "Intersection Information based on Common Randomness"
http://arxiv.org/abs/1310.1538
"""
from collections import defaultdict
import dit
from ..helpers import parse_rvs, RV_MODES
from ..math import sigma_algebra, atom_set
from ..utils import quasilexico_key
# Public names exported by this module.
__all__ = (
    'dist_from_induced_sigalg',
    'induced_sigalg',
    'insert_join',
    'insert_meet',
    'insert_rv',
    'join',
    'join_sigalg',
    'meet',
    'meet_sigalg',
    'sigma_algebra_sort',
)
def sigma_algebra_sort(sigalg):
    """
    Put the sigma algebra in quasi-lexicographical order.
    """
    # Sort within each element first, then order the elements themselves.
    ordered_elements = [tuple(sorted(element)) for element in sigalg]
    return sorted(ordered_elements, key=quasilexico_key)
def induced_sigalg(dist, rvs, rv_mode=None):
    """
    Returns the induced sigma-algebra of the random variable defined by `rvs`.

    Parameters
    ----------
    dist : Distribution
        The distribution which defines the base sigma-algebra.
    rvs : list
        The indexes of the random variable used to calculate the induced
        sigma algebra.
    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options are:
        {'indices', 'names'}. If equal to 'indices', then the elements of
        `rvs` are interpreted as random variable indices. If equal to 'names',
        then the elements are interpreted as random variable names. If `None`,
        then the value of `dist._rv_mode` is consulted.

    Returns
    -------
    F : frozenset of frozensets
        The induced sigma-algebra.
    """
    # Brute force:
    #   1) induced atoms from the new sigma-algebra:
    #        X^{-1}(A) = { w : X(w) \in A }
    #      where A = {a} for each projected outcome a of the marginal.
    #   2) close the induced atoms under the sigma-algebra operations
    #      (step 2 may not strictly be necessary).
    indexes = parse_rvs(dist, rvs, rv_mode=rv_mode, unique=True, sort=True)[1]
    # Group original outcomes by their projection onto `indexes`; each group
    # is one cell of the induced partition. All atoms are iterated (not just
    # those in the pmf) because we are partitioning the full sample space.
    ctor = dist._outcome_ctor
    partition = defaultdict(list)
    for outcome, _ in dist.zipped(mode='atoms'):
        projected = ctor([outcome[i] for i in indexes])
        partition[projected].append(outcome)
    atoms = frozenset(frozenset(cell) for cell in partition.values())
    return sigma_algebra(atoms)
def join_sigalg(dist, rvs, rv_mode=None):
    """
    Return the sigma-algebra of the join of the random variables in `rvs`.

    Parameters
    ----------
    dist : Distribution
        The distribution which defines the base sigma-algebra.
    rvs : list
        A list of lists. Each inner list specifies one random variable to be
        joined with the others, as a series of indexes; multiple random
        variables may share an index, e.g. [[0, 1], [1, 2]].
    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options are
        'indices' and 'names'. If `None`, the value of `dist._rv_mode` is
        consulted.

    Returns
    -------
    jsa : frozenset of frozensets
        The induced sigma-algebra of the join.
    """
    # Resolve each random variable to distribution-ordered indexes; the
    # names themselves are not needed.
    index_lists = [
        parse_rvs(dist, rv, rv_mode=rv_mode, unique=False, sort=True)[1]
        for rv in rvs
    ]
    sigalgs = [induced_sigalg(dist, idxs, rv_mode=RV_MODES.INDICES)
               for idxs in index_lists]

    # \sigma( X join Y ) = \sigma( \sigma(X) \cup \sigma(Y) )
    pooled = frozenset().union(*sigalgs)
    return sigma_algebra(pooled)
def meet_sigalg(dist, rvs, rv_mode=None):
    """
    Return the sigma-algebra of the meet of the random variables in `rvs`.

    Parameters
    ----------
    dist : Distribution
        The distribution which defines the base sigma-algebra.
    rvs : list
        A list of lists. Each inner list specifies one random variable to be
        met with the others, as a series of indexes; multiple random
        variables may share an index, e.g. [[0, 1], [1, 2]].
    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options are
        'indices' and 'names'. If `None`, the value of `dist._rv_mode` is
        consulted.

    Returns
    -------
    msa : frozenset of frozensets
        The induced sigma-algebra of the meet.
    """
    # Resolve each random variable to distribution-ordered indexes; the
    # names themselves are not needed.
    index_lists = [
        parse_rvs(dist, rv, rv_mode=rv_mode, unique=False, sort=True)[1]
        for rv in rvs
    ]
    sigalgs = [induced_sigalg(dist, idxs, rv_mode=RV_MODES.INDICES)
               for idxs in index_lists]

    # \sigma( X meet Y ) = \sigma(X) \cap \sigma(Y)
    msa = sigalgs[0]
    for other in sigalgs[1:]:
        msa = msa.intersection(other)
    return msa
def dist_from_induced_sigalg(dist, sigalg, int_outcomes=True):
    """
    Return the distribution associated with an induced sigma algebra.

    The sigma algebra is induced by a random variable from a probability
    space defined by `dist`.

    Parameters
    ----------
    dist : Distribution
        The distribution which defines the base sigma-algebra.
    sigalg : frozenset
        A sigma-algebra induced by a random variable from `dist`.
    int_outcomes : bool
        If `True`, the outcomes of the induced distribution are relabeled
        as integers instead of the atoms of the induced sigma-algebra.

    Returns
    -------
    d : ScalarDistribution
        The distribution of the induced sigma algebra.
    """
    from dit import ScalarDistribution

    atoms = atom_set(sigalg)
    if int_outcomes:
        # Give the atoms a deterministic (quasi-lexicographic) order so the
        # integer labels are reproducible.
        atoms = sorted((sorted(atom) for atom in atoms), key=quasilexico_key)

    pmf = [dist.event_probability(atom) for atom in atoms]
    if int_outcomes:
        outcomes = range(len(atoms))
    else:
        # Outcomes must be sequences, so freeze each atom as a sorted tuple.
        outcomes = [tuple(sorted(atom)) for atom in atoms]

    return ScalarDistribution(outcomes, pmf, base=dist.get_base())
def join(dist, rvs, rv_mode=None, int_outcomes=True):
    """
    Return the distribution of the join of the random variables in `rvs`.

    Parameters
    ----------
    dist : Distribution
        The distribution which defines the base sigma-algebra.
    rvs : list
        A list of lists. Each inner list specifies one random variable to be
        joined with the others, as a series of indexes, e.g. [[0, 1], [1, 2]].
    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options are
        'indices' and 'names'. If `None`, the value of `dist._rv_mode` is
        consulted.
    int_outcomes : bool
        If `True`, the outcomes of the join are relabeled as integers
        instead of as the atoms of the induced sigma-algebra.

    Returns
    -------
    d : ScalarDistribution
        The distribution of the join.
    """
    sigalg = join_sigalg(dist, rvs, rv_mode)
    return dist_from_induced_sigalg(dist, sigalg, int_outcomes)
def meet(dist, rvs, rv_mode=None, int_outcomes=True):
    """
    Return the distribution of the meet of the random variables in `rvs`.

    Parameters
    ----------
    dist : Distribution
        The distribution which defines the base sigma-algebra.
    rvs : list
        A list of lists. Each inner list specifies one random variable to be
        met with the others, as a series of indexes, e.g. [[0, 1], [1, 2]].
    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options are
        'indices' and 'names'. If `None`, the value of `dist._rv_mode` is
        consulted.
    int_outcomes : bool
        If `True`, the outcomes of the meet are relabeled as integers
        instead of as the atoms of the induced sigma-algebra.

    Returns
    -------
    d : ScalarDistribution
        The distribution of the meet.
    """
    sigalg = meet_sigalg(dist, rvs, rv_mode)
    return dist_from_induced_sigalg(dist, sigalg, int_outcomes)
def insert_rv(dist, idx, sigalg):
    """
    Returns a new distribution with a random variable inserted at index `idx`.

    The random variable is constructed according to its induced sigma-algebra.

    Parameters
    ----------
    dist : Distribution
        The distribution which defines the base sigma-algebra.
    idx : int
        The index at which to insert the random variable. To append, set `idx`
        to be equal to -1 or dist.outcome_length().
    sigalg : frozenset
        The sigma-algebra induced by the random variable.

    Returns
    -------
    d : Distribution
        The new distribution.

    Raises
    ------
    IndexError
        If `idx` is not -1 and not in [0, dist.outcome_length()].
    """
    from itertools import chain
    # Normalize the append shorthand (-1) to an explicit end index.
    if idx == -1:
        idx = dist.outcome_length()
    if not 0 <= idx <= dist.outcome_length():
        raise IndexError('Invalid insertion index.')

    # Provide sane sorting of atoms so labels are assigned deterministically.
    atoms = atom_set(sigalg)
    atoms = [sorted(atom) for atom in atoms]
    atoms.sort(key=quasilexico_key)

    if dist._outcome_class == str:
        # Then the labels for the new random variable must be strings.
        # NOTE(review): this caps the number of representable atoms at
        # len(digits + ascii_letters) = 62; more atoms would silently
        # share/truncate labels.
        from string import ascii_letters, digits
        labels = (digits + ascii_letters)[:len(atoms)]
    else:
        labels = range(len(atoms))

    # Create an index from outcomes to atoms: every original outcome maps to
    # the label of the (unique) atom that contains it.
    atom_of = {}
    for label, atom in zip(labels, atoms):
        for outcome in atom:
            atom_of[outcome] = label

    # Build an outcome constructor for the requested insertion point. The
    # original ctor is bound as a default argument so each closure captures
    # it at definition time.
    if idx == dist.outcome_length():
        def new_outcome_ctor(outcome, ctor=dist._outcome_ctor):
            """The end of the outcome"""
            new_outcome = [outcome, [atom_of[outcome]]]
            return ctor(chain.from_iterable(new_outcome))
    elif idx == 0:
        def new_outcome_ctor(outcome, ctor=dist._outcome_ctor):
            """The beginning of the outcome"""
            new_outcome = [[atom_of[outcome]], outcome]
            return ctor(chain.from_iterable(new_outcome))
    else:
        def new_outcome_ctor(outcome, ctor=dist._outcome_ctor):
            """In the middle of the outcome"""
            new_outcome = [outcome[:idx], [atom_of[outcome]], outcome[idx:]]
            return ctor(chain.from_iterable(new_outcome))

    d = dit.modify_outcomes(dist, new_outcome_ctor)
    return d
def insert_join(dist, idx, rvs, rv_mode=None):
    """
    Return a new distribution with the join inserted at index `idx`.

    The join of the random variables in `rvs` is constructed and then
    inserted at index `idx`.

    Parameters
    ----------
    dist : Distribution
        The distribution which defines the base sigma-algebra.
    idx : int
        The index at which to insert the join. To append the join, set `idx`
        to be equal to -1 or dist.outcome_length().
    rvs : list
        A list of lists. Each inner list specifies one random variable as a
        series of indexes, e.g. [[0, 1], [1, 2]].
    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options are
        'indices' and 'names'. If `None`, the value of `dist._rv_mode` is
        consulted.

    Returns
    -------
    d : Distribution
        The new distribution with the join at index `idx`.
    """
    sigalg = join_sigalg(dist, rvs, rv_mode)
    return insert_rv(dist, idx, sigalg)
def insert_meet(dist, idx, rvs, rv_mode=None):
    """
    Return a new distribution with the meet inserted at index `idx`.

    The meet of the random variables in `rvs` is constructed and then
    inserted at index `idx`.

    Parameters
    ----------
    dist : Distribution
        The distribution which defines the base sigma-algebra.
    idx : int
        The index at which to insert the meet. To append the meet, set `idx`
        to be equal to -1 or dist.outcome_length().
    rvs : list
        A list of lists. Each inner list specifies one random variable as a
        series of indexes, e.g. [[0, 1], [1, 2]].
    rv_mode : str, None
        Specifies how to interpret the elements of `rvs`. Valid options are
        'indices' and 'names'. If `None`, the value of `dist._rv_mode` is
        consulted.

    Returns
    -------
    d : Distribution
        The new distribution with the meet at index `idx`.
    """
    sigalg = meet_sigalg(dist, rvs, rv_mode)
    return insert_rv(dist, idx, sigalg)
| |
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
from __future__ import print_function

import contextlib
import fnmatch
import functools
import os
import re
import sys

import anyconfig
import colorama
import jinja2
import yaml

from molecule.logger import get_logger
# Module-level logger shared by the helpers in this module.
LOG = get_logger(__name__)
# Merge strategy handed to ``anyconfig.merge`` by ``merge_dicts``.
MERGE_STRATEGY = anyconfig.MS_DICTS
class SafeDumper(yaml.SafeDumper):
    """A ``yaml.SafeDumper`` that always indents block sequences."""

    def increase_indent(self, flow=False, indentless=False):
        # Ignore the requested ``indentless`` value so nested sequences are
        # always indented under their parent key.
        return super(SafeDumper, self).increase_indent(flow, False)
def print_debug(title, data):
    """
    Print a colorized ``DEBUG`` banner for ``title`` followed by ``data``.

    :param title: A string naming the debug section.
    :param data: A string payload to print under the banner.
    :return: None
    """
    banner = ''.join([
        colorama.Back.WHITE,
        colorama.Style.BRIGHT,
        colorama.Fore.BLACK,
        'DEBUG: {}'.format(title),
        colorama.Fore.RESET,
        colorama.Back.RESET,
        colorama.Style.RESET_ALL,
    ])
    print(banner)

    payload = ''.join([
        colorama.Fore.BLACK,
        colorama.Style.BRIGHT,
        data,
        colorama.Style.RESET_ALL,
        colorama.Fore.RESET,
    ])
    print(payload)
def print_environment_vars(env):
    """
    Print ``Ansible`` and ``Molecule`` environment variables and return None.

    :param env: A dict containing the shell's environment as collected by
     ``os.environ``.
    :return: None
    """
    ansible_env = {k: v for k, v in env.items() if 'ANSIBLE_' in k}
    molecule_env = {k: v for k, v in env.items() if 'MOLECULE_' in k}

    print_debug('ANSIBLE ENVIRONMENT', safe_dump(ansible_env))
    print_debug('MOLECULE ENVIRONMENT', safe_dump(molecule_env))

    # A copy/paste-able "KEY=value ..." line covering both sets of variables.
    replay = dict(ansible_env)
    replay.update(molecule_env)
    pairs = ['{}={}'.format(k, v) for k, v in sorted(replay.items())]
    print_debug('SHELL REPLAY', ' '.join(pairs))
    print()
def sysexit(code=1):
    """Exit the interpreter with ``code`` (defaults to 1)."""
    sys.exit(code)
def sysexit_with_message(msg, code=1):
    """Log ``msg`` at critical level, then exit with ``code``."""
    LOG.critical(msg)
    sysexit(code)
def run_command(cmd, debug=False):
    """
    Execute the given command and return its result object.

    :param cmd: A ``sh.Command`` object to execute.
    :param debug: An optional bool to toggle debug output.
    :return: ``sh`` object
    """
    if debug:
        # WARN(retr0h): Reaches into an internal ``sh`` data structure to
        # recover the environment attached to the ``sh.Command`` object.
        env = cmd._partial_call_args.get('env', {})
        print_environment_vars(env)
        print_debug('COMMAND', str(cmd))
        print()
    return cmd(_truncate_exc=False)
def os_walk(directory, pattern, excludes=None):
    """
    Walk ``directory`` and yield the paths of files matching ``pattern``.

    :param directory: A string containing the root directory to walk.
    :param pattern: An ``fnmatch`` pattern filenames must match.
    :param excludes: An optional list of directory names to prune from the
     walk.
    :return: generator of str

    Fix: the previous ``excludes=[]`` default was a shared mutable default
    argument; ``None`` is used as the sentinel instead.
    """
    if excludes is None:
        excludes = []
    for root, dirs, files in os.walk(directory, topdown=True):
        # Prune excluded directories in place so os.walk skips them.
        dirs[:] = [d for d in dirs if d not in excludes]
        for basename in files:
            if fnmatch.fnmatch(basename, pattern):
                yield os.path.join(root, basename)
def render_template(template, **kwargs):
    """Render ``template`` as a Jinja2 template with ``kwargs`` as context."""
    environment = jinja2.Environment()
    return environment.from_string(template).render(kwargs)
def write_file(filename, content):
    """
    Write ``content`` to ``filename`` and return None.

    The Molecule-managed header is prepended afterwards via
    ``file_prepender``.

    :param filename: A string containing the target filename.
    :param content: A string containing the data to be written.
    :return: None
    """
    with open_file(filename, 'w') as stream:
        stream.write(content)
    file_prepender(filename)
def molecule_prepender(content):
    """Return ``content`` prefixed with the Molecule-managed banner."""
    banner = '# Molecule managed\n\n'
    return banner + content
def file_prepender(filename):
    """
    Prepend an informational header on files managed by Molecule and return
    None.

    :param filename: A string containing the target filename.
    :return: None
    """
    with open_file(filename, 'r+') as stream:
        existing = stream.read()
        stream.seek(0, 0)
        stream.write(molecule_prepender(existing))
def safe_dump(data):
    """
    Dump the provided data to a YAML document and return a string.

    :param data: The object to serialize.
    :return: str
    """
    # TODO(retr0h): Do we need to encode?
    # yaml.dump(data) produces the document as a str object in both python
    # 2 and 3.
    return yaml.dump(
        data,
        Dumper=SafeDumper,
        default_flow_style=False,
        explicit_start=True,
    )
def safe_load(string):
    """
    Parse the provided string as YAML and return a dict.

    :param string: A string to be parsed.
    :return: dict
    """
    try:
        # An empty document parses to None; normalize that to {}.
        return yaml.safe_load(string) or {}
    except yaml.scanner.ScannerError as e:
        sysexit_with_message(str(e))
def safe_load_file(filename):
    """
    Parse the provided YAML file and return a dict.

    :param filename: A string containing an absolute path to the file to
     parse.
    :return: dict
    """
    with open_file(filename) as stream:
        return safe_load(stream)
@contextlib.contextmanager
def open_file(filename, mode='r'):
    """
    Open the provided file safely and yield a file object.

    :param filename: A string containing an absolute path to the file to
     open.
    :param mode: A string describing the way in which the file will be used.
    :return: file type
    """
    stream = open(filename, mode)
    try:
        yield stream
    finally:
        stream.close()
def instance_with_scenario_name(instance_name, scenario_name):
    """Return the canonical ``<instance>-<scenario>`` name."""
    return '{0}-{1}'.format(instance_name, scenario_name)
def strip_ansi_escape(string):
    """Remove ANSI escape sequences (``ESC ... m``) from ``string``."""
    pattern = re.compile(r'\x1b[^m]*m')
    return pattern.sub('', string)
def strip_ansi_color(s):
    """Remove ANSI color codes (``ESC [ ... m``) from ``s``."""
    # Taken from tabulate
    return re.sub(r'\x1b\[\d*m', '', s)
def verbose_flag(options):
    """
    Pop the first of ``v``/``vv``/``vvv`` found truthy in ``options`` and
    return it as a CLI flag list (e.g. ``['-vv']``).

    Returns ``[]`` when none is set. The matched key — and, if present, a
    truthy ``verbose`` key — is removed from ``options`` in place.
    """
    flag = []
    for level in ('v', 'vv', 'vvv'):
        if options.get(level):
            flag = ['-{}'.format(level)]
            del options[level]
            if options.get('verbose'):
                del options['verbose']
            break
    return flag
def filter_verbose_permutation(options):
    """Return a copy of ``options`` without pure ``v``/``vv``/... keys."""
    return {key: value
            for key, value in options.items()
            if not re.match('^[v]+$', key)}
def title(word):
    """Convert snake_case to a space-separated title ('foo_bar' -> 'Foo Bar')."""
    pieces = (piece.capitalize() or '_' for piece in word.split('_'))
    return ' '.join(pieces)
def abs_path(path):
    """Return the absolute form of ``path``, or None when ``path`` is falsy."""
    if not path:
        return None
    return os.path.abspath(path)
def camelize(string):
    """Convert snake_case to CamelCase ('foo_bar' -> 'FooBar')."""
    # NOTE(retr0h): Taken from jpvanhal/inflection
    # https://github.com/jpvanhal/inflection
    upper_after_boundary = lambda match: match.group(1).upper()
    return re.sub(r"(?:^|_)(.)", upper_after_boundary, string)
def underscore(string):
    """Convert CamelCase to snake_case ('FooBar' -> 'foo_bar')."""
    # NOTE(retr0h): Taken from jpvanhal/inflection
    # https://github.com/jpvanhal/inflection
    step = re.sub(r"([A-Z]+)([A-Z][a-z])", r'\1_\2', string)
    step = re.sub(r"([a-z\d])([A-Z])", r'\1_\2', step)
    return step.replace("-", "_").lower()
def merge_dicts(a, b):
    """
    Merge the values of ``b`` into ``a`` and return the mutated dict ``a``.

    Nested dicts are deep-merged according to ``MERGE_STRATEGY``; other
    values in ``b`` replace those in ``a``. For example, merging::

        a = {'b': [{'c': 0}, {'c': 2}], 'd': {'e': 'aaa', 'f': 3}}
        b = {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb'}}

    yields::

        {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb', 'f': 3}}

    :param a: the target dictionary
    :param b: the dictionary to import
    :return: dict
    """
    anyconfig.merge(a, b, ac_merge=MERGE_STRATEGY)
    return a
def memoize(function):
    """
    Decorator that caches ``function`` results by call arguments.

    Fixes two defects of the previous implementation: keyword arguments are
    now part of the cache key (previously ``f(1, y=2)`` could return the
    cached result of ``f(1)``), and ``functools.wraps`` preserves the
    wrapped function's metadata.

    :param function: The callable to memoize.
    :return: the memoizing wrapper
    """
    memo = {}

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        # frozenset makes the kwargs hashable and order-insensitive.
        key = (args, frozenset(kwargs.items()))
        if key not in memo:
            memo[key] = function(*args, **kwargs)
        return memo[key]

    return wrapper
| |
# Extra stress testing for Bessel functions
# Reference zeros generated with the aid of scipy.special
# jn_zero, jnp_zero, yn_zero, ynp_zero
from mpmath import *
# Table dimensions for the reference-zero tables below: each table holds
# V rows (orders) of M zeros each. Presumably consumed by test loops
# elsewhere in this file — confirm against the callers.
V = 15
M = 15
# First 15 positive zeros of the Bessel functions J_n(x) for orders
# n = 0..14, one row per order; generated with the aid of scipy.special
# (jn_zero), per the header comment of this file.
jn_small_zeros = \
[[2.4048255576957728,
  5.5200781102863106,
  8.6537279129110122,
  11.791534439014282,
  14.930917708487786,
  18.071063967910923,
  21.211636629879259,
  24.352471530749303,
  27.493479132040255,
  30.634606468431975,
  33.775820213573569,
  36.917098353664044,
  40.058425764628239,
  43.19979171317673,
  46.341188371661814],
 [3.8317059702075123,
  7.0155866698156188,
  10.173468135062722,
  13.323691936314223,
  16.470630050877633,
  19.615858510468242,
  22.760084380592772,
  25.903672087618383,
  29.046828534916855,
  32.189679910974404,
  35.332307550083865,
  38.474766234771615,
  41.617094212814451,
  44.759318997652822,
  47.901460887185447],
 [5.1356223018406826,
  8.4172441403998649,
  11.619841172149059,
  14.795951782351261,
  17.959819494987826,
  21.116997053021846,
  24.270112313573103,
  27.420573549984557,
  30.569204495516397,
  33.7165195092227,
  36.86285651128381,
  40.008446733478192,
  43.153453778371463,
  46.297996677236919,
  49.442164110416873],
 [6.3801618959239835,
  9.7610231299816697,
  13.015200721698434,
  16.223466160318768,
  19.409415226435012,
  22.582729593104442,
  25.748166699294978,
  28.908350780921758,
  32.064852407097709,
  35.218670738610115,
  38.370472434756944,
  41.520719670406776,
  44.669743116617253,
  47.817785691533302,
  50.965029906205183],
 [7.5883424345038044,
  11.064709488501185,
  14.37253667161759,
  17.615966049804833,
  20.826932956962388,
  24.01901952477111,
  27.199087765981251,
  30.371007667117247,
  33.537137711819223,
  36.699001128744649,
  39.857627302180889,
  43.01373772335443,
  46.167853512924375,
  49.320360686390272,
  52.471551398458023],
 [8.771483815959954,
  12.338604197466944,
  15.700174079711671,
  18.980133875179921,
  22.217799896561268,
  25.430341154222704,
  28.626618307291138,
  31.811716724047763,
  34.988781294559295,
  38.159868561967132,
  41.326383254047406,
  44.489319123219673,
  47.649399806697054,
  50.80716520300633,
  53.963026558378149],
 [9.9361095242176849,
  13.589290170541217,
  17.003819667816014,
  20.320789213566506,
  23.58608443558139,
  26.820151983411405,
  30.033722386570469,
  33.233041762847123,
  36.422019668258457,
  39.603239416075404,
  42.778481613199507,
  45.949015998042603,
  49.11577372476426,
  52.279453903601052,
  55.440592068853149],
 [11.086370019245084,
  14.821268727013171,
  18.287582832481726,
  21.641541019848401,
  24.934927887673022,
  28.191188459483199,
  31.42279419226558,
  34.637089352069324,
  37.838717382853611,
  41.030773691585537,
  44.21540850526126,
  47.394165755570512,
  50.568184679795566,
  53.738325371963291,
  56.905249991978781],
 [12.225092264004655,
  16.037774190887709,
  19.554536430997055,
  22.94517313187462,
  26.266814641176644,
  29.54565967099855,
  32.795800037341462,
  36.025615063869571,
  39.240447995178135,
  42.443887743273558,
  45.638444182199141,
  48.825930381553857,
  52.007691456686903,
  55.184747939289049,
  58.357889025269694],
 [13.354300477435331,
  17.241220382489128,
  20.807047789264107,
  24.233885257750552,
  27.583748963573006,
  30.885378967696675,
  34.154377923855096,
  37.400099977156589,
  40.628553718964528,
  43.843801420337347,
  47.048700737654032,
  50.245326955305383,
  53.435227157042058,
  56.619580266508436,
  59.799301630960228],
 [14.475500686554541,
  18.433463666966583,
  22.046985364697802,
  25.509450554182826,
  28.887375063530457,
  32.211856199712731,
  35.499909205373851,
  38.761807017881651,
  42.004190236671805,
  45.231574103535045,
  48.447151387269394,
  51.653251668165858,
  54.851619075963349,
  58.043587928232478,
  61.230197977292681],
 [15.589847884455485,
  19.61596690396692,
  23.275853726263409,
  26.773322545509539,
  30.17906117878486,
  33.526364075588624,
  36.833571341894905,
  40.111823270954241,
  43.368360947521711,
  46.608132676274944,
  49.834653510396724,
  53.050498959135054,
  56.257604715114484,
  59.457456908388002,
  62.651217388202912],
 [16.698249933848246,
  20.789906360078443,
  24.494885043881354,
  28.026709949973129,
  31.45996003531804,
  34.829986990290238,
  38.156377504681354,
  41.451092307939681,
  44.721943543191147,
  47.974293531269048,
  51.211967004101068,
  54.437776928325074,
  57.653844811906946,
  60.8618046824805,
  64.062937824850136],
 [17.801435153282442,
  21.95624406783631,
  25.705103053924724,
  29.270630441874802,
  32.731053310978403,
  36.123657666448762,
  39.469206825243883,
  42.780439265447158,
  46.06571091157561,
  49.330780096443524,
  52.579769064383396,
  55.815719876305778,
  59.040934037249271,
  62.257189393731728,
  65.465883797232125],
 [18.899997953174024,
  23.115778347252756,
  26.907368976182104,
  30.505950163896036,
  33.993184984781542,
  37.408185128639695,
  40.772827853501868,
  44.100590565798301,
  47.400347780543231,
  50.678236946479898,
  53.93866620912693,
  57.184898598119301,
  60.419409852130297,
  63.644117508962281,
  66.860533012260103]]
# First 15 zeros of the derivatives J_n'(x) for orders n = 0..14, one row
# per order; generated with the aid of scipy.special (jnp_zero), per the
# header comment of this file. Note the n=0 row starts at 0.0.
jnp_small_zeros = \
[[0.0,
  3.8317059702075123,
  7.0155866698156188,
  10.173468135062722,
  13.323691936314223,
  16.470630050877633,
  19.615858510468242,
  22.760084380592772,
  25.903672087618383,
  29.046828534916855,
  32.189679910974404,
  35.332307550083865,
  38.474766234771615,
  41.617094212814451,
  44.759318997652822],
 [1.8411837813406593,
  5.3314427735250326,
  8.5363163663462858,
  11.706004902592064,
  14.863588633909033,
  18.015527862681804,
  21.16436985918879,
  24.311326857210776,
  27.457050571059246,
  30.601922972669094,
  33.746182898667383,
  36.889987409236811,
  40.033444053350675,
  43.176628965448822,
  46.319597561173912],
 [3.0542369282271403,
  6.7061331941584591,
  9.9694678230875958,
  13.170370856016123,
  16.347522318321783,
  19.512912782488205,
  22.671581772477426,
  25.826037141785263,
  28.977672772993679,
  32.127327020443474,
  35.275535050674691,
  38.422654817555906,
  41.568934936074314,
  44.714553532819734,
  47.859641607992093],
 [4.2011889412105285,
  8.0152365983759522,
  11.345924310743006,
  14.585848286167028,
  17.78874786606647,
  20.9724769365377,
  24.144897432909265,
  27.310057930204349,
  30.470268806290424,
  33.626949182796679,
  36.781020675464386,
  39.933108623659488,
  43.083652662375079,
  46.232971081836478,
  49.381300092370349],
 [5.3175531260839944,
  9.2823962852416123,
  12.681908442638891,
  15.964107037731551,
  19.196028800048905,
  22.401032267689004,
  25.589759681386733,
  28.767836217666503,
  31.938539340972783,
  35.103916677346764,
  38.265316987088158,
  41.423666498500732,
  44.579623137359257,
  47.733667523865744,
  50.886159153182682],
 [6.4156163757002403,
  10.519860873772308,
  13.9871886301403,
  17.312842487884625,
  20.575514521386888,
  23.803581476593863,
  27.01030789777772,
  30.20284907898166,
  33.385443901010121,
  36.560777686880356,
  39.730640230067416,
  42.896273163494417,
  46.058566273567043,
  49.218174614666636,
  52.375591529563596],
 [7.501266144684147,
  11.734935953042708,
  15.268181461097873,
  18.637443009666202,
  21.931715017802236,
  25.183925599499626,
  28.409776362510085,
  31.617875716105035,
  34.81339298429743,
  37.999640897715301,
  41.178849474321413,
  44.352579199070217,
  47.521956905768113,
  50.687817781723741,
  53.85079463676896],
 [8.5778364897140741,
  12.932386237089576,
  16.529365884366944,
  19.941853366527342,
  23.268052926457571,
  26.545032061823576,
  29.790748583196614,
  33.015178641375142,
  36.224380548787162,
  39.422274578939259,
  42.611522172286684,
  45.793999658055002,
  48.971070951900596,
  52.143752969301988,
  55.312820330403446],
 [9.6474216519972168,
  14.115518907894618,
  17.774012366915256,
  21.229062622853124,
  24.587197486317681,
  27.889269427955092,
  31.155326556188325,
  34.39662855427218,
  37.620078044197086,
  40.830178681822041,
  44.030010337966153,
  47.221758471887113,
  50.407020967034367,
  53.586995435398319,
  56.762598475105272],
 [10.711433970699945,
  15.28673766733295,
  19.004593537946053,
  22.501398726777283,
  25.891277276839136,
  29.218563499936081,
  32.505247352375523,
  35.763792928808799,
  39.001902811514218,
  42.224638430753279,
  45.435483097475542,
  48.636922645305525,
  51.830783925834728,
  55.01844255063594,
  58.200955824859509],
 [11.770876674955582,
  16.447852748486498,
  20.223031412681701,
  23.760715860327448,
  27.182021527190532,
  30.534504754007074,
  33.841965775135715,
  37.118000423665604,
  40.371068905333891,
  43.606764901379516,
  46.828959446564562,
  50.040428970943456,
  53.243223214220535,
  56.438892058982552,
  59.628631306921512],
 [12.826491228033465,
  17.600266557468326,
  21.430854238060294,
  25.008518704644261,
  28.460857279654847,
  31.838424458616998,
  35.166714427392629,
  38.460388720328256,
  41.728625562624312,
  44.977526250903469,
  48.211333836373288,
  51.433105171422278,
  54.645106240447105,
  57.849056857839799,
  61.046288512821078],
 [13.878843069697276,
  18.745090916814406,
  22.629300302835503,
  26.246047773946584,
  29.72897816891134,
  33.131449953571661,
  36.480548302231658,
  39.791940718940855,
  43.075486800191012,
  46.337772104541405,
  49.583396417633095,
  52.815686826850452,
  56.037118687012179,
  59.249577075517968,
  62.454525995970462],
 [14.928374492964716,
  19.88322436109951,
  23.81938909003628,
  27.474339750968247,
  30.987394331665278,
  34.414545662167183,
  37.784378506209499,
  41.113512376883377,
  44.412454519229281,
  47.688252845993366,
  50.945849245830813,
  54.188831071035124,
  57.419876154678179,
  60.641030026538746,
  63.853885828967512],
 [15.975438807484321,
  21.015404934568315,
  25.001971500138194,
  28.694271223110755,
  32.236969407878118,
  35.688544091185301,
  39.078998185245057,
  42.425854432866141,
  45.740236776624833,
  49.029635055514276,
  52.299319390331728,
  55.553127779547459,
  58.793933759028134,
  62.02393848337554,
  65.244860767043859]]
# First 15 positive zeros of the Bessel functions Y_n(x) for orders
# n = 0..14, one row per order; generated with the aid of scipy.special
# (yn_zero), per the header comment of this file.
yn_small_zeros = \
[[0.89357696627916752,
  3.9576784193148579,
  7.0860510603017727,
  10.222345043496417,
  13.361097473872763,
  16.500922441528091,
  19.64130970088794,
  22.782028047291559,
  25.922957653180923,
  29.064030252728398,
  32.205204116493281,
  35.346452305214321,
  38.487756653081537,
  41.629104466213808,
  44.770486607221993],
 [2.197141326031017,
  5.4296810407941351,
  8.5960058683311689,
  11.749154830839881,
  14.897442128336725,
  18.043402276727856,
  21.188068934142213,
  24.331942571356912,
  27.475294980449224,
  30.618286491641115,
  33.761017796109326,
  36.90355531614295,
  40.045944640266876,
  43.188218097393211,
  46.330399250701687],
 [3.3842417671495935,
  6.7938075132682675,
  10.023477979360038,
  13.209986710206416,
  16.378966558947457,
  19.539039990286384,
  22.69395593890929,
  25.845613720902269,
  28.995080395650151,
  32.143002257627551,
  35.289793869635804,
  38.435733485446343,
  41.581014867297885,
  44.725777117640461,
  47.870122696676504],
 [4.5270246611496439,
  8.0975537628604907,
  11.396466739595867,
  14.623077742393873,
  17.81845523294552,
  20.997284754187761,
  24.166235758581828,
  27.328799850405162,
  30.486989604098659,
  33.642049384702463,
  36.794791029185579,
  39.945767226378749,
  43.095367507846703,
  46.2438744334407,
  49.391498015725107],
 [5.6451478942208959,
  9.3616206152445429,
  12.730144474090465,
  15.999627085382479,
  19.22442895931681,
  22.424810599698521,
  25.610267054939328,
  28.785893657666548,
  31.954686680031668,
  35.118529525584828,
  38.278668089521758,
  41.435960629910073,
  44.591018225353424,
  47.744288086361052,
  50.896105199722123],
 [6.7471838248710219,
  10.597176726782031,
  14.033804104911233,
  17.347086393228382,
  20.602899017175335,
  23.826536030287532,
  27.030134937138834,
  30.220335654231385,
  33.401105611047908,
  36.574972486670962,
  39.743627733020277,
  42.908248189569535,
  46.069679073215439,
  49.228543693445843,
  52.385312123112282],
 [7.8377378223268716,
  11.811037107609447,
  15.313615118517857,
  18.670704965906724,
  21.958290897126571,
  25.206207715021249,
  28.429037095235496,
  31.634879502950644,
  34.828638524084437,
  38.013473399691765,
  41.19151880917741,
  44.364272633271975,
  47.53281875312084,
  50.697961822183806,
  53.860312300118388],
 [8.919605734873789,
  13.007711435388313,
  16.573915129085334,
  19.974342312352426,
  23.293972585596648,
  26.5667563757203,
  29.809531451608321,
  33.031769327150685,
  36.239265816598239,
  39.435790312675323,
  42.623910919472727,
  45.805442883111651,
  48.981708325514764,
  52.153694518185572,
  55.322154420959698],
 [9.9946283820824834,
  14.190361295800141,
  17.817887841179873,
  21.26093227125945,
  24.612576377421522,
  27.910524883974868,
  31.173701563441602,
  34.412862242025045,
  37.634648706110989,
  40.843415321050884,
  44.04214994542435,
  47.232978012841169,
  50.417456447370186,
  53.596753874948731,
  56.771765754432457],
 [11.064090256031013,
  15.361301343575925,
  19.047949646361388,
  22.532765416313869,
  25.91620496332662,
  29.2394205079349,
  32.523270869465881,
  35.779715464475261,
  39.016196664616095,
  42.237627509803703,
  45.4474001519274,
  48.647941127433196,
  51.841036928216499,
  55.028034667184916,
  58.209970905250097],
 [12.128927704415439,
  16.522284394784426,
  20.265984501212254,
  23.791669719454272,
  27.206568881574774,
  30.555020011020762,
  33.859683872746356,
  37.133649760307504,
  40.385117593813002,
  43.619533085646856,
  46.840676630553575,
  50.051265851897857,
  53.253310556711732,
  56.448332488918971,
  59.637507005589829],
 [13.189846995683845,
  17.674674253171487,
  21.473493977824902,
  25.03913093040942,
  28.485081336558058,
  31.858644293774859,
  35.184165245422787,
  38.475796636190897,
  41.742455848758449,
  44.990096293791186,
  48.222870660068338,
  51.443777308699826,
  54.655042589416311,
  57.858358441436511,
  61.055036135780528],
 [14.247395665073945,
  18.819555894710682,
  22.671697117872794,
  26.276375544903892,
  29.752925495549038,
  33.151412708998983,
  36.497763772987645,
  39.807134090704376,
  43.089121522203808,
  46.350163579538652,
  49.594769786270069,
  52.82620892320143,
  56.046916910756961,
  59.258751140598783,
  62.463155567737854],
 [15.30200785858925,
  19.957808654258601,
  23.861599172945054,
  27.504429642227545,
  31.011103429019229,
  34.434283425782942,
  37.801385632318459,
  41.128514139788358,
  44.425913324440663,
  47.700482714581842,
  50.957073905278458,
  54.199216028087261,
  57.429547607017405,
  60.65008661807661,
  63.862406280068586],
 [16.354034360047551,
  21.090156519983806,
  25.044040298785627,
  28.724161640881914,
  32.260472459522644,
  35.708083982611664,
  39.095820003878235,
  42.440684315990936,
  45.75353669045622,
  49.041718113283529,
  52.310408280968073,
  55.56338698149062,
  58.803488508906895,
  62.032886550960831,
  65.253280088312461]]
# Reference table: the first 15 zeros of Y_n'(x) (derivative of the Bessel
# function of the second kind) for orders n = 0..14, one row per order.
# Used as expected values by test_bessel_zeros below (besselyzero(v, m, 1)).
ynp_small_zeros = \
[[2.197141326031017,
  5.4296810407941351,
  8.5960058683311689,
  11.749154830839881,
  14.897442128336725,
  18.043402276727856,
  21.188068934142213,
  24.331942571356912,
  27.475294980449224,
  30.618286491641115,
  33.761017796109326,
  36.90355531614295,
  40.045944640266876,
  43.188218097393211,
  46.330399250701687],
 [3.6830228565851777,
  6.9414999536541757,
  10.123404655436613,
  13.285758156782854,
  16.440058007293282,
  19.590241756629495,
  22.738034717396327,
  25.884314618788867,
  29.029575819372535,
  32.174118233366201,
  35.318134458192094,
  38.461753870997549,
  41.605066618873108,
  44.74813744908079,
  47.891014070791065],
 [5.0025829314460639,
  8.3507247014130795,
  11.574195465217647,
  14.760909306207676,
  17.931285939466855,
  21.092894504412739,
  24.249231678519058,
  27.402145837145258,
  30.552708880564553,
  33.70158627151572,
  36.849213419846257,
  39.995887376143356,
  43.141817835750686,
  46.287157097544201,
  49.432018469138281],
 [6.2536332084598136,
  9.6987879841487711,
  12.972409052292216,
  16.19044719506921,
  19.38238844973613,
  22.559791857764261,
  25.728213194724094,
  28.890678419054777,
  32.048984005266337,
  35.204266606440635,
  38.357281675961019,
  41.508551443818436,
  44.658448731963676,
  47.807246956681162,
  50.95515126455207],
 [7.4649217367571329,
  11.005169149809189,
  14.3317235192331,
  17.58443601710272,
  20.801062338411128,
  23.997004122902644,
  27.179886689853435,
  30.353960608554323,
  33.521797098666792,
  36.685048382072301,
  39.844826969405863,
  43.001910515625288,
  46.15685955107263,
  49.310088614282257,
  52.461911043685864],
 [8.6495562436971983,
  12.280868725807848,
  15.660799304540377,
  18.949739756016503,
  22.192841809428241,
  25.409072788867674,
  28.608039283077593,
  31.795195353138159,
  34.973890634255288,
  38.14630522169358,
  41.313923188794905,
  44.477791768537617,
  47.638672065035628,
  50.797131066967842,
  53.953600129601663],
 [9.8147970120105779,
  13.532811875789828,
  16.965526446046053,
  20.291285512443867,
  23.56186260680065,
  26.799499736027237,
  30.015665481543419,
  33.216968050039509,
  36.407516858984748,
  39.590015243560459,
  42.766320595957378,
  45.937754257017323,
  49.105283450953203,
  52.269633324547373,
  55.431358715604255],
 [10.965152105242974,
  14.765687379508912,
  18.250123150217555,
  21.612750053384621,
  24.911310600813573,
  28.171051927637585,
  31.40518108895689,
  34.621401012564177,
  37.824552065973114,
  41.017847386464902,
  44.203512240871601,
  47.3831408366063,
  50.557907466622796,
  53.728697478957026,
  56.896191727313342],
 [12.103641941939539,
  15.982840905145284,
  19.517731005559611,
  22.916962141504605,
  26.243700855690533,
  29.525960140695407,
  32.778568197561124,
  36.010261572392516,
  39.226578757802172,
  42.43122493258747,
  45.626783824134354,
  48.815117837929515,
  51.997606404328863,
  55.175294723956816,
  58.348990221754937],
 [13.232403808592215,
  17.186756572616758,
  20.770762917490496,
  24.206152448722253,
  27.561059462697153,
  30.866053571250639,
  34.137476603379774,
  37.385039772270268,
  40.614946085165892,
  43.831373184731238,
  47.037251786726299,
  50.234705848765229,
  53.425316228549359,
  56.610286079882087,
  59.790548623216652],
 [14.35301374369987,
  18.379337301642568,
  22.011118775283494,
  25.482116178696707,
  28.865046588695164,
  32.192853922166294,
  35.483296655830277,
  38.747005493021857,
  41.990815194320955,
  45.219355876831731,
  48.435892856078888,
  51.642803925173029,
  54.84186659475857,
  58.034439083840155,
  61.221578745109862],
 [15.466672066554263,
  19.562077985759503,
  23.240325531101082,
  26.746322986645901,
  30.157042415639891,
  33.507642948240263,
  36.817212798512775,
  40.097251300178642,
  43.355193847719752,
  46.596103410173672,
  49.823567279972794,
  53.040208868780832,
  56.247996968470062,
  59.448441365714251,
  62.642721301357187],
 [16.574317035530872,
  20.73617763753932,
  24.459631728238804,
  27.999993668839644,
  31.438208790267783,
  34.811512070805535,
  38.140243708611251,
  41.436725143893739,
  44.708963264433333,
  47.962435051891027,
  51.201037321915983,
  54.427630745992975,
  57.644369734615238,
  60.852911791989989,
  64.054555435720397],
 [17.676697936439624,
  21.9026148697762,
  25.670073356263225,
  29.244155124266438,
  32.709534477396028,
  36.105399554497548,
  39.453272918267025,
  42.766255701958017,
  46.052899215578358,
  49.319076602061401,
  52.568982147952547,
  55.805705507386287,
  59.031580956740466,
  62.248409689597653,
  65.457606670836759],
 [18.774423978290318,
  23.06220035979272,
  26.872520985976736,
  30.479680663499762,
  33.971869047372436,
  37.390118854896324,
  40.757072537673599,
  44.086572292170345,
  47.387688809191869,
  50.66667461073936,
  53.928009929563275,
  57.175005343085052,
  60.410169281219877,
  63.635442539153021,
  66.85235358587768]]
def test_bessel_zeros():
    """Check besseljzero/besselyzero against the precomputed reference tables.

    For each order v in [0, V) and zero index m in [1, M], the m-th zero of
    J_v, J_v', Y_v and Y_v' must match the jn/jnp/yn/ynp_small_zeros tables
    above.  Each function is called twice with identical arguments to
    exercise any internal result caching.
    """
    mp.dps = 15
    for v in range(V):
        for m in range(1, M+1):
            # Was a Python 2 print statement (`print v, m, ...`), which is a
            # SyntaxError under Python 3; use the print function instead.
            print(v, m, "of", V, M)
            # Twice to test cache (if used)
            assert besseljzero(v,m).ae(jn_small_zeros[v][m-1])
            assert besseljzero(v,m).ae(jn_small_zeros[v][m-1])
            assert besseljzero(v,m,1).ae(jnp_small_zeros[v][m-1])
            assert besseljzero(v,m,1).ae(jnp_small_zeros[v][m-1])
            assert besselyzero(v,m).ae(yn_small_zeros[v][m-1])
            assert besselyzero(v,m).ae(yn_small_zeros[v][m-1])
            assert besselyzero(v,m,1).ae(ynp_small_zeros[v][m-1])
            assert besselyzero(v,m,1).ae(ynp_small_zeros[v][m-1])

if __name__ == "__main__":
    test_bessel_zeros()
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe modules this recipe depends on (resolved by the recipe engine).
DEPS = [
  'archive',
  'ct',
  'file',
  'perf_dashboard',
  'recipe_engine/json',
  'recipe_engine/path',
  'recipe_engine/properties',
  'recipe_engine/step',
  'recipe_engine/time',
  'skia_swarming',
]

# Cluster Telemetry page set type (the top-1k web pages).
CT_PAGE_TYPE = '1k'
# Name of the Cluster Telemetry benchmark-runner binary to download.
CT_BINARY = 'run_chromium_perf_swarming'
# Isolate file describing the inputs of each swarming task.
CT_ISOLATE = 'ct_top1k.isolate'

# Number of slaves to shard CT runs to.
DEFAULT_CT_NUM_SLAVES = 100

# Only upload results to chromeperf if the number of reported webpages is equal
# to or more than the threshold.
NUM_WEBPAGES_REPORTED_THRESHOLD = 940
def _DownloadAndExtractBinary(api):
  """Downloads and unzips the prebuilt Chromium binary for this build.

  The archive URL comes from the 'parent_build_archive_url' build property.
  """
  archive_url = api.properties['parent_build_archive_url']
  api.archive.download_and_unzip_build(
      step_name='Download and Extract Binary',
      target='Release',
      # build_url is a required parameter of the API, but has no effect here.
      build_url=None,
      build_archive_url=archive_url)
def _GetMasterNameInPerfFormat(master_name):
"""Returns the master name in the capitalized format expected by chromeperf.
Eg: chromium.perf.fyi becomes ChromiumPerfFyi.
"""
return ''.join(x for x in master_name.title() if x != '.')
def RunSteps(api):
  """Runs a CT perf benchmark sharded over swarming and uploads to chromeperf.

  Picks the benchmark from the builder name, shards the top-1k page set over
  ct_num_slaves swarming tasks, merges each task's telemetry chart-data JSON
  into one results_json, and posts it to the perf dashboard when enough
  webpages reported results.
  """
  # Figure out which benchmark to use.
  buildername = api.properties['buildername']
  if 'Repaint' in buildername:
    benchmark = 'repaint'
  elif 'RR' in buildername:
    benchmark = 'rasterize_and_record_micro'
  else:
    raise Exception('Do not recognise the buildername %s.' % buildername)

  # Checkout CT deps.
  api.ct.checkout_dependencies()
  # Setup swarming.
  api.skia_swarming.setup(api.path['checkout'].join('tools', 'luci-go'))
  # Download the prebuilt chromium binary.
  _DownloadAndExtractBinary(api)
  # Download Cluster Telemetry binary.
  api.ct.download_CT_binary(CT_BINARY)
  # Delete swarming_temp_dir to ensure it starts from a clean slate.
  api.file.rmtree('swarming temp dir', api.skia_swarming.swarming_temp_dir)

  ct_num_slaves = api.properties.get('ct_num_slaves', DEFAULT_CT_NUM_SLAVES)
  for slave_num in range(1, ct_num_slaves + 1):
    # Download page sets and archives.
    api.ct.download_page_artifacts(CT_PAGE_TYPE, slave_num)
    # Create this slave's isolated.gen.json file to use for batcharchiving.
    isolate_dir = api.path['checkout'].join('chrome')
    isolate_path = isolate_dir.join(CT_ISOLATE)
    extra_variables = {
        'SLAVE_NUM': str(slave_num),
        'BENCHMARK': benchmark,
    }
    api.skia_swarming.create_isolated_gen_json(
        isolate_path, isolate_dir, 'linux', 'ct-%s' % slave_num,
        extra_variables)

  # Batcharchive everything on the isolate server for efficiency.
  tasks_to_swarm_hashes = api.skia_swarming.batcharchive(
      targets=['ct-%s' % num for num in range(1, ct_num_slaves+1)])
  # Sort the list to go through tasks in order.
  tasks_to_swarm_hashes.sort()

  # Trigger all swarming tasks.
  tasks = api.skia_swarming.trigger_swarming_tasks(
      tasks_to_swarm_hashes,
      dimensions={'os': 'Ubuntu-14.04',
                  'gpu': '10de:104a',
                  'cpu': 'x86-64',
                  'pool': 'Chrome'})

  # The format of results_json is described in https://goo.gl/LmRBDk
  results_json = {
      'master': _GetMasterNameInPerfFormat(api.properties['mastername']),
      'bot': api.properties['buildername'],
      'chart_data': {},
      'point_id': int(api.time.time()),
      'versions': {'chromium': api.properties['git_revision']},
      'supplemental': {},
  }

  # Now collect all tasks and populate results_json.
  slave_num = 0
  num_webpages_reported = 0
  for task in tasks:
    slave_num += 1
    api.skia_swarming.collect_swarming_task(task)
    output_dir = api.skia_swarming.tasks_output_dir.join(task.title).join('0')
    output_files = api.file.listdir('output dir', output_dir)
    if not output_files:
      raise api.step.StepFailure(
          'No output files were found for slave%d' % slave_num)
    slave_data_found = False
    # Loop through all output files and gather results in results_json.
    for json_output_file in output_files:
      json_output = api.json.read(
          'read output json', output_dir.join(json_output_file)).json.output
      if not json_output:
        # The output of some webpages could be empty if they repeatedly crashed.
        continue
      if not results_json['chart_data']:
        # Initialize the chart_data dict since it does not exist yet.
        results_json['chart_data'] = json_output
        # Empty out the 'summary' values of all fields because we will
        # recalculate them in the next section.
        results_json_charts = results_json['chart_data']['charts']
        for field_name in json_output['charts'].keys():
          results_json_charts[field_name]['summary']['values'] = []
      # The following block of code does the following:
      #
      # 1. Loop through all fields.
      # 2. Loop through all webpages in the field.
      # 3. Add the webpage to results_json_charts[field_name].
      # 4. Find all values of the webpage for this field.
      # 5. Calculate the average of webpage's values.
      # 6. Append the webpage's average to the 'summary' values of this
      #    field.
      # 7. Update the num_webpages_reported variable with the number of
      #    entries in the 'summary' values of this field.
      #
      # These steps are done to appropriately update the results_json dict with
      # new webpages.
      #
      results_json_charts = results_json['chart_data']['charts']
      # 1. Loop through all fields.
      for field_name in json_output['charts'].keys():
        slave_data_found = True
        # 2. Loop through all webpages in the field.
        for webpage_name in json_output['charts'][field_name].keys():
          if webpage_name == 'summary':
            # We will populate the summary section separately below.
            continue
          else:
            # 3. Add the webpage to results_json_charts[field_name].
            results_json_charts[field_name][webpage_name] = (
                json_output['charts'][field_name][webpage_name])
            # 4. Find all values of the webpage for this field.
            values = results_json_charts[field_name][webpage_name]['values']
            # 5. Calculate the average of webpage's values.
            values_avg = sum(values)/len(values)
            # 6. Append the webpage's average to the 'summary' values of this
            #    field.
            results_json_charts[field_name]['summary']['values'].append(
                values_avg)
        # 7. Update the num_webpages_reported variable with the number of
        #    entries in the 'summary' values of this field.
        num_webpages_reported = max(
            len(results_json_charts[field_name]['summary']['values']),
            num_webpages_reported)
    if not slave_data_found:
      # Throw a failure if this slave had no results.
      raise api.step.StepFailure('Received no data from slave #%d' % slave_num)

  # Add num_webpages_reported as a custom field to charts.
  results_json['chart_data']['charts']['num_webpages_reported'] = {
      'summary': {
          'important': True,
          'type': 'scalar',
          'name': 'num_webpages_reported',
          'std': 0.0,
          'units': 'num',
          'value': num_webpages_reported,
      }
  }

  # Upload results_json to the perf dashboard only if the number of reported
  # webpages matches the threshold.
  num_webpages_reported_threshold = api.properties.get(
      'num_webpages_reported_threshold', NUM_WEBPAGES_REPORTED_THRESHOLD)
  if num_webpages_reported >= num_webpages_reported_threshold:
    api.perf_dashboard.set_default_config()
    api.perf_dashboard.post(results_json)
    # Set build property that displays how many webpages reported results.
    api.step.active_result.presentation.properties['Number of webpages'] = (
        num_webpages_reported)
def _GetTestJsonOutput(webpage, values, populate_charts=True):
json_output = {
"trace_rerun_options": [],
"format_version": "0.1",
"benchmark_description": "Measures rasterize and record performance for "
"Cluster Telemetry.",
"charts": {},
"benchmark_metadata": {
"rerun_options": [],
"type": "telemetry_benchmark",
"name": "rasterize_and_record_micro_ct",
"description": "Measures rasterize and record performance for "
"Cluster Telemetry."
},
"next_version": "0.2",
"benchmark_name": "rasterize_and_record_micro_ct"
}
if populate_charts:
json_output['charts'] = {
"viewport_picture_size": {
webpage: {
"std": 0.0,
"name": "viewport_picture_size",
"type": "list_of_scalar_values",
"important": True,
"values": values,
"units": "bytes",
"page_id": 0
},
"summary": {
"std": 0.0,
"name": "viewport_picture_size",
"important": True,
"values": values,
"units": "bytes",
"type": "list_of_scalar_values"
}
}
}
return json_output
def GenTests(api):
  """Yields recipe simulation tests.

  Covers: both benchmarks (RR, Repaint), an unrecognised builder,
  below-threshold upload suppression, an empty output directory, a slave with
  no JSON output, and a failing swarming task.
  """
  mastername = 'chromium.perf.fyi'
  slavename = 'slave50-c1'
  parent_build_archive_url = 'http:/dummy-url.com'
  parent_got_swarming_client_revision = '12345'
  git_revision = 'xy12z43'
  # Keep the shard count small so simulations stay fast.
  ct_num_slaves = 3
  num_webpages_reported_threshold = 3  # set threshold low for tests.

  # Slave1 file1 and file2.
  json_output_slave1_file1 = _GetTestJsonOutput('http://www.google.com',
                                                [20822, 20824])
  json_output_slave1_file2 = _GetTestJsonOutput('http://www.facebook.com',
                                                [208, 210])
  # Slave2 file1 and file2.
  json_output_slave2_file1 = _GetTestJsonOutput('http://www.amazon.com',
                                                [2, 4])
  json_output_slave2_file2 = _GetTestJsonOutput('http://www.twitter.com',
                                                [8, 10])
  # Slave3 file1 and file2.
  json_output_slave3_file1 = _GetTestJsonOutput('http://www.baidu.com',
                                                [20, 40])
  json_output_slave3_file2 = _GetTestJsonOutput('', [], populate_charts=False)

  yield(
    api.test('CT_Top1k_RR') +
    api.override_step_data(
        'read output json', api.json.output(json_output_slave1_file1)) +
    api.override_step_data(
        'read output json (2)', api.json.output(json_output_slave1_file2)) +
    api.override_step_data(
        'read output json (3)', api.json.output(json_output_slave2_file1)) +
    api.override_step_data(
        'read output json (4)', api.json.output(json_output_slave2_file2)) +
    api.override_step_data(
        'read output json (5)', api.json.output(json_output_slave3_file1)) +
    api.override_step_data(
        'read output json (6)', api.json.output(json_output_slave3_file2)) +
    api.properties(
        buildername='Linux CT Top1k RR Perf',
        mastername=mastername,
        slavename=slavename,
        parent_build_archive_url=parent_build_archive_url,
        parent_got_swarming_client_revision=parent_got_swarming_client_revision,
        git_revision=git_revision,
        ct_num_slaves=ct_num_slaves,
        num_webpages_reported_threshold=num_webpages_reported_threshold,
    )
  )

  yield(
    api.test('CT_Top1k_Repaint') +
    api.override_step_data(
        'read output json', api.json.output(json_output_slave1_file1)) +
    api.override_step_data(
        'read output json (2)', api.json.output(json_output_slave1_file2)) +
    api.override_step_data(
        'read output json (3)', api.json.output(json_output_slave2_file1)) +
    api.override_step_data(
        'read output json (4)', api.json.output(json_output_slave2_file2)) +
    api.override_step_data(
        'read output json (5)', api.json.output(json_output_slave3_file1)) +
    api.override_step_data(
        'read output json (6)', api.json.output(json_output_slave3_file2)) +
    api.properties(
        buildername='Linux CT Top1k Repaint Perf',
        mastername=mastername,
        slavename=slavename,
        parent_build_archive_url=parent_build_archive_url,
        parent_got_swarming_client_revision=parent_got_swarming_client_revision,
        git_revision=git_revision,
        ct_num_slaves=ct_num_slaves,
        num_webpages_reported_threshold=num_webpages_reported_threshold,
    )
  )

  # Unknown builder name: RunSteps raises before doing any work.
  yield(
    api.test('CT_Top1k_Unsupported') +
    api.properties(
        buildername='Linux CT Top1k Unsupported Perf',
        mastername=mastername,
        slavename=slavename,
        parent_build_archive_url=parent_build_archive_url,
        parent_got_swarming_client_revision=parent_got_swarming_client_revision,
        git_revision=git_revision,
        ct_num_slaves=ct_num_slaves,
        num_webpages_reported_threshold=num_webpages_reported_threshold,
    ) +
    api.expect_exception('Exception')
  )

  # Threshold of 6 exceeds the number of reported pages, so no upload happens.
  yield(
    api.test('CT_Top1k_less_than_threshold') +
    api.override_step_data(
        'read output json', api.json.output(json_output_slave1_file1)) +
    api.override_step_data(
        'read output json (2)', api.json.output(json_output_slave1_file2)) +
    api.override_step_data(
        'read output json (3)', api.json.output(json_output_slave2_file1)) +
    api.override_step_data(
        'read output json (4)', api.json.output(json_output_slave2_file2)) +
    api.override_step_data(
        'read output json (5)', api.json.output(json_output_slave3_file1)) +
    api.override_step_data(
        'read output json (6)', api.json.output(json_output_slave3_file2)) +
    api.properties(
        buildername='Linux CT Top1k Repaint Perf',
        mastername=mastername,
        slavename=slavename,
        parent_build_archive_url=parent_build_archive_url,
        parent_got_swarming_client_revision=parent_got_swarming_client_revision,
        git_revision=git_revision,
        ct_num_slaves=ct_num_slaves,
        num_webpages_reported_threshold=6,
    )
  )

  # Empty output directory triggers a StepFailure for that slave.
  yield(
    api.test('CT_Top1k_slave1_empty_dir') +
    api.override_step_data('listdir output dir', api.json.output([])) +
    api.properties(
        buildername='Linux CT Top1k Repaint Perf',
        mastername=mastername,
        slavename=slavename,
        parent_build_archive_url=parent_build_archive_url,
        parent_got_swarming_client_revision=parent_got_swarming_client_revision,
        git_revision=git_revision,
        ct_num_slaves=ct_num_slaves,
        num_webpages_reported_threshold=num_webpages_reported_threshold,
    )
  )

  yield(
    api.test('CT_Top1k_slave2_no_output') +
    api.override_step_data(
        'read output json', api.json.output(json_output_slave1_file1)) +
    api.override_step_data(
        'read output json (2)', api.json.output(json_output_slave1_file2)) +
    api.properties(
        buildername='Linux CT Top1k Repaint Perf',
        mastername=mastername,
        slavename=slavename,
        parent_build_archive_url=parent_build_archive_url,
        parent_got_swarming_client_revision=parent_got_swarming_client_revision,
        git_revision=git_revision,
        ct_num_slaves=ct_num_slaves,
        num_webpages_reported_threshold=num_webpages_reported_threshold,
    )
  )

  # A swarming task failing (retcode 1).
  yield(
    api.test('CT_Top1k_slave2_failure') +
    api.override_step_data(
        'read output json', api.json.output(json_output_slave1_file1)) +
    api.override_step_data(
        'read output json (2)', api.json.output(json_output_slave1_file2)) +
    api.step_data('ct-2 on Ubuntu-14.04', retcode=1) +
    api.properties(
        buildername='Linux CT Top1k RR Perf',
        mastername=mastername,
        slavename=slavename,
        parent_build_archive_url=parent_build_archive_url,
        parent_got_swarming_client_revision=parent_got_swarming_client_revision,
        git_revision=git_revision,
        ct_num_slaves=ct_num_slaves,
        num_webpages_reported_threshold=num_webpages_reported_threshold,
    )
  )
| |
#!/usr/bin/env python
from __future__ import print_function
#import sys
import re
import numpy as np
import numpy.linalg as npl
import numpy.testing as npt
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
# import the module
import tomographer.tomorun
import tomographer.densedm
import tomographer
class AnalyticalSolutionFn(object):
    """Analytical log-density of the fidelity for the 1-qubit test case.

    See the cxx file test/tomorun/test_tomorun_1qubit_analytic_solution_check.cxx
    """

    def __init__(self, n):
        # n: total number of measurement counts.
        self.n = n
        # log of the normalization constant c = 2 / (15 + 16 n + 4 n^2)
        self.lnc = np.log(2.0) - np.log(15.0 + 16.0*n + 4.0*n*n)

    def lnvalue(self, f):
        """Log of the analytical density evaluated at fidelity f."""
        lf = np.log(f)
        return 2*lf + np.log(1+f) + np.log(1-f) + 2*self.n*lf - self.lnc

    def get_histogram_chi2_red(self, hist):
        """Reduced chi^2 of log(histogram counts) vs. the analytical curve.

        Bins with (near-)zero counts are skipped; each remaining bin's squared
        log-difference is weighted by its relative error bar.
        """
        ln_bin_delta = np.log(hist.params.binResolution())
        weighted_sq_sum = 0
        n_points = 0
        print(hist.bins)
        for k in range(hist.numBins()):
            # ignore (near-)zero data points
            if hist.count(k) < 1e-12:
                continue
            fval = hist.params.binCenterValue(k)
            valln = np.log(hist.count(k))
            errln = hist.errorBar(k) / hist.count(k)
            # squared difference to the expected theoretical value, weighted by 1/err
            theo_valln = self.lnvalue(fval) + ln_bin_delta
            weighted_sq_sum += ((valln - theo_valln) / errln) ** 2
            n_points += 1
            print("Point {:03d}: k={:03d} hist:{:.3g}+-{:.3g} theoln={:.3g} valln={:.3g} errln={:.3g}"
                  .format(n_points, k, hist.count(k), hist.errorBar(k), theo_valln, valln, errln))
        chi2_red = weighted_sq_sum / (n_points - 1)
        print("chi2_red={:.4g} ; sumwsqdiff={:.3g}, numpts={}".format(chi2_red, weighted_sq_sum, n_points))
        return chi2_red
class test_tomorun(unittest.TestCase):
def setUp(self):
self.Emn = [
# +Y
np.array([[0.5, -0.5j],
[0.5j, 0.5]]),
# -Y
np.array([[0.5, 0.5j],
[-0.5j, 0.5]])
]
# tomography data: 500x counts +1, 0x counts -1; when measuring sigma_Y 500 times in total
self.Nm = np.array([ 500, 0 ])
# reference state: +Y
self.rho_ref = np.array([[0.5, -0.5j],
[0.5j, 0.5]])
    def test_values_full(self):
        """Runs tomorun with 'full' jumps and checks the resulting histogram
        against the analytical fidelity distribution (reduced chi^2 < 5)."""
        print("test_values_full()")
        # For debugging --
        #logging.getLogger().setLevel(1)
        #tomographer.cxxlogger.level = 1
        num_repeats = 8
        hist_params = tomographer.HistogramParams(0.985, 1, 200)
        r = tomographer.tomorun.tomorun(
            dim=2,
            Emn=self.Emn,
            Nm=self.Nm,
            fig_of_merit="fidelity",
            ref_state=self.rho_ref,
            num_repeats=num_repeats,
            mhrw_params=tomographer.MHRWParams(
                step_size=0.04,
                n_sweep=25,
                n_run=32768,
                n_therm=1024),
            jumps_method="full",
            hist_params=hist_params,
            progress_fn=lambda report: print(report.getHumanReport()),
            progress_interval_ms=500,
            ctrl_converged_params={'max_allowed_not_converged': 1,
                                   'max_allowed_unknown_notisolated': 1,
                                   'max_allowed_unknown': 3,},
        )
        print("Final report of runs :\n{}".format(r['final_report_runs']))
        print("Final report of everything :\n{}".format(r['final_report']))
        final_histogram = r['final_histogram']
        self.assertTrue(isinstance(final_histogram, tomographer.AveragedErrorBarHistogram))
        simple_final_histogram = r['simple_final_histogram']
        self.assertTrue(isinstance(simple_final_histogram, tomographer.AveragedSimpleRealHistogram))
        print("Tomorun completed in {} seconds".format(r['elapsed_seconds']))
        for k in range(num_repeats):
            runres = r['runs_results'][k]
            self.assertTrue(isinstance(runres, tomographer.mhrwtasks.MHRandomWalkTaskResult))
        ##For debugging --
        #for k in range(num_repeats):
        #    print("Worker #", k, ":", sep='')
        #    print(repr(r['runs_results'][k].stats_results.histogram))
        #    print(r['runs_results'][k].stats_results.error_levels)
        # now, check the actual values of the result
        pok = AnalyticalSolutionFn(np.sum(self.Nm))
        # can't be too picky on chi2, because this test will run many times (so it's bound
        # to fail at some point!)
        self.assertLess(pok.get_histogram_chi2_red(final_histogram), 5)
        npt.assert_array_almost_equal(final_histogram.bins, simple_final_histogram.bins)
    def test_values_light(self):
        """Same as test_values_full, but with the 'light' jumps method."""
        print("test_values_light()")
        num_repeats = 8
        hist_params = tomographer.HistogramParams(0.985, 1, 200)
        r = tomographer.tomorun.tomorun(
            dim=2,
            Emn=self.Emn,
            Nm=self.Nm,
            fig_of_merit="fidelity",
            ref_state=self.rho_ref,
            num_repeats=num_repeats,
            mhrw_params=tomographer.MHRWParams(
                step_size=0.04,
                n_sweep=25,
                n_run=32768,
                n_therm=1024),
            jumps_method="light",
            hist_params=hist_params,
            progress_fn=lambda report: print(report.getHumanReport()),
            progress_interval_ms=500,
            ctrl_converged_params={'max_allowed_not_converged': 1,
                                   'max_allowed_unknown_notisolated': 1,
                                   'max_allowed_unknown': 3,}
        )
        print("Final report of runs :\n{}".format(r['final_report_runs']))
        print("Final report of everything :\n{}".format(r['final_report']))
        final_histogram = r['final_histogram']
        self.assertTrue(isinstance(final_histogram, tomographer.AveragedErrorBarHistogram))
        simple_final_histogram = r['simple_final_histogram']
        self.assertTrue(isinstance(simple_final_histogram, tomographer.AveragedSimpleRealHistogram))
        print("Tomorun completed in {} seconds".format(r['elapsed_seconds']))
        for k in range(num_repeats):
            runres = r['runs_results'][k]
            self.assertTrue(isinstance(runres, tomographer.mhrwtasks.MHRandomWalkTaskResult))
        # now, check the actual values of the result
        pok = AnalyticalSolutionFn(np.sum(self.Nm))
        # can't be too picky on chi2, because this test will run many times (so it's bound
        # to fail at some point!)
        self.assertLess(pok.get_histogram_chi2_red(final_histogram), 5)
        npt.assert_array_almost_equal(final_histogram.bins, simple_final_histogram.bins)
    def test_errbar_convergence(self):
        """Lets the convergence controller run as long as needed, then checks
        that no bin is NOT_CONVERGED and at most 2 bins are UNKNOWN."""
        print("test_errbar_convergence()")
        num_repeats = 2
        hist_params = tomographer.HistogramParams(0.995, 1, 100)
        r = tomographer.tomorun.tomorun(
            dim=2,
            Emn=self.Emn,
            Nm=self.Nm,
            fig_of_merit="fidelity",
            ref_state=self.rho_ref,
            num_repeats=num_repeats,
            mhrw_params=tomographer.MHRWParams(
                step_size=0.04,
                n_sweep=25,
                n_run=8192, # controller will keep running as necessary
                n_therm=1024),
            hist_params=hist_params,
            ctrl_converged_params={'max_allowed_unknown': 2,
                                   'max_allowed_unknown_notisolated': 2,
                                   'max_allowed_not_converged': 0,
                                   # run as long as necessary
                                   'max_add_run_iters': -1},
            progress_fn=lambda report: print(report.getHumanReport()),
            progress_interval_ms=50
        )
        print("Final report of runs :\n{}".format(r['final_report_runs']))
        # inspect the task runs
        for k in range(num_repeats):
            runres = r['runs_results'][k]
            # check that bins have converged as required
            self.assertLessEqual(
                (runres.stats_results.converged_status ==
                 tomographer.BinningAnalysis.NOT_CONVERGED*np.ones([hist_params.num_bins],
                                                                   dtype=int)).sum(),
                0
            )
            self.assertLessEqual(
                (runres.stats_results.converged_status ==
                 tomographer.BinningAnalysis.UNKNOWN_CONVERGENCE*np.ones([hist_params.num_bins],
                                                                         dtype=int)).sum(),
                2
            )
    def test_custom_figofmerit(self):
        """Uses a Python callable (purity) as the figure of merit."""
        print("test_custom_figofmerit()")
        num_repeats = 2
        hist_params = tomographer.HistogramParams(0.99, 1, 20)
        r = tomographer.tomorun.tomorun(
            dim=2,
            Emn=self.Emn,
            Nm=self.Nm,
            fig_of_merit=lambda T: npl.norm(np.dot(T,T.T.conj())), # purity
            ref_state=self.rho_ref,
            num_repeats=num_repeats,
            mhrw_params=tomographer.MHRWParams(
                step_size=0.04,
                n_sweep=25,
                n_run=8192,
                n_therm=1024),
            hist_params=hist_params,
            progress_fn=lambda report: print(report.getHumanReport()),
            progress_interval_ms=100
        )
        print(r['final_report'])
        # just make sure that less than 1% of points are out of [0.99,1]
        self.assertLess(r['final_histogram'].off_chart, 0.01)
    def test_custom_figofmerit_parallel(self):
        """Checks that a Python fig_of_merit still allows workers to run in
        parallel: aborts (via an exception) once >=2 workers are past
        thermalization simultaneously."""
        print("test_custom_figofmerit_parallel()")
        num_repeats = 8
        hist_params = tomographer.HistogramParams(0.99, 1, 20)
        mhrw_params = tomographer.MHRWParams(
            step_size=0.04,
            n_sweep=25,
            n_run=8192,
            n_therm=1024
        )
        # simple mutable namespace shared with the progress callback
        class Ns: pass
        glob = Ns()
        glob.saw_parallel_runs = False
        def is_running(w):
            # a worker is "running" once it is past thermalization
            if w is None:
                return False
            return w.data['kstep'] > (mhrw_params.n_therm+2)*mhrw_params.n_sweep
        def prg_fn(report):
            print(report.getHumanReport())
            num_running = sum([ (1 if is_running(w) else 0) for w in report.workers ])
            if num_running > 1:
                glob.saw_parallel_runs = num_running
                # stop the run early -- the condition has been observed
                raise Exception("Done, test passed.")
        try:
            r = tomographer.tomorun.tomorun(
                dim=2,
                Emn=self.Emn,
                Nm=self.Nm,
                fig_of_merit=lambda T: npl.norm(np.dot(T,T.T.conj())), # purity
                ref_state=self.rho_ref,
                num_repeats=num_repeats,
                mhrw_params=mhrw_params,
                hist_params=hist_params,
                progress_fn=prg_fn,
                progress_interval_ms=50,
                ctrl_step_size_params={'enabled':False},
            )
        except Exception as e:
            # only swallow our own deliberate abort
            if 'Done, test passed' not in str(e):
                raise
        self.assertGreaterEqual(glob.saw_parallel_runs, 2)
    def test_verbose_logging(self):
        """Runs a short tomorun at LONGDEBUG verbosity to exercise logging."""
        print("test_verbose_logging()")
        oldlevel = tomographer.cxxlogger.level
        try:
            logging.getLogger().setLevel(1) # LONGDEBUG
            tomographer.cxxlogger.level = 1 # LONGDEBUG
            def prg_fn(report):
                logging.getLogger("prg_fn").debug(report.getHumanReport())
            r = tomographer.tomorun.tomorun(
                dim=2,
                Emn=self.Emn,
                Nm=self.Nm,
                fig_of_merit=lambda T: npl.norm(np.dot(T,T.T.conj())), # purity
                ref_state=self.rho_ref,
                num_repeats = 4,
                hist_params = tomographer.HistogramParams(0.99, 1, 20),
                mhrw_params = tomographer.MHRWParams(
                    step_size=0.04,
                    # keep it REAL SHORT because we'll have tons of messages
                    n_sweep=5,
                    n_run=10,
                    n_therm=20),
                progress_fn=prg_fn,
                progress_interval_ms=50,
                ctrl_step_size_params={'enabled':False},
            )
        finally:
            # restore level
            logging.getLogger().setLevel(oldlevel)
            tomographer.cxxlogger.level = oldlevel
    def test_verbose_logging_2(self):
        """LONGDEBUG verbosity with a fig_of_merit that raises StopIteration:
        the exception must propagate out of tomorun (regression guard against
        a SEGFAULT when interrupted)."""
        print("test_verbose_logging_2()")
        oldlevel = tomographer.cxxlogger.level
        try:
            logging.getLogger().setLevel(1) # LONGDEBUG
            tomographer.cxxlogger.level = 1 # LONGDEBUG
            def raise_stop_iter(T):
                raise StopIteration
            def prg_fn(report):
                logging.getLogger("prg_fn").debug(report.getHumanReport())
            # test: gets interrupted (test fix for some SEGFAULT I'm getting?)
            with self.assertRaises(StopIteration):
                r = tomographer.tomorun.tomorun(
                    dim=2,
                    Emn=self.Emn,
                    Nm=self.Nm,
                    fig_of_merit=raise_stop_iter,
                    num_repeats = 4,
                    hist_params = tomographer.HistogramParams(0.99, 1, 20),
                    mhrw_params = tomographer.MHRWParams(
                        step_size=0.04,
                        # keep it REAL SHORT because we'll have tons of messages
                        n_sweep=5,
                        n_run=10,
                        n_therm=20),
                    progress_fn=prg_fn,
                    progress_interval_ms=50,
                    ctrl_step_size_params={'enabled':False},
                )
        finally:
            # restore level
            logging.getLogger().setLevel(oldlevel)
            tomographer.cxxlogger.level = oldlevel
    def test_too_few_runs(self):
        """Smoke test: tomorun must complete even with very few iterations
        (controllers disabled so it does not extend the run)."""
        print("test_too_few_runs()")
        num_repeats = 2
        hist_params = tomographer.HistogramParams(0.99, 1, 10)
        r = tomographer.tomorun.tomorun(
            dim=2,
            Emn=self.Emn,
            Nm=self.Nm,
            fig_of_merit="fidelity",
            ref_state=self.rho_ref,
            num_repeats=num_repeats,
            mhrw_params=tomographer.MHRWParams(0.04, 25, 500, 100), # 100 is too few runs
            hist_params=hist_params,
            ctrl_step_size_params={'enabled':False},
            ctrl_converged_params={'enabled':False},
        )
    def test_mhwalker_param_1(self):
        """MHRWParams: the step size is accepted as a positional argument."""
        print("test_mhwalker_param_1()")
        num_repeats = 2
        hist_params = tomographer.HistogramParams(0.99, 1, 10)
        r = tomographer.tomorun.tomorun(
            dim=2,
            Emn=self.Emn,
            Nm=self.Nm,
            fig_of_merit="fidelity",
            ref_state=self.rho_ref,
            num_repeats=num_repeats,
            # understands step size given as positional argument? ---
            mhrw_params=tomographer.MHRWParams(0.04, 25, 500, 1024),
            # ---
            hist_params=hist_params,
            ctrl_step_size_params={'enabled':False},
            ctrl_converged_params={'enabled':False},
        )
        print(r['final_report'])
        for rw in r['runs_results']:
            self.assertAlmostEqual(rw.mhrw_params.mhwalker_params["step_size"], 0.04)
    def test_mhwalker_param_2(self):
        """MHRWParams: the step size is accepted as a keyword argument."""
        print("test_mhwalker_param_2()")
        num_repeats = 2
        hist_params = tomographer.HistogramParams(0.99, 1, 10)
        r = tomographer.tomorun.tomorun(
            dim=2,
            Emn=self.Emn,
            Nm=self.Nm,
            fig_of_merit="fidelity",
            ref_state=self.rho_ref,
            num_repeats=num_repeats,
            # understands step size given as keyword argument? ---
            mhrw_params=tomographer.MHRWParams(step_size=0.04, n_sweep=25, n_therm=1024, n_run=1024),
            # ---
            hist_params=hist_params,
            ctrl_step_size_params={'enabled':False},
            ctrl_converged_params={'enabled':False},
        )
        print(r['final_report'])
        for rw in r['runs_results']:
            self.assertAlmostEqual(rw.mhrw_params.mhwalker_params["step_size"], 0.04)
    def test_mhwalker_param_3(self):
        """MHRWParams: the step size is accepted wrapped in a dictionary."""
        print("test_mhwalker_param_3()")
        num_repeats = 2
        hist_params = tomographer.HistogramParams(0.99, 1, 10)
        r = tomographer.tomorun.tomorun(
            dim=2,
            Emn=self.Emn,
            Nm=self.Nm,
            fig_of_merit="fidelity",
            ref_state=self.rho_ref,
            num_repeats=num_repeats,
            # understands step size given as dictionary? ---
            mhrw_params=tomographer.MHRWParams({'step_size': 0.04}, 25, 500, 1024),
            # ---
            hist_params=hist_params,
            ctrl_step_size_params={'enabled':False},
            ctrl_converged_params={'enabled':False},
        )
        print(r['final_report'])
        for rw in r['runs_results']:
            self.assertAlmostEqual(rw.mhrw_params.mhwalker_params["step_size"], 0.04)
    def test_mhwalker_param_4(self):
        """MHRWParams: a None step size lets the controller auto-adjust it;
        the final value must end up in a sane range."""
        print("test_mhwalker_param_4()")
        num_repeats = 2
        hist_params = tomographer.HistogramParams(0.99, 1, 10)
        r = tomographer.tomorun.tomorun(
            dim=2,
            Emn=self.Emn,
            Nm=self.Nm,
            fig_of_merit="fidelity",
            ref_state=self.rho_ref,
            num_repeats=num_repeats,
            # understands 'None'? ---
            mhrw_params=tomographer.MHRWParams(None, 25, 500, 1024),
            # ---
            hist_params=hist_params,
            ctrl_step_size_params={'enabled':True}, # auto-adjust
            ctrl_converged_params={'enabled':False},
        )
        print(r['final_report'])
        for rw in r['runs_results']:
            self.assertLessEqual(rw.mhrw_params.mhwalker_params["step_size"], 0.1)
            self.assertGreaterEqual(rw.mhrw_params.mhwalker_params["step_size"], 0.005)
    def test_mhwalker_param_5(self):
        """MHRWParams: an empty dict (missing 'step_size' key) also triggers
        controller auto-adjustment."""
        print("test_mhwalker_param_5()")
        num_repeats = 2
        hist_params = tomographer.HistogramParams(0.99, 1, 10)
        r = tomographer.tomorun.tomorun(
            dim=2,
            Emn=self.Emn,
            Nm=self.Nm,
            fig_of_merit="fidelity",
            ref_state=self.rho_ref,
            num_repeats=num_repeats,
            # understands missing key? ---
            mhrw_params=tomographer.MHRWParams({}, 25, 500, 1024),
            # ---
            hist_params=hist_params,
            ctrl_step_size_params={'enabled':True}, # auto-adjust
            ctrl_converged_params={'enabled':False},
        )
        print(r['final_report'])
        for rw in r['runs_results']:
            self.assertLessEqual(rw.mhrw_params.mhwalker_params["step_size"], 0.1)
            self.assertGreaterEqual(rw.mhrw_params.mhwalker_params["step_size"], 0.005)
def test_callback(self):
print("test_callback()")
num_repeats = 2
hist_params = tomographer.HistogramParams(0.985, 1, 200)
class Ns: pass
glob = Ns()
glob.num_callback_calls = 0
def progress_callback(fullstatusreport):
glob.num_callback_calls += 1
print(fullstatusreport.getHumanReport())
intvl_ms = 200
r = tomographer.tomorun.tomorun(
dim=2,
Emn=self.Emn,
Nm=self.Nm,
fig_of_merit="obs-value",
observable=self.rho_ref,
num_repeats=num_repeats,
mhrw_params=tomographer.MHRWParams(
step_size=0.04,
n_sweep=25,
n_run=4*32768,
n_therm=1024),
hist_params=hist_params,
progress_fn=progress_callback,
progress_interval_ms=intvl_ms,
ctrl_converged_params={'enabled': False},
)
# we have the total elapsed time in r['elapsed_seconds']
print("Total elapsed: {:.2g} seconds".format(r['elapsed_seconds']))
nc = 1000*r['elapsed_seconds']/intvl_ms
self.assertGreaterEqual(glob.num_callback_calls, 1)
self.assertLessEqual(glob.num_callback_calls, nc+2)
    def test_error_in_callback(self):
        """An exception raised inside the progress callback must propagate
        out of tomorun() as a Python exception."""
        print("test_error_in_callback()")
        num_repeats = 2
        hist_params = tomographer.HistogramParams(0.985, 1, 200)
        class Ns: pass  # note: unused in this test (leftover scaffolding)
        #
        # Make sure an error in the callback raises an Exception
        #
        def progress_callback(fullstatusreport):
            error-xxx(xyz) # error -- raises a Python exception
            print(fullstatusreport.getHumanReport())
        intvl_ms = 200
        # the NameError from the callback must surface through tomorun()
        with self.assertRaises(Exception):
            r = tomographer.tomorun.tomorun(
                dim=2,
                Emn=self.Emn,
                Nm=self.Nm,
                fig_of_merit="obs-value",
                observable=self.rho_ref,
                num_repeats=num_repeats,
                mhrw_params=tomographer.MHRWParams(
                    step_size=0.04,
                    n_sweep=25,
                    n_run=4*32768,
                    n_therm=1024),
                hist_params=hist_params,
                progress_fn=progress_callback,
                progress_interval_ms=intvl_ms,
                )
def test_convergence_control_1(self):
print("test_convergence_control_1()")
num_repeats = 2
hist_params = tomographer.HistogramParams(0.985, 1, 50)
r = tomographer.tomorun.tomorun(
dim=2,
Emn=self.Emn,
Nm=self.Nm,
fig_of_merit="obs-value",
observable=self.rho_ref,
num_repeats=num_repeats,
mhrw_params=tomographer.MHRWParams(
step_size=0.04,
n_sweep=25,
n_run=1024, # ridicoulously low
n_therm=200),
hist_params=hist_params,
progress_fn=lambda report: print(report.getHumanReport()),
progress_interval_ms=100,
ctrl_converged_params={'enabled': True,
'max_allowed_unknown': 1,
'max_allowed_unknown_notisolated': 1,
'max_allowed_not_converged': 1,
# run as long as is necessary
'max_add_run_iters': -1 },
)
for runres in r['runs_results']:
summary = runres.stats_results.errorBarConvergenceSummary()
self.assertLessEqual(summary.n_unknown, 1)
self.assertLessEqual(summary.n_unknown-summary.n_unknown_isolated, 1)
self.assertLessEqual(summary.n_not_converged, 1)
def test_convergence_control_2(self):
print("test_convergence_control_2()")
num_repeats = 2
hist_params = tomographer.HistogramParams(0.985, 1, 50)
class Ns: pass
glob = Ns()
glob.num_callback_calls = 0
def check_prg(report):
glob.num_callback_calls += 1
for r in report.workers:
if r is not None and r.fraction_done > 1:
raise AssertionError("Control is disabled, random walk should not go past 100%")
r = tomographer.tomorun.tomorun(
dim=2,
Emn=self.Emn,
Nm=self.Nm,
fig_of_merit="obs-value",
observable=self.rho_ref,
num_repeats=num_repeats,
mhrw_params=tomographer.MHRWParams(
step_size=0.1, # too high
n_sweep=100,
n_run=1024, # little
n_therm=1024), # little
hist_params=hist_params,
progress_fn=check_prg,
progress_interval_ms=1,
ctrl_converged_params={'enabled': False},
# keep step size a little off, to prevent bins from converging too nicely:
ctrl_step_size_params={'enabled': False},
)
# make sure progress reporter was called often enough
self.assertGreaterEqual(glob.num_callback_calls, 10)
def test_stepsize_control_1(self):
print("test_stepsize_control_1()")
num_repeats = 2
hist_params = tomographer.HistogramParams(0.985, 1, 50)
r = tomographer.tomorun.tomorun(
dim=2,
Emn=self.Emn,
Nm=self.Nm,
fig_of_merit="obs-value",
observable=self.rho_ref,
num_repeats=num_repeats,
mhrw_params=tomographer.MHRWParams(
step_size=0.5, # must be adjusted
n_sweep=2,
n_run=1024,
n_therm=1),
hist_params=hist_params,
progress_fn=lambda report: print(report.getHumanReport()),
progress_interval_ms=100,
ctrl_step_size_params={'enabled': True},
ctrl_converged_params={'enabled': False},
)
for runres in r['runs_results']:
print("Step size is ",runres.mhrw_params.mhwalker_params['step_size'])
print("and n_therm is ",runres.mhrw_params.n_therm)
self.assertLessEqual( runres.mhrw_params.mhwalker_params['step_size'], 0.1 )
self.assertGreaterEqual( runres.mhrw_params.mhwalker_params['step_size'], 0.01 )
self.assertGreaterEqual( runres.acceptance_ratio, 0.2 )
self.assertLessEqual( runres.acceptance_ratio, 0.4 )
self.assertGreaterEqual( runres.mhrw_params.n_therm , 20 ) # make sure it was adjusted
def test_stepsize_control_2(self):
print("test_stepsize_control_2()")
num_repeats = 2
hist_params = tomographer.HistogramParams(0.985, 1, 50)
orig_step_size = 0.5
orig_n_therm = 2
r = tomographer.tomorun.tomorun(
dim=2,
Emn=self.Emn,
Nm=self.Nm,
fig_of_merit="obs-value",
observable=self.rho_ref,
num_repeats=num_repeats,
mhrw_params=tomographer.MHRWParams(
step_size=orig_step_size, # must be adjusted
n_sweep=2,
n_run=1024,
n_therm=orig_n_therm),
hist_params=hist_params,
progress_fn=lambda report: print(report.getHumanReport()),
progress_interval_ms=100,
ctrl_step_size_params={'enabled': False},
ctrl_converged_params={'enabled': False},
)
for runres in r['runs_results']:
print("Step size is ",runres.mhrw_params.mhwalker_params['step_size'])
print("and n_therm is ",runres.mhrw_params.n_therm)
# make sure it wasn't adjusted
self.assertAlmostEqual( runres.mhrw_params.mhwalker_params['step_size'], orig_step_size )
self.assertEqual( runres.mhrw_params.n_therm, orig_n_therm )
    def test_base_seed(self):
        """Different `rng_base_seed` values must yield different random walks.

        Runs tomorun 64 times with seeds 0..63, keeps one sample point from
        each run, and checks that the kept samples are spread out rather than
        coinciding (which would indicate the seed being ignored).
        """
        print("test_base_seed()")
        hist_params = tomographer.HistogramParams(0.985, 1, 50)
        samples = []
        def save_sample(T):
            # figure-of-merit callback: record the sample, return dummy value
            samples.append(T)
            return 0
        # momentarily disable warnings because tomorun will warn about binning levels too low
        oldlevel = logging.getLogger("tomographer").level
        try:
            logging.getLogger("tomographer").level = logging.ERROR
            runs_samples = []
            for n in range(64):
                # reset samples list, go
                print("Tomorun run #", n)
                samples = []
                res = tomographer.tomorun.tomorun(
                    dim=2,
                    Emn=self.Emn,
                    Nm=self.Nm,
                    fig_of_merit=save_sample,
                    num_repeats=1,
                    mhrw_params=tomographer.MHRWParams(
                        step_size=0.04,
                        n_sweep=25,
                        n_therm=10, # don't let it thermalize too much
                        n_run=1024,),
                    hist_params=hist_params,
                    ctrl_step_size_params={'enabled': False},
                    ctrl_converged_params={'enabled': False},
                    rng_base_seed=n
                    )
                # keep this run's 11th recorded sample for cross-seed comparison
                runs_samples.append(samples[10])
        finally:
            logging.getLogger("tomographer").level = oldlevel
        avgT = sum( runs_samples ) / len(runs_samples)
        print("avgT = ", repr(avgT))
        # product of distances to the mean: collapses towards zero if any
        # run's sample sits right at the average (i.e. runs coincide)
        spread = np.prod( [npl.norm( T - avgT ) for T in runs_samples] )
        print("spread = ", repr(spread))
        self.assertGreater(spread, 0.2)
def test_chokes_on_extra_args(self):
# just make sure that tomorun() raises an exception if unexpected arguments are
# provided.
with self.assertRaises(tomographer.tomorun.TomorunInvalidInputError):
res = tomographer.tomorun.tomorun(
dim=2,
Emn=self.Emn,
Nm=self.Nm,
fig_of_merit='obs-value',
observable=self.rho_ref,
num_repeats=1,
mhrw_params=tomographer.MHRWParams(
step_size=0.04,
n_sweep=25,
n_therm=1024,
n_run=1024,),
hist_params=tomographer.HistogramParams(),
base_seed=1234567890, # misspelled, should be "rng_base_seed"
abc_wrong={'x': 'y'} # just an additional arg
)
# normally, this is not needed as we are being run via pyruntest.py, but it might be
# useful if we want to run individually picked tests
if __name__ == '__main__':
    unittest.main()
| |
# Copyright (c) 2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
# Andreas Hansson
import optparse
import random
import sys
import m5
from m5.objects import *
# This example script stress tests the memory system by creating false
# sharing in a tree topology. At the bottom of the tree is a shared
# memory, and then at each level a number of testers are attached,
# along with a number of caches that themselves fan out to subtrees
# of testers and caches. Thus, it is possible to create a system with
# arbitrarily deep cache hierarchies, sharing or no sharing of caches,
# and testers not only at the L1s, but also at the L2s, L3s etc.
parser = optparse.OptionParser()
# Basic simulation-mode and termination options.
parser.add_option("-a", "--atomic", action="store_true",
                  help="Use atomic (non-timing) mode")
parser.add_option("-b", "--blocking", action="store_true",
                  help="Use blocking caches")
parser.add_option("-l", "--maxloads", metavar="N", default=0,
                  help="Stop after N loads")
parser.add_option("-m", "--maxtick", type="int", default=m5.MaxTick,
                  metavar="T",
                  help="Stop after T ticks")
# The tree specification consists of two colon-separated lists of one
# or more integers, one for the caches, and one for the testers. The
# first integer is the number of caches/testers closest to main
# memory. Each cache then fans out to a subtree. The last integer in
# the list is the number of caches/testers associated with the
# uppermost level of memory. The other integers (if any) specify the
# number of caches/testers connected at each level of the crossbar
# hierarchy. The tester string should have one element more than the
# cache string as there should always be testers attached to the
# uppermost caches.
parser.add_option("-c", "--caches", type="string", default="2:2:1",
                  help="Colon-separated cache hierarchy specification, "
                  "see script comments for details "
                  "[default: %default]")
parser.add_option("-t", "--testers", type="string", default="1:1:0:2",
                  help="Colon-separated tester hierarchy specification, "
                  "see script comments for details "
                  "[default: %default]")
parser.add_option("-f", "--functional", type="int", default=10,
                  metavar="PCT",
                  help="Target percentage of functional accesses "
                  "[default: %default]")
parser.add_option("-u", "--uncacheable", type="int", default=10,
                  metavar="PCT",
                  help="Target percentage of uncacheable accesses "
                  "[default: %default]")
parser.add_option("-r", "--random", action="store_true",
                  help="Generate a random tree topology")
parser.add_option("--progress", type="int", default=100000,
                  metavar="NLOADS",
                  help="Progress message interval "
                  "[default: %default]")
parser.add_option("--sys-clock", action="store", type="string",
                  default='1GHz',
                  help = """Top-level clock for blocks running at system
                  speed""")
(options, args) = parser.parse_args()
# the script is configured entirely through options; positional
# arguments are a usage error
if args:
    print "Error: script doesn't take any positional arguments"
    sys.exit(1)
# Get the total number of testers
def numtesters(cachespec, testerspec):
    # Total number of testers in the whole tree: the tester count given for
    # each level is multiplied by the accumulated fan-out of the cache
    # levels below it (one cache level == one fan-out factor).
    fanouts = [1]
    for ncaches in cachespec:
        fanouts.append(fanouts[-1] * ncaches)
    return sum(t * m for t, m in zip(testerspec, fanouts))
block_size = 64  # bytes per cache line; also caps the total tester count
# Start by parsing the command line options and do some basic sanity
# checking
if options.random:
    # Generate a tree with a valid number of testers
    while True:
        tree_depth = random.randint(1, 4)
        cachespec = [random.randint(1, 3) for i in range(tree_depth)]
        testerspec = [random.randint(1, 3) for i in range(tree_depth + 1)]
        # resample until the tester count fits within one cache block
        if numtesters(cachespec, testerspec) < block_size:
            break
    print "Generated random tree -c", ':'.join(map(str, cachespec)), \
        "-t", ':'.join(map(str, testerspec))
else:
    try:
        cachespec = [int(x) for x in options.caches.split(':')]
        testerspec = [int(x) for x in options.testers.split(':')]
    except:
        print "Error: Unable to parse caches or testers option"
        sys.exit(1)
    # sanity-check the tree shape (see the comments above the -c/-t
    # options for the expected format)
    if len(cachespec) < 1:
        print "Error: Must have at least one level of caches"
        sys.exit(1)
    if len(cachespec) != len(testerspec) - 1:
        print "Error: Testers must have one element more than caches"
        sys.exit(1)
    if testerspec[-1] == 0:
        print "Error: Must have testers at the uppermost level"
        sys.exit(1)
    for t in testerspec:
        if t < 0:
            print "Error: Cannot have a negative number of testers"
            sys.exit(1)
    for c in cachespec:
        if c < 1:
            print "Error: Must have 1 or more caches at each level"
            sys.exit(1)
    # the false-sharing setup limits the tester count to the block size
    if numtesters(cachespec, testerspec) > block_size:
        print "Error: Limited to %s testers because of false sharing" \
            % (block_size)
        sys.exit(1)
# Define a prototype L1 cache that we scale for all successive levels
proto_l1 = Cache(size = '32kB', assoc = 4,
                 tag_latency = 1, data_latency = 1, response_latency = 1,
                 tgts_per_mshr = 8, clusivity = 'mostly_incl',
                 writeback_clean = True)
# blocking caches are modelled with a single MSHR
if options.blocking:
    proto_l1.mshrs = 1
else:
    proto_l1.mshrs = 4
cache_proto = [proto_l1]
# Now add additional cache levels (if any) by scaling L1 params, the
# first element is Ln, and the last element L1
for scale in cachespec[:-1]:
    # Clone previous level and update params: size, associativity and
    # MSHR count grow with the fan-out, latencies grow 10x per level
    prev = cache_proto[0]
    next = prev()
    next.size = prev.size * scale
    next.tag_latency = prev.tag_latency * 10
    next.data_latency = prev.data_latency * 10
    next.response_latency = prev.response_latency * 10
    next.assoc = prev.assoc * scale
    next.mshrs = prev.mshrs * scale
    # Swap the inclusivity/exclusivity at each level. L2 is mostly
    # exclusive with respect to L1, L3 mostly inclusive, L4 mostly
    # exclusive etc.
    next.writeback_clean = not prev.writeback_clean
    if (prev.clusivity.value == 'mostly_incl'):
        next.clusivity = 'mostly_excl'
    else:
        next.clusivity = 'mostly_incl'
    cache_proto.insert(0, next)
# Make a prototype for the tester to be used throughout
proto_tester = MemTest(max_loads = options.maxloads,
                       percent_functional = options.functional,
                       percent_uncacheable = options.uncacheable,
                       progress_interval = options.progress)
# Set up the system along with a simple memory and reference memory
system = System(physmem = SimpleMemory(),
                cache_line_size = block_size)
system.voltage_domain = VoltageDomain(voltage = '1V')
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
                                   voltage_domain = system.voltage_domain)
# For each level, track the next subsys index to use
next_subsys_index = [0] * (len(cachespec) + 1)
# Recursive function to create a sub-tree of the cache and tester
# hierarchy
def make_cache_level(ncaches, prototypes, level, next_cache):
    """Recursively build one level of the cache/tester tree.

    ncaches    -- remaining per-level cache counts, current level first
    prototypes -- matching list of cache prototypes (current level first)
    level      -- levels still to build; 0 means only testers remain
    next_cache -- cache one level below that this subtree connects into
                  (None only at the very bottom, next to memory)
    """
    global next_subsys_index, proto_l1, testerspec, proto_tester
    index = next_subsys_index[level]
    next_subsys_index[level] += 1
    # Create a subsystem to contain the crossbar and caches, and
    # any testers
    subsys = SubSystem()
    setattr(system, 'l%dsubsys%d' % (level, index), subsys)
    # The levels are indexing backwards through the list
    ntesters = testerspec[len(cachespec) - level]
    # Scale the progress threshold as testers higher up in the tree
    # (smaller level) get a smaller portion of the overall bandwidth,
    # and also make the interval of packet injection longer for the
    # testers closer to the memory (larger level) to prevent them
    # hogging all the bandwidth
    limit = (len(cachespec) - level + 1) * 100000000
    testers = [proto_tester(interval = 10 * (level * level + 1),
                            progress_check = limit) \
               for i in xrange(ntesters)]
    if ntesters:
        subsys.tester = testers
    if level != 0:
        # Create a crossbar and add it to the subsystem, note that
        # we do this even with a single element on this level
        xbar = L2XBar()
        subsys.xbar = xbar
        if next_cache:
            xbar.master = next_cache.cpu_side
        # Create and connect the caches, both the ones fanning out
        # to create the tree, and the ones used to connect testers
        # on this level
        tree_caches = [prototypes[0]() for i in xrange(ncaches[0])]
        tester_caches = [proto_l1() for i in xrange(ntesters)]
        subsys.cache = tester_caches + tree_caches
        for cache in tree_caches:
            cache.mem_side = xbar.slave
            # recurse to build the subtree hanging off this cache
            make_cache_level(ncaches[1:], prototypes[1:], level - 1, cache)
        for tester, cache in zip(testers, tester_caches):
            tester.port = cache.cpu_side
            cache.mem_side = xbar.slave
    else:
        # topmost level: only testers, connected straight to next_cache
        if not next_cache:
            print "Error: No next-level cache at top level"
            sys.exit(1)
        if ntesters > 1:
            # Create a crossbar and add it to the subsystem
            xbar = L2XBar()
            subsys.xbar = xbar
            xbar.master = next_cache.cpu_side
            for tester in testers:
                tester.port = xbar.slave
        else:
            # Single tester
            testers[0].port = next_cache.cpu_side
# Top level call to create the cache hierarchy, bottom up
make_cache_level(cachespec, cache_proto, len(cachespec), None)
# Connect the lowest level crossbar to the memory
last_subsys = getattr(system, 'l%dsubsys0' % len(cachespec))
last_subsys.xbar.master = system.physmem.port
last_subsys.xbar.point_of_coherency = True
root = Root(full_system = False, system = system)
# select the memory mode requested on the command line
if options.atomic:
    root.system.mem_mode = 'atomic'
else:
    root.system.mem_mode = 'timing'
# The system port is never used in the tester so merely connect it
# to avoid problems
root.system.system_port = last_subsys.xbar.slave
# Instantiate configuration
m5.instantiate()
# Simulate until program terminates
exit_event = m5.simulate(options.maxtick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
| |
from __future__ import unicode_literals
import mock
from io import BytesIO
from textwrap import dedent
from diff_cover.diff_reporter import BaseDiffReporter
from diff_cover.violations_reporter import BaseViolationReporter, Violation
from diff_cover.report_generator import (
BaseReportGenerator, HtmlReportGenerator,
StringReportGenerator, TemplateReportGenerator
)
from diff_cover.tests.helpers import (
load_fixture, assert_long_str_equal, unittest
)
class SimpleReportGenerator(BaseReportGenerator):
    """
    Bare-bones concrete implementation of a report generator.

    Exposes the statistics helpers inherited from `BaseReportGenerator`
    without producing any actual output.
    """

    # NOTE: no __init__ override is needed -- the previous override only
    # delegated to the base-class constructor with identical arguments.

    def generate_report(self, output_file):
        """Intentionally produce no output."""
        pass
class BaseReportGeneratorTest(unittest.TestCase):
    """
    Base class for constructing test cases of report generators.

    Subclasses set `REPORT_GENERATOR_CLASS` to the generator under test;
    `setUp()` then builds an instance of it wired to mocked coverage and
    diff reporters whose replies are configured via the `set_*()` helpers
    below.
    """

    # Test data, returned by default from the mocks
    SRC_PATHS = set(['file1.py', 'subdir/file2.py'])
    LINES = [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]
    VIOLATIONS = [Violation(n, None) for n in (10, 11, 20)]
    MEASURED = [1, 2, 3, 4, 7, 10, 11, 15, 20, 30]
    XML_REPORT_NAME = ["reports/coverage.xml"]
    DIFF_REPORT_NAME = "master"

    # Subclasses override this to provide the class under test
    REPORT_GENERATOR_CLASS = None

    # Snippet returned by the mock
    SNIPPET = u"<div>Snippet with \u1235 \u8292 unicode</div>"
    SNIPPET_STYLE = '.css { color:red }'

    def setUp(self):
        # Create mocks of the dependencies
        self.coverage = mock.MagicMock(BaseViolationReporter)
        self.diff = mock.MagicMock(BaseDiffReporter)
        # Undo all mock.patch() calls started below when the test ends
        self.addCleanup(mock.patch.stopall)
        # Patch snippet loading to always return the same string
        self._load_snippets_html = mock.patch(
            'diff_cover.snippets.Snippet.load_snippets_html'
        ).start()
        self.set_num_snippets(0)
        # Patch snippet style
        style_defs = mock.patch(
            'diff_cover.snippets.Snippet.style_defs'
        ).start()
        style_defs.return_value = self.SNIPPET_STYLE
        # Set the names of the XML and diff reports
        self.coverage.name.return_value = self.XML_REPORT_NAME
        self.diff.name.return_value = self.DIFF_REPORT_NAME
        # Configure the mocks: per-path lookups are backed by plain dicts
        # which the set_*() helpers below fill in
        self.set_src_paths_changed([])
        self._lines_dict = dict()
        self.diff.lines_changed.side_effect = self._lines_dict.get
        self._violations_dict = dict()
        self.coverage.violations.side_effect = self._violations_dict.get
        self._measured_dict = dict()
        self.coverage.measured_lines.side_effect = self._measured_dict.get
        # Create a concrete instance of a report generator
        self.report = self.REPORT_GENERATOR_CLASS(self.coverage, self.diff)

    def set_src_paths_changed(self, src_paths):
        """
        Patch the dependency `src_paths_changed()` return value
        """
        self.diff.src_paths_changed.return_value = src_paths

    def set_lines_changed(self, src_path, lines):
        """
        Patch the dependency `lines_changed()` to return
        `lines` when called with argument `src_path`.
        """
        self._lines_dict.update({src_path: lines})

    def set_violations(self, src_path, violations):
        """
        Patch the dependency `violations()` to return
        `violations` when called with argument `src_path`.
        """
        self._violations_dict.update({src_path: violations})

    def set_measured(self, src_path, measured):
        """
        Patch the dependency `measured_lines()` to return
        `measured` when called with argument `src_path`.
        """
        self._measured_dict.update({src_path: measured})

    def set_num_snippets(self, num_snippets):
        """
        Patch the dependency `Snippet.load_snippets_html()`
        to return `num_snippets` of the fake snippet HTML.
        """
        self._load_snippets_html.return_value = \
            num_snippets * [self.SNIPPET]

    def use_default_values(self):
        """
        Configure the mocks to use default values
        provided by class constants.

        All source files are given the same line, violation,
        and measured information.
        """
        self.set_src_paths_changed(self.SRC_PATHS)
        for src in self.SRC_PATHS:
            self.set_lines_changed(src, self.LINES)
            self.set_violations(src, self.VIOLATIONS)
            self.set_measured(src, self.MEASURED)
        self.set_num_snippets(0)

    def assert_report(self, expected):
        """
        Generate a report and assert that it matches
        the string `expected`.
        """
        # Create a buffer for the output
        output = BytesIO()
        # Generate the report
        self.report.generate_report(output)
        # Get the output
        output_str = output.getvalue()
        output.close()
        # Verify that we got the expected string
        assert_long_str_equal(expected, output_str, strip=True)
class SimpleReportGeneratorTest(BaseReportGeneratorTest):
    """Exercise the statistics helpers of `BaseReportGenerator` through the
    bare-bones `SimpleReportGenerator` subclass."""

    REPORT_GENERATOR_CLASS = SimpleReportGenerator

    def setUp(self):
        super(SimpleReportGeneratorTest, self).setUp()
        self.use_default_values()

    def test_src_paths(self):
        self.assertEqual(self.report.src_paths(), self.SRC_PATHS)

    def test_coverage_name(self):
        self.assertEqual(
            self.report.coverage_report_name(), self.XML_REPORT_NAME)

    def test_diff_name(self):
        self.assertEqual(
            self.report.diff_report_name(), self.DIFF_REPORT_NAME)

    def test_percent_covered(self):
        # Both files share identical diff/coverage data: 6 lines are both
        # changed and measured, of which 4 are covered.
        expected_pct = 4.0 / 6 * 100
        for src_path in self.SRC_PATHS:
            self.assertAlmostEqual(
                self.report.percent_covered(src_path), expected_pct)

    def test_violation_lines(self):
        # Each file carries the same coverage information.
        for src_path in self.SRC_PATHS:
            self.assertEqual(
                self.report.violation_lines(src_path), [10, 11])

    def test_src_with_no_info(self):
        # A file absent from the diff reports no coverage information.
        self.assertNotIn('unknown.py', self.report.src_paths())
        self.assertIs(self.report.percent_covered('unknown.py'), None)
        self.assertEqual(self.report.violation_lines('unknown.py'), [])

    def test_src_paths_not_measured(self):
        # A source file with no coverage info is treated as non-existent.
        self.set_measured('file1.py', [])
        self.set_violations('file1.py', [])
        self.assertNotIn('file1.py', self.report.src_paths())
        self.assertIs(self.report.percent_covered('file1.py'), None)
        self.assertEqual(self.report.violation_lines('file1.py'), [])

    def test_total_num_lines(self):
        # Identical per-file data, so totals scale with the file count.
        per_file = len(set(self.MEASURED).intersection(self.LINES))
        self.assertEqual(
            self.report.total_num_lines(), len(self.SRC_PATHS) * per_file)

    def test_total_num_missing(self):
        # 3 lines are uncovered per file, 2 of which are in the diff.
        self.assertEqual(
            self.report.total_num_violations(), len(self.SRC_PATHS) * 2)

    def test_total_percent_covered(self):
        # Identical per-file coverage, so the total matches each file's rate.
        self.assertEqual(self.report.total_percent_covered(), 66)
class TemplateReportGeneratorTest(BaseReportGeneratorTest):
    """Tests for `TemplateReportGenerator.combine_adjacent_lines()`."""

    REPORT_GENERATOR_CLASS = TemplateReportGenerator

    def _check_cases(self, cases):
        # Each case is a (line-number list, expected string list) pair.
        for given, expected in cases:
            self.assertEqual(
                expected,
                TemplateReportGenerator.combine_adjacent_lines(given))

    def test_combine_adjacent_lines_no_adjacent(self):
        self._check_cases([
            ([1, 3], ["1", "3"]),
            ([1, 5, 7, 10], ["1", "5", "7", "10"]),
        ])

    def test_combine_adjacent_lines(self):
        self._check_cases([
            ([1, 2, 3, 4, 5, 8, 10, 12, 13, 14, 15],
             ["1-5", "8", "10", "12-15"]),
            ([1, 4, 5, 6, 10], ["1", "4-6", "10"]),
            ([402, 403], ["402-403"]),
        ])

    def test_empty_list(self):
        self.assertEqual(
            [], TemplateReportGenerator.combine_adjacent_lines([]))

    def test_one_number(self):
        self.assertEqual(
            ["1"], TemplateReportGenerator.combine_adjacent_lines([1]))
class StringReportGeneratorTest(BaseReportGeneratorTest):
    """Check the plain-text report produced by `StringReportGenerator`."""

    REPORT_GENERATOR_CLASS = StringReportGenerator

    def test_generate_report(self):
        # Generate a default report
        self.use_default_values()
        # Verify that we got the expected string
        expected = dedent("""
            -------------
            Diff Coverage
            Diff: master
            -------------
            file1.py (66.7%): Missing lines 10-11
            subdir/file2.py (66.7%): Missing lines 10-11
            -------------
            Total:   12 lines
            Missing: 4 lines
            Coverage: 66%
            -------------
        """).strip()
        self.assert_report(expected)

    def test_hundred_percent(self):
        # Have the dependencies return a fully-covered file:
        # one measured line in the diff, no violations
        self.set_src_paths_changed(['file.py'])
        self.set_lines_changed('file.py', [line for line in range(0, 100)])
        self.set_violations('file.py', [])
        self.set_measured('file.py', [2])
        expected = dedent("""
            -------------
            Diff Coverage
            Diff: master
            -------------
            file.py (100%)
            -------------
            Total:   1 line
            Missing: 0 lines
            Coverage: 100%
            -------------
        """).strip()
        self.assert_report(expected)

    def test_empty_report(self):
        # Have the dependencies return an empty report
        # (this is the default)
        expected = dedent("""
            -------------
            Diff Coverage
            Diff: master
            -------------
            No lines with coverage information in this diff.
            -------------
        """).strip()
        self.assert_report(expected)
class HtmlReportGeneratorTest(BaseReportGeneratorTest):
    """Compare `HtmlReportGenerator` output against stored HTML fixtures."""

    REPORT_GENERATOR_CLASS = HtmlReportGenerator

    def test_generate_report(self):
        self.use_default_values()
        self.assert_report(load_fixture('html_report.html'))

    def test_empty_report(self):
        # The mocks report no changed paths by default, which should
        # produce the "empty report" fixture.
        self.assert_report(load_fixture('html_report_empty.html'))

    def test_one_snippet(self):
        # One snippet reported for every source file.
        self.use_default_values()
        self.set_num_snippets(1)
        self.assert_report(
            load_fixture('html_report_one_snippet.html').strip())

    def test_multiple_snippets(self):
        # Several snippets reported for every source file.
        self.use_default_values()
        self.set_num_snippets(2)
        self.assert_report(
            load_fixture('html_report_two_snippets.html').strip())
| |
# Copyright 2013-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for external dependencies useful for
# development purposes, such as testing, debugging, etc..
from __future__ import annotations
import glob
import os
import re
import pathlib
import shutil
import typing as T
from mesonbuild.interpreterbase.decorators import FeatureDeprecated
from .. import mesonlib, mlog
from ..compilers import AppleClangCCompiler, AppleClangCPPCompiler, detect_compiler_for
from ..environment import get_llvm_tool_names
from ..mesonlib import version_compare, stringlistify, extract_as_list, MachineChoice
from .base import DependencyException, DependencyMethods, strip_system_libdirs, SystemDependency
from .cmake import CMakeDependency
from .configtool import ConfigToolDependency
from .factory import DependencyFactory
from .misc import threads_factory
from .pkgconfig import PkgConfigDependency
if T.TYPE_CHECKING:
from ..envconfig import MachineInfo
from .. environment import Environment
from typing_extensions import TypedDict
class JNISystemDependencyKW(TypedDict):
    """Keyword arguments accepted by the JNI system dependency."""
    # requested module names (exact accepted values are defined by the JNI
    # dependency implementation -- confirm there)
    modules: T.List[str]
    # FIXME: When dependency() moves to typed Kwargs, this should inherit
    # from its TypedDict type.
    # optional version constraint string
    version: T.Optional[str]
def get_shared_library_suffix(environment: 'Environment', for_machine: MachineChoice) -> str:
    """Return the shared-library filename suffix for the target machine.

    This is only guaranteed to work for languages that compile to machine
    code, not for languages like C# that use a bytecode and always end in .dll
    """
    machine = environment.machines[for_machine]
    if machine.is_windows():
        return '.dll'
    if machine.is_darwin():
        return '.dylib'
    return '.so'
class GTestDependencySystem(SystemDependency):
    """System-provided GTest: either prebuilt libraries, or the sources
    shipped by the distribution (e.g. under /usr/src/gtest)."""

    def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:
        super().__init__(name, environment, kwargs, language='cpp')
        # 'main' means the caller also wants gtest's main() entry point
        self.main = kwargs.get('main', False)
        self.src_dirs = ['/usr/src/gtest/src', '/usr/src/googletest/googletest/src']
        # gtest needs a threads dependency; give up if it is unavailable
        if not self._add_sub_dependency(threads_factory(environment, self.for_machine, {})):
            self.is_found = False
            return
        self.detect()

    def detect(self) -> None:
        """Prefer prebuilt libraries; fall back to a source checkout."""
        lib = self.clib_compiler.find_library("gtest", self.env, [])
        main_lib = self.clib_compiler.find_library("gtest_main", self.env, [])
        if lib and (main_lib or not self.main):
            # prebuilt libraries found (incl. gtest_main if requested)
            self.is_found = True
            self.compile_args = []
            self.link_args = lib
            if self.main:
                self.link_args += main_lib
            self.sources = []
            self.prebuilt = True
        elif self.detect_srcdir():
            # build gtest from the installed sources instead
            self.is_found = True
            self.compile_args = ['-I' + d for d in self.src_include_dirs]
            self.link_args = []
            self.sources = [self.all_src, self.main_src] if self.main else [self.all_src]
            self.prebuilt = False
        else:
            self.is_found = False

    def detect_srcdir(self) -> bool:
        """Record source/include paths of the first existing source dir."""
        for candidate in self.src_dirs:
            if not os.path.exists(candidate):
                continue
            self.src_dir = candidate
            self.all_src = mesonlib.File.from_absolute_file(
                os.path.join(self.src_dir, 'gtest-all.cc'))
            self.main_src = mesonlib.File.from_absolute_file(
                os.path.join(self.src_dir, 'gtest_main.cc'))
            self.src_include_dirs = [os.path.normpath(os.path.join(self.src_dir, '..')),
                                     os.path.normpath(os.path.join(self.src_dir, '../include')),
                                     ]
            return True
        return False

    def log_info(self) -> str:
        return 'prebuilt' if self.prebuilt else 'building self'

    def log_tried(self) -> str:
        return 'system'
class GTestDependencyPC(PkgConfigDependency):

    """GTest via pkg-config; selects the gtest_main package when main() is wanted."""

    def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
        assert name == 'gtest'
        # The gtest_main pkg-config package additionally bundles main().
        pc_name = 'gtest_main' if kwargs.get('main') else name
        super().__init__(pc_name, environment, kwargs)
class GMockDependencySystem(SystemDependency):
    """System-provided GMock: either a prebuilt library or distro-shipped sources."""

    def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:
        super().__init__(name, environment, kwargs, language='cpp')
        # When main=True the consumer also wants gmock's main() entry point.
        self.main = kwargs.get('main', False)
        if not self._add_sub_dependency(threads_factory(environment, self.for_machine, {})):
            self.is_found = False
            return

        # If we are getting main() from GMock, we definitely
        # want to avoid linking in main() from GTest
        gtest_kwargs = kwargs.copy()
        if self.main:
            gtest_kwargs['main'] = False

        # GMock without GTest is pretty much useless
        # this also mimics the structure given in WrapDB,
        # where GMock always pulls in GTest
        found = self._add_sub_dependency(gtest_factory(environment, self.for_machine, gtest_kwargs))
        if not found:
            self.is_found = False
            return

        # GMock may be a library or just source.
        # Work with both.
        gmock_detect = self.clib_compiler.find_library("gmock", self.env, [])
        gmock_main_detect = self.clib_compiler.find_library("gmock_main", self.env, [])
        if gmock_detect and (not self.main or gmock_main_detect):
            self.is_found = True
            self.link_args += gmock_detect
            if self.main:
                self.link_args += gmock_main_detect
            self.prebuilt = True
            return

        # Fall back to distro locations that ship GMock as sources.
        for d in ['/usr/src/googletest/googlemock/src', '/usr/src/gmock/src', '/usr/src/gmock']:
            if os.path.exists(d):
                self.is_found = True
                # Yes, we need both because there are multiple
                # versions of gmock that do different things.
                d2 = os.path.normpath(os.path.join(d, '..'))
                self.compile_args += ['-I' + d, '-I' + d2, '-I' + os.path.join(d2, 'include')]
                all_src = mesonlib.File.from_absolute_file(os.path.join(d, 'gmock-all.cc'))
                main_src = mesonlib.File.from_absolute_file(os.path.join(d, 'gmock_main.cc'))
                if self.main:
                    self.sources += [all_src, main_src]
                else:
                    self.sources += [all_src]
                self.prebuilt = False
                return

        self.is_found = False

    def log_info(self) -> str:
        if self.prebuilt:
            return 'prebuilt'
        else:
            return 'building self'

    def log_tried(self) -> str:
        return 'system'
class GMockDependencyPC(PkgConfigDependency):

    """GMock via pkg-config; selects the gmock_main package when main() is wanted."""

    def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
        assert name == 'gmock'
        # The gmock_main pkg-config package additionally bundles main().
        pc_name = 'gmock_main' if kwargs.get('main') else name
        super().__init__(pc_name, environment, kwargs)
class LLVMDependencyConfigTool(ConfigToolDependency):

    """
    LLVM uses a special tool, llvm-config, which has arguments for getting
    c args, cxx args, and ldargs as well as version.
    """

    tool_name = 'llvm-config'
    __cpp_blacklist = {'-DNDEBUG'}

    def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
        self.tools = get_llvm_tool_names('llvm-config')

        # Fedora starting with Fedora 30 adds a suffix of the number
        # of bits in the isa that llvm targets, for example, on x86_64
        # and aarch64 the name will be llvm-config-64, on x86 and arm
        # it will be llvm-config-32.
        if environment.machines[self.get_for_machine_from_kwargs(kwargs)].is_64_bit:
            self.tools.append('llvm-config-64')
        else:
            self.tools.append('llvm-config-32')

        # It's necessary for LLVM <= 3.8 to use the C++ linker. For 3.9 and 4.0
        # the C linker works fine if only using the C API.
        super().__init__(name, environment, kwargs, language='cpp')
        self.provided_modules: T.List[str] = []
        self.required_modules: mesonlib.OrderedSet[str] = mesonlib.OrderedSet()
        self.module_details: T.List[str] = []
        if not self.is_found:
            return

        self.provided_modules = self.get_config_value(['--components'], 'modules')
        modules = stringlistify(extract_as_list(kwargs, 'modules'))
        self.check_components(modules)
        opt_modules = stringlistify(extract_as_list(kwargs, 'optional_modules'))
        self.check_components(opt_modules, required=False)

        cargs = mesonlib.OrderedSet(self.get_config_value(['--cppflags'], 'compile_args'))
        self.compile_args = list(cargs.difference(self.__cpp_blacklist))

        if version_compare(self.version, '>= 3.9'):
            self._set_new_link_args(environment)
        else:
            self._set_old_link_args()
        self.link_args = strip_system_libdirs(environment, self.for_machine, self.link_args)
        self.link_args = self.__fix_bogus_link_args(self.link_args)
        if not self._add_sub_dependency(threads_factory(environment, self.for_machine, {})):
            self.is_found = False
            return

    def __fix_bogus_link_args(self, args: T.List[str]) -> T.List[str]:
        """This function attempts to fix bogus link arguments that llvm-config
        generates.

        Currently it works around the following:
            - FreeBSD: when statically linking -l/usr/lib/libexecinfo.so will
              be generated, strip the -l in cases like this.
            - Windows: We may get -LIBPATH:... which is later interpreted as
              "-L IBPATH:...", if we're using an msvc like compilers convert
              that to "/LIBPATH", otherwise to "-L ..."
        """
        new_args = []
        for arg in args:
            if arg.startswith('-l') and arg.endswith('.so'):
                # Slice off exactly the '-l' prefix.  The previous
                # arg.lstrip('-l') stripped a *character set*, which could eat
                # into a path starting with 'l' or '-'.
                new_args.append(arg[2:])
            elif arg.startswith('-LIBPATH:'):
                cpp = self.env.coredata.compilers[self.for_machine]['cpp']
                # Likewise, lstrip('-LIBPATH:') would also strip any leading
                # 'L', 'I', 'B', 'P', 'A', 'T', 'H' or ':' characters from the
                # path itself; slice off the literal prefix instead.
                new_args.extend(cpp.get_linker_search_args(arg[len('-LIBPATH:'):]))
            else:
                new_args.append(arg)
        return new_args

    def __check_libfiles(self, shared: bool) -> None:
        """Use llvm-config's --libfiles to check if libraries exist."""
        mode = '--link-shared' if shared else '--link-static'

        # Set self.required to true to force an exception in get_config_value
        # if the returncode != 0
        restore = self.required
        self.required = True

        try:
            # It doesn't matter what the stage is, the caller needs to catch
            # the exception anyway.
            self.link_args = self.get_config_value(['--libfiles', mode], '')
        finally:
            self.required = restore

    def _set_new_link_args(self, environment: 'Environment') -> None:
        """How to set linker args for LLVM versions >= 3.9"""
        try:
            mode = self.get_config_value(['--shared-mode'], 'link_args')[0]
        except IndexError:
            mlog.debug('llvm-config --shared-mode returned an error')
            self.is_found = False
            return

        if not self.static and mode == 'static':
            # If llvm is configured with LLVM_BUILD_LLVM_DYLIB but not with
            # LLVM_LINK_LLVM_DYLIB and not LLVM_BUILD_SHARED_LIBS (which
            # upstream doesn't recommend using), then llvm-config will lie to
            # you about how to do shared-linking. It wants to link to a a bunch
            # of individual shared libs (which don't exist because llvm wasn't
            # built with LLVM_BUILD_SHARED_LIBS.
            #
            # Therefore, we'll try to get the libfiles, if the return code is 0
            # or we get an empty list, then we'll try to build a working
            # configuration by hand.
            try:
                self.__check_libfiles(True)
            except DependencyException:
                lib_ext = get_shared_library_suffix(environment, self.for_machine)
                libdir = self.get_config_value(['--libdir'], 'link_args')[0]
                # Sort for reproducibility
                matches = sorted(glob.iglob(os.path.join(libdir, f'libLLVM*{lib_ext}')))
                if not matches:
                    if self.required:
                        raise
                    self.is_found = False
                    return

                self.link_args = self.get_config_value(['--ldflags'], 'link_args')
                # The glob above guarantees a 'lib' prefix and a lib_ext
                # suffix, so slice them off exactly.  The previous
                # rstrip(lib_ext)/lstrip('lib') stripped character *sets* and
                # could truncate names ending in 's'/'o' or starting with
                # 'l'/'i'/'b'.
                base = os.path.basename(matches[0])
                libname = base[len('lib'):-len(lib_ext)]
                self.link_args.append(f'-l{libname}')
                return
        elif self.static and mode == 'shared':
            # If, however LLVM_BUILD_SHARED_LIBS is true # (*cough* gentoo *cough*)
            # then this is correct. Building with LLVM_BUILD_SHARED_LIBS has a side
            # effect, it stops the generation of static archives. Therefore we need
            # to check for that and error out on static if this is the case
            try:
                self.__check_libfiles(False)
            except DependencyException:
                if self.required:
                    raise
                self.is_found = False
                return

        link_args = ['--link-static', '--system-libs'] if self.static else ['--link-shared']
        self.link_args = self.get_config_value(
            ['--libs', '--ldflags'] + link_args + list(self.required_modules),
            'link_args')

    def _set_old_link_args(self) -> None:
        """Setting linker args for older versions of llvm.

        Old versions of LLVM bring an extra level of insanity with them.
        llvm-config will provide the correct arguments for static linking, but
        not for shared-linnking, we have to figure those out ourselves, because
        of course we do.
        """
        if self.static:
            self.link_args = self.get_config_value(
                ['--libs', '--ldflags', '--system-libs'] + list(self.required_modules),
                'link_args')
        else:
            # llvm-config will provide arguments for static linking, so we get
            # to figure out for ourselves what to link with. We'll do that by
            # checking in the directory provided by --libdir for a library
            # called libLLVM-<ver>.(so|dylib|dll)
            libdir = self.get_config_value(['--libdir'], 'link_args')[0]
            expected_name = f'libLLVM-{self.version}'
            # Escape the expected name: versions contain '.' which would
            # otherwise act as a regex wildcard, and the extension separator
            # must be a literal dot.
            re_name = re.compile(re.escape(expected_name) + r'\.(so|dll|dylib)$')
            for file_ in os.listdir(libdir):
                if re_name.match(file_):
                    # The regex anchors on the 'lib' prefix, so slicing it off
                    # is exact (lstrip('lib') would strip a character set).
                    self.link_args = [f'-L{libdir}',
                                      '-l{}'.format(os.path.splitext(file_[len('lib'):])[0])]
                    break
            else:
                raise DependencyException(
                    'Could not find a dynamically linkable library for LLVM.')

    def check_components(self, modules: T.List[str], required: bool = True) -> None:
        """Check for llvm components (modules in meson terms).

        The required option is whether the module is required, not whether LLVM
        is required.
        """
        for mod in sorted(set(modules)):
            status = ''

            if mod not in self.provided_modules:
                if required:
                    self.is_found = False
                    if self.required:
                        raise DependencyException(
                            f'Could not find required LLVM Component: {mod}')
                    status = '(missing)'
                else:
                    status = '(missing but optional)'
            else:
                self.required_modules.add(mod)

            self.module_details.append(mod + status)

    def log_details(self) -> str:
        if self.module_details:
            return 'modules: ' + ', '.join(self.module_details)
        return ''
class LLVMDependencyCMake(CMakeDependency):
    """LLVM detection via CMake, driven by a custom CMakeListsLLVM.txt script."""

    def __init__(self, name: str, env: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:
        # Modules requested by the user; forwarded to the CMake helper script.
        self.llvm_modules = stringlistify(extract_as_list(kwargs, 'modules'))
        self.llvm_opt_modules = stringlistify(extract_as_list(kwargs, 'optional_modules'))
        super().__init__(name, env, kwargs, language='cpp')

        # Cmake will always create a statically linked binary, so don't use
        # cmake if dynamic is required
        if not self.static:
            self.is_found = False
            mlog.warning('Ignoring LLVM CMake dependency because dynamic was requested')
            return

        if self.traceparser is None:
            return

        # Extract extra include directories and definitions
        inc_dirs = self.traceparser.get_cmake_var('PACKAGE_INCLUDE_DIRS')
        defs = self.traceparser.get_cmake_var('PACKAGE_DEFINITIONS')
        # LLVM explicitly uses space-separated variables rather than semicolon lists
        if len(defs) == 1:
            defs = defs[0].split(' ')
        temp = ['-I' + x for x in inc_dirs] + defs
        # Deduplicate while preserving order.
        self.compile_args += [x for x in temp if x not in self.compile_args]

        # LLVM requires a threads dependency.
        if not self._add_sub_dependency(threads_factory(env, self.for_machine, {})):
            self.is_found = False
            return

    def _main_cmake_file(self) -> str:
        # Use a custom CMakeLists.txt for LLVM
        return 'CMakeListsLLVM.txt'

    def _extra_cmake_opts(self) -> T.List[str]:
        # Hand the requested module list over to the custom CMake script.
        return ['-DLLVM_MESON_MODULES={}'.format(';'.join(self.llvm_modules + self.llvm_opt_modules))]

    def _map_module_list(self, modules: T.List[T.Tuple[str, bool]], components: T.List[T.Tuple[str, bool]]) -> T.List[T.Tuple[str, bool]]:
        # Map meson module names to the CMake targets recorded by the trace parser.
        res = []
        for mod, required in modules:
            cm_targets = self.traceparser.get_cmake_var(f'MESON_LLVM_TARGETS_{mod}')
            if not cm_targets:
                if required:
                    raise self._gen_exception(f'LLVM module {mod} was not found')
                else:
                    mlog.warning('Optional LLVM module', mlog.bold(mod), 'was not found')
                    continue
            for i in cm_targets:
                res += [(i, required)]
        return res

    def _original_module_name(self, module: str) -> str:
        # Reverse mapping: a CMake target back to the LLVM module name it came from.
        orig_name = self.traceparser.get_cmake_var(f'MESON_TARGET_TO_LLVM_{module}')
        if orig_name:
            return orig_name[0]
        return module
class ValgrindDependency(PkgConfigDependency):

    '''
    Consumers of Valgrind usually only need the compile args and do not want to
    link to its (static) libraries.
    '''

    def __init__(self, env: 'Environment', kwargs: T.Dict[str, T.Any]):
        super().__init__('valgrind', env, kwargs)

    def get_link_args(self, language: T.Optional[str] = None, raw: bool = False) -> T.List[str]:
        # Valgrind is consumed header-only: suppress every pkg-config link arg.
        return []
class ZlibSystemDependency(SystemDependency):

    """System zlib: -lz on platforms that bundle it, compiler search elsewhere."""

    def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
        super().__init__(name, environment, kwargs)

        m = self.env.machines[self.for_machine]

        # I'm not sure this is entirely correct. What if we're cross compiling
        # from something to macOS?
        if ((m.is_darwin() and isinstance(self.clib_compiler, (AppleClangCCompiler, AppleClangCPPCompiler))) or
                m.is_freebsd() or m.is_dragonflybsd() or m.is_android()):
            # No need to set includes,
            # on macos xcode/clang will do that for us.
            # on freebsd zlib.h is in /usr/include
            self.is_found = True
            self.link_args = ['-lz']
        else:
            # Without a clib_compiler we can't find zlib, so just give up.
            if self.clib_compiler is None:
                self.is_found = False
                return

            if self.clib_compiler.get_argument_syntax() == 'msvc':
                # BUG FIX: this list was ['zlib1' 'zlib'] — the missing comma
                # made Python concatenate the literals into the single
                # (nonexistent) name 'zlib1zlib', so neither library was ever
                # found on MSVC.
                libs = ['zlib1', 'zlib']
            else:
                libs = ['z']
            for lib in libs:
                l = self.clib_compiler.find_library(lib, environment, [])
                h = self.clib_compiler.has_header('zlib.h', '', environment, dependencies=[self])
                if l and h[0]:
                    self.is_found = True
                    self.link_args = l
                    break
            else:
                # No candidate library + header pair was found.
                return

        v, _ = self.clib_compiler.get_define('ZLIB_VERSION', '#include <zlib.h>', self.env, [], [self])
        self.version = v.strip('"')
class JNISystemDependency(SystemDependency):

    """JNI headers (and optionally the jvm/awt libraries) from the host JDK."""

    def __init__(self, environment: 'Environment', kwargs: JNISystemDependencyKW):
        super().__init__('jni', environment, T.cast(T.Dict[str, T.Any], kwargs))

        self.feature_since = ('0.62.0', '')

        m = self.env.machines[self.for_machine]

        if 'java' not in environment.coredata.compilers[self.for_machine]:
            detect_compiler_for(environment, 'java', self.for_machine)
        self.javac = environment.coredata.compilers[self.for_machine]['java']
        # The JDK version is taken from the detected javac.
        self.version = self.javac.version

        modules: T.List[str] = mesonlib.listify(kwargs.get('modules', []))
        for module in modules:
            if module not in {'jvm', 'awt'}:
                log = mlog.error if self.required else mlog.debug
                log(f'Unknown JNI module ({module})')
                self.is_found = False
                return

        if 'version' in kwargs and not version_compare(self.version, kwargs['version']):
            mlog.error(f'Incorrect JDK version found ({self.version}), wanted {kwargs["version"]}')
            self.is_found = False
            return

        self.java_home = environment.properties[self.for_machine].get_java_home()
        if not self.java_home:
            # Fall back to deriving JAVA_HOME from the javac on PATH.
            self.java_home = pathlib.Path(shutil.which(self.javac.exelist[0])).resolve().parents[1]

        platform_include_dir = self.__machine_info_to_platform_include_dir(m)
        if platform_include_dir is None:
            mlog.error("Could not find a JDK platform include directory for your OS, please open an issue or provide a pull request.")
            self.is_found = False
            return

        java_home_include = self.java_home / 'include'
        self.compile_args.append(f'-I{java_home_include}')
        self.compile_args.append(f'-I{java_home_include / platform_include_dir}')

        # BUG FIX: mark the dependency found *before* resolving modules.  The
        # original assigned is_found = True unconditionally at the end of
        # __init__, which clobbered the is_found = False set when a requested
        # jvm or awt library could not be located.
        self.is_found = True

        if modules:
            if m.is_windows():
                java_home_lib = self.java_home / 'lib'
                java_home_lib_server = java_home_lib
            else:
                if version_compare(self.version, '<= 1.8.0'):
                    # The JDK and Meson have a disagreement here, so translate it
                    # over. In the event more translation needs to be done, add to
                    # following dict.
                    def cpu_translate(cpu: str) -> str:
                        java_cpus = {
                            'x86_64': 'amd64',
                        }
                        return java_cpus.get(cpu, cpu)

                    java_home_lib = self.java_home / 'jre' / 'lib' / cpu_translate(m.cpu_family)
                    java_home_lib_server = java_home_lib / "server"
                else:
                    java_home_lib = self.java_home / 'lib'
                    java_home_lib_server = java_home_lib / "server"

            if 'jvm' in modules:
                jvm = self.clib_compiler.find_library('jvm', environment, extra_dirs=[str(java_home_lib_server)])
                if jvm is None:
                    mlog.debug('jvm library not found.')
                    self.is_found = False
                else:
                    self.link_args.extend(jvm)

            if 'awt' in modules:
                jawt = self.clib_compiler.find_library('jawt', environment, extra_dirs=[str(java_home_lib)])
                if jawt is None:
                    mlog.debug('jawt library not found.')
                    self.is_found = False
                else:
                    self.link_args.extend(jawt)

    @staticmethod
    def __machine_info_to_platform_include_dir(m: 'MachineInfo') -> T.Optional[str]:
        """Translates the machine information to the platform-dependent include directory

        When inspecting a JDK release tarball or $JAVA_HOME, inside the `include/` directory is a
        platform dependent folder that must be on the target's include path in addition to the
        parent `include/` directory.
        """
        if m.is_linux():
            return 'linux'
        elif m.is_windows():
            return 'win32'
        elif m.is_darwin():
            return 'darwin'
        elif m.is_sunos():
            return 'solaris'

        return None
class JDKSystemDependency(JNISystemDependency):
    """Deprecated alias of the JNI system dependency (the old 'jdk' name)."""

    def __init__(self, environment: 'Environment', kwargs: JNISystemDependencyKW):
        super().__init__(environment, kwargs)

        self.feature_since = ('0.59.0', '')
        # 'jdk' predates 'jni'; steer users towards the replacement.
        self.featurechecks.append(FeatureDeprecated(
            'jdk system dependency',
            '0.62.0',
            'Use the jni system dependency instead'
        ))
# Factories mapping each dependency name to the detection methods to try, in
# the order given.

# LLVM: try CMake first, then fall back to llvm-config.
llvm_factory = DependencyFactory(
    'LLVM',
    [DependencyMethods.CMAKE, DependencyMethods.CONFIG_TOOL],
    cmake_class=LLVMDependencyCMake,
    configtool_class=LLVMDependencyConfigTool,
)

# GTest: pkg-config, then prebuilt library / distro sources.
gtest_factory = DependencyFactory(
    'gtest',
    [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],
    pkgconfig_class=GTestDependencyPC,
    system_class=GTestDependencySystem,
)

# GMock: pkg-config, then prebuilt library / distro sources.
gmock_factory = DependencyFactory(
    'gmock',
    [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],
    pkgconfig_class=GMockDependencyPC,
    system_class=GMockDependencySystem,
)

# zlib: pkg-config, CMake (as 'ZLIB'), then platform-specific system lookup.
zlib_factory = DependencyFactory(
    'zlib',
    [DependencyMethods.PKGCONFIG, DependencyMethods.CMAKE, DependencyMethods.SYSTEM],
    cmake_name='ZLIB',
    system_class=ZlibSystemDependency,
)
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import tarfile
import unittest
from builtins import object, str
from contextlib import contextmanager
from future.utils import text_type
from pants.base.project_tree import Dir, Link
from pants.engine.fs import (EMPTY_DIRECTORY_DIGEST, DirectoryDigest, DirectoryToMaterialize,
FilesContent, PathGlobs, PathGlobsAndRoot, Snapshot, create_fs_rules)
from pants.util.contextutil import temporary_dir
from pants.util.meta import AbstractClass
from pants_test.engine.scheduler_test_base import SchedulerTestBase
from pants_test.test_base import TestBase
class DirectoryListing(object):
  """Placeholder marker type for directory-listing nodes. TODO: See #4027."""
class ReadLink(object):
  """Placeholder marker type for symlink-read nodes. TODO: See #4027."""
class FSTest(TestBase, SchedulerTestBase, AbstractClass):
_original_src = os.path.join(os.path.dirname(__file__), 'examples/fs_test/fs_test.tar')
@contextmanager
def mk_project_tree(self, ignore_patterns=None):
"""Construct a ProjectTree for the given src path."""
project_tree = self.mk_fs_tree(ignore_patterns=ignore_patterns)
with tarfile.open(self._original_src) as tar:
tar.extractall(project_tree.build_root)
yield project_tree
@staticmethod
def specs(filespecs):
if isinstance(filespecs, PathGlobs):
return filespecs
else:
return PathGlobs(include=filespecs)
def assert_walk_dirs(self, filespecs_or_globs, paths, ignore_patterns=None):
self.assert_walk_snapshot('dirs', filespecs_or_globs, paths, ignore_patterns=ignore_patterns)
def assert_walk_files(self, filespecs_or_globs, paths, ignore_patterns=None):
self.assert_walk_snapshot('files', filespecs_or_globs, paths, ignore_patterns=ignore_patterns)
def assert_walk_snapshot(self, field, filespecs_or_globs, paths, ignore_patterns=None):
with self.mk_project_tree(ignore_patterns=ignore_patterns) as project_tree:
scheduler = self.mk_scheduler(rules=create_fs_rules(), project_tree=project_tree)
result = self.execute(scheduler, Snapshot, self.specs(filespecs_or_globs))[0]
self.assertEquals(sorted([p.path for p in getattr(result, field)]), sorted(paths))
def assert_content(self, filespecs_or_globs, expected_content):
with self.mk_project_tree() as project_tree:
scheduler = self.mk_scheduler(rules=create_fs_rules(), project_tree=project_tree)
snapshot = self.execute_expecting_one_result(scheduler, Snapshot, self.specs(filespecs_or_globs)).value
result = self.execute_expecting_one_result(scheduler, FilesContent, snapshot.directory_digest).value
actual_content = {f.path: f.content for f in result.dependencies}
self.assertEquals(expected_content, actual_content)
def assert_digest(self, filespecs_or_globs, expected_files):
with self.mk_project_tree() as project_tree:
scheduler = self.mk_scheduler(rules=create_fs_rules(), project_tree=project_tree)
result = self.execute(scheduler, Snapshot, self.specs(filespecs_or_globs))[0]
# Confirm all expected files were digested.
self.assertEquals(set(expected_files), set(f.path for f in result.files))
self.assertTrue(result.directory_digest.fingerprint is not None)
def assert_fsnodes(self, filespecs_or_globs, subject_product_pairs):
with self.mk_project_tree() as project_tree:
scheduler = self.mk_scheduler(rules=create_fs_rules(), project_tree=project_tree)
request = self.execute_request(scheduler, Snapshot, self.specs(filespecs_or_globs))
# Validate that FilesystemNodes for exactly the given subjects are reachable under this
# request.
fs_nodes = [n for n, _ in scheduler.product_graph.walk(roots=request.roots)
if type(n) is "TODO: need a new way to filter for FS intrinsics"]
self.assertEquals(set((n.subject, n.product) for n in fs_nodes), set(subject_product_pairs))
def test_walk_literal(self):
self.assert_walk_files(['4.txt'], ['4.txt'])
self.assert_walk_files(['a/b/1.txt', 'a/b/2'], ['a/b/1.txt', 'a/b/2'])
self.assert_walk_files(['c.ln/2'], ['c.ln/2'])
self.assert_walk_files(['d.ln/b/1.txt'], ['d.ln/b/1.txt'])
self.assert_walk_files(['a/3.txt'], ['a/3.txt'])
self.assert_walk_files(['z.txt'], [])
def test_walk_literal_directory(self):
self.assert_walk_dirs(['c.ln'], ['c.ln'])
self.assert_walk_dirs(['a'], ['a'])
self.assert_walk_dirs(['a/b'], ['a/b'])
self.assert_walk_dirs(['z'], [])
self.assert_walk_dirs(['4.txt', 'a/3.txt'], [])
def test_walk_siblings(self):
self.assert_walk_files(['*.txt'], ['4.txt'])
self.assert_walk_files(['a/b/*.txt'], ['a/b/1.txt'])
self.assert_walk_files(['c.ln/*.txt'], ['c.ln/1.txt'])
self.assert_walk_files(['a/b/*'], ['a/b/1.txt', 'a/b/2'])
self.assert_walk_files(['*/0.txt'], [])
def test_walk_recursive(self):
self.assert_walk_files(['**/*.txt.ln'], ['a/4.txt.ln', 'd.ln/4.txt.ln'])
self.assert_walk_files(['**/*.txt'], ['4.txt',
'a/3.txt',
'a/b/1.txt',
'c.ln/1.txt',
'd.ln/3.txt',
'd.ln/b/1.txt'])
self.assert_walk_files(['**/*.txt'], ['a/3.txt',
'a/b/1.txt',
'c.ln/1.txt',
'd.ln/3.txt',
'd.ln/b/1.txt',
'4.txt'])
self.assert_walk_files(['**/3.t*t'], ['a/3.txt', 'd.ln/3.txt'])
self.assert_walk_files(['**/*.zzz'], [])
def test_walk_single_star(self):
self.assert_walk_files(['*'], ['4.txt'])
def test_walk_parent_link(self):
self.assert_walk_files(['c.ln/../3.txt'], ['c.ln/../3.txt'])
def test_walk_recursive_all(self):
self.assert_walk_files(['**'], ['4.txt',
'a/3.txt',
'a/4.txt.ln',
'a/b/1.txt',
'a/b/2',
'c.ln/1.txt',
'c.ln/2',
'd.ln/3.txt',
'd.ln/4.txt.ln',
'd.ln/b/1.txt',
'd.ln/b/2'])
def test_walk_ignore(self):
# Ignore '*.ln' suffixed items at the root.
self.assert_walk_files(['**'],
['4.txt',
'a/3.txt',
'a/4.txt.ln',
'a/b/1.txt',
'a/b/2',],
ignore_patterns=['/*.ln'])
# Whitelist one entry.
self.assert_walk_files(['**'],
['4.txt',
'a/3.txt',
'a/4.txt.ln',
'a/b/1.txt',
'a/b/2',
'c.ln/1.txt',
'c.ln/2',],
ignore_patterns=['/*.ln', '!c.ln'])
def test_walk_recursive_trailing_doublestar(self):
self.assert_walk_files(['a/**'], ['a/3.txt',
'a/4.txt.ln',
'a/b/1.txt',
'a/b/2'])
self.assert_walk_files(['d.ln/**'], ['d.ln/3.txt',
'd.ln/4.txt.ln',
'd.ln/b/1.txt',
'd.ln/b/2'])
self.assert_walk_dirs(['a/**'], ['a/b'])
def test_walk_recursive_slash_doublestar_slash(self):
self.assert_walk_files(['a/**/3.txt'], ['a/3.txt'])
self.assert_walk_files(['a/**/b/1.txt'], ['a/b/1.txt'])
self.assert_walk_files(['a/**/2'], ['a/b/2'])
def test_walk_recursive_directory(self):
self.assert_walk_dirs(['*'], ['a', 'c.ln', 'd.ln'])
self.assert_walk_dirs(['*/*'], ['a/b', 'd.ln/b'])
self.assert_walk_dirs(['**/*'], ['a', 'c.ln', 'd.ln', 'a/b', 'd.ln/b'])
self.assert_walk_dirs(['*/*/*'], [])
def test_remove_duplicates(self):
self.assert_walk_files(['*', '**'], ['4.txt',
'a/3.txt',
'a/4.txt.ln',
'a/b/1.txt',
'a/b/2',
'c.ln/1.txt',
'c.ln/2',
'd.ln/3.txt',
'd.ln/4.txt.ln',
'd.ln/b/1.txt',
'd.ln/b/2'])
self.assert_walk_files(['**/*.txt', 'a/b/1.txt', '4.txt'], ['4.txt',
'a/3.txt',
'c.ln/1.txt',
'd.ln/3.txt',
'a/b/1.txt',
'd.ln/b/1.txt'])
self.assert_walk_dirs(['*', '**'], ['a', 'c.ln', 'd.ln', 'a/b', 'd.ln/b'])
def test_files_content_literal(self):
self.assert_content(['4.txt', 'a/4.txt.ln'], {'4.txt': 'four\n', 'a/4.txt.ln': 'four\n'})
def test_files_content_directory(self):
with self.assertRaises(Exception):
self.assert_content(['a/b/'], {'a/b/': 'nope\n'})
with self.assertRaises(Exception):
self.assert_content(['a/b'], {'a/b': 'nope\n'})
def test_files_content_symlink(self):
self.assert_content(['c.ln/../3.txt'], {'c.ln/../3.txt': 'three\n'})
def test_files_digest_literal(self):
self.assert_digest(['a/3.txt', '4.txt'], ['a/3.txt', '4.txt'])
@unittest.skip('Skipped to expedite landing #3821; see: #4027.')
def test_nodes_file(self):
self.assert_fsnodes(['4.txt'], [
(Dir(''), DirectoryListing),
])
@unittest.skip('Skipped to expedite landing #3821; see: #4027.')
def test_nodes_symlink_file(self):
self.assert_fsnodes(['c.ln/2'], [
(Dir(''), DirectoryListing),
(Link('c.ln'), ReadLink),
(Dir('a'), DirectoryListing),
(Dir('a/b'), DirectoryListing),
])
self.assert_fsnodes(['d.ln/b/1.txt'], [
(Dir(''), DirectoryListing),
(Link('d.ln'), ReadLink),
(Dir('a'), DirectoryListing),
(Dir('a/b'), DirectoryListing),
])
@unittest.skip('Skipped to expedite landing #3821; see: #4027.')
def test_nodes_symlink_globbed_dir(self):
self.assert_fsnodes(['*/2'], [
# Scandir for the root.
(Dir(''), DirectoryListing),
# Read links to determine whether they're actually directories.
(Link('c.ln'), ReadLink),
(Link('d.ln'), ReadLink),
# Scan second level destinations: `a/b` is matched via `c.ln`.
(Dir('a'), DirectoryListing),
(Dir('a/b'), DirectoryListing),
])
@unittest.skip('Skipped to expedite landing #3821; see: #4027.')
def test_nodes_symlink_globbed_file(self):
self.assert_fsnodes(['d.ln/b/*.txt'], [
# NB: Needs to scandir every Dir on the way down to track whether
# it is traversing a symlink.
(Dir(''), DirectoryListing),
# Traverse one symlink.
(Link('d.ln'), ReadLink),
(Dir('a'), DirectoryListing),
(Dir('a/b'), DirectoryListing),
])
def test_snapshot_from_outside_buildroot(self):
with temporary_dir() as temp_dir:
with open(os.path.join(temp_dir, "roland"), "w") as f:
f.write("European Burmese")
scheduler = self.mk_scheduler(rules=create_fs_rules())
globs = PathGlobs(("*",), ())
snapshot = scheduler.capture_snapshots((PathGlobsAndRoot(globs, text_type(temp_dir)),))[0]
self.assert_snapshot_equals(snapshot, ["roland"], DirectoryDigest(
text_type("63949aa823baf765eff07b946050d76ec0033144c785a94d3ebd82baa931cd16"),
80
))
def test_multiple_snapshots_from_outside_buildroot(self):
with temporary_dir() as temp_dir:
with open(os.path.join(temp_dir, "roland"), "w") as f:
f.write("European Burmese")
with open(os.path.join(temp_dir, "susannah"), "w") as f:
f.write("I don't know")
scheduler = self.mk_scheduler(rules=create_fs_rules())
snapshots = scheduler.capture_snapshots((
PathGlobsAndRoot(PathGlobs(("roland",), ()), text_type(temp_dir)),
PathGlobsAndRoot(PathGlobs(("susannah",), ()), text_type(temp_dir)),
PathGlobsAndRoot(PathGlobs(("doesnotexist",), ()), text_type(temp_dir)),
))
self.assertEquals(3, len(snapshots))
self.assert_snapshot_equals(snapshots[0], ["roland"], DirectoryDigest(
text_type("63949aa823baf765eff07b946050d76ec0033144c785a94d3ebd82baa931cd16"),
80
))
self.assert_snapshot_equals(snapshots[1], ["susannah"], DirectoryDigest(
text_type("d3539cfc21eb4bab328ca9173144a8e932c515b1b9e26695454eeedbc5a95f6f"),
82
))
self.assert_snapshot_equals(snapshots[2], [], EMPTY_DIRECTORY_DIGEST)
def test_snapshot_from_outside_buildroot_failure(self):
with temporary_dir() as temp_dir:
scheduler = self.mk_scheduler(rules=create_fs_rules())
globs = PathGlobs(("*",), ())
with self.assertRaises(Exception) as cm:
scheduler.capture_snapshots((PathGlobsAndRoot(globs, text_type(os.path.join(temp_dir, "doesnotexist"))),))
self.assertIn("doesnotexist", str(cm.exception))
def assert_snapshot_equals(self, snapshot, files, directory_digest):
self.assertEquals([file.path for file in snapshot.files], files)
self.assertEquals(snapshot.directory_digest, directory_digest)
def test_merge_zero_directories(self):
scheduler = self.mk_scheduler(rules=create_fs_rules())
dir = scheduler.merge_directories(())
self.assertEqual(EMPTY_DIRECTORY_DIGEST, dir)
def test_merge_directories(self):
with temporary_dir() as temp_dir:
with open(os.path.join(temp_dir, "roland"), "w") as f:
f.write("European Burmese")
with open(os.path.join(temp_dir, "susannah"), "w") as f:
f.write("Not sure actually")
scheduler = self.mk_scheduler(rules=create_fs_rules())
(empty_snapshot, roland_snapshot, susannah_snapshot, both_snapshot) = (
scheduler.capture_snapshots((
PathGlobsAndRoot(PathGlobs(("doesnotmatch",), ()), text_type(temp_dir)),
PathGlobsAndRoot(PathGlobs(("roland",), ()), text_type(temp_dir)),
PathGlobsAndRoot(PathGlobs(("susannah",), ()), text_type(temp_dir)),
PathGlobsAndRoot(PathGlobs(("*",), ()), text_type(temp_dir)),
))
)
empty_merged = scheduler.merge_directories((empty_snapshot.directory_digest))
self.assertEquals(
empty_snapshot.directory_digest,
empty_merged,
)
roland_merged = scheduler.merge_directories((
roland_snapshot.directory_digest,
empty_snapshot.directory_digest,
))
self.assertEquals(
roland_snapshot.directory_digest,
roland_merged,
)
both_merged = scheduler.merge_directories((
roland_snapshot.directory_digest,
susannah_snapshot.directory_digest,
))
self.assertEquals(both_snapshot.directory_digest, both_merged)
def test_materialize_directories(self):
# I tried passing in the digest of a file, but it didn't make it to the
# rust code due to all of the checks we have in place (which is probably a good thing).
self.prime_store_with_roland_digest()
with temporary_dir() as temp_dir:
dir_path = os.path.join(temp_dir, "containing_roland")
digest = DirectoryDigest(
text_type("63949aa823baf765eff07b946050d76ec0033144c785a94d3ebd82baa931cd16"),
80
)
scheduler = self.mk_scheduler(rules=create_fs_rules())
scheduler.materialize_directories((DirectoryToMaterialize(text_type(dir_path), digest),))
created_file = os.path.join(dir_path, "roland")
with open(created_file) as f:
content = f.read()
self.assertEquals(content, "European Burmese")
def test_glob_match_error(self):
with self.assertRaises(ValueError) as cm:
self.assert_walk_files(PathGlobs(
include=['not-a-file.txt'],
exclude=[],
glob_match_error_behavior='error',
), [])
expected_msg = (
"Globs did not match. Excludes were: []. Unmatched globs were: [\"not-a-file.txt\"].")
self.assertIn(expected_msg, str(cm.exception))
def test_glob_match_exclude_error(self):
with self.assertRaises(ValueError) as cm:
self.assert_walk_files(PathGlobs(
include=['*.txt'],
exclude=['4.txt'],
glob_match_error_behavior='error',
), [])
expected_msg = (
"Globs did not match. Excludes were: [\"4.txt\"]. Unmatched globs were: [\"*.txt\"].")
self.assertIn(expected_msg, str(cm.exception))
def test_glob_match_ignore_logging(self):
with self.captured_logging(logging.WARNING) as captured:
self.assert_walk_files(PathGlobs(
include=['not-a-file.txt'],
exclude=[''],
glob_match_error_behavior='ignore',
), [])
self.assertEqual(0, len(captured.warnings()))
@unittest.skip('Skipped to expedite landing #5769: see #5863')
def test_glob_match_warn_logging(self):
    """With behavior 'warn', an unmatched glob should log exactly one warning.

    NOTE(review): the expected warning text below is a placeholder ("???"),
    which is presumably why the test is skipped -- confirm the real message
    before unskipping.
    """
    with self.captured_logging(logging.WARNING) as captured:
        self.assert_walk_files(PathGlobs(
            include=['not-a-file.txt'],
            exclude=[''],
            glob_match_error_behavior='warn',
        ), [])
        all_warnings = captured.warnings()
        self.assertEqual(1, len(all_warnings))
        single_warning = all_warnings[0]
        self.assertEqual("???", str(single_warning))
def prime_store_with_roland_digest(self):
    """Prime the store with a directory holding a file 'roland' containing 'European Burmese'."""
    with temporary_dir() as root:
        with open(os.path.join(root, "roland"), "w") as out:
            out.write("European Burmese")
        scheduler = self.mk_scheduler(rules=create_fs_rules())
        all_files_globs = PathGlobs(("*",), ())
        snapshot = scheduler.capture_snapshots((PathGlobsAndRoot(all_files_globs, text_type(root)),))[0]
        # The digest below is the known fingerprint/size of this one-file directory.
        self.assert_snapshot_equals(snapshot, ["roland"], DirectoryDigest(
            text_type("63949aa823baf765eff07b946050d76ec0033144c785a94d3ebd82baa931cd16"),
            80
        ))
| |
##########################################################################
#
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""d3d.h"""
from winapi import *
from ddraw import *
from d3dtypes import *
from d3dcaps import *
# Flags for the dwFlags argument of the Next*/enumeration methods
# (e.g. NextViewport, NextLight): pick next, head, or tail of the list.
d3dnextFlags = Flags(DWORD, [
    "D3DNEXT_NEXT",
    "D3DNEXT_HEAD",
    "D3DNEXT_TAIL",
])
# The DIRECT3D_VERSION constant, modelled as a flag set for tracing.
direct3dFlags = Flags(DWORD, [
    "DIRECT3D_VERSION",
])
# Flags for the dwFlags argument of the DrawPrimitive* family of calls.
d3ddpFlags = Flags(DWORD, [
    "D3DDP_WAIT",
    "D3DDP_OUTOFORDER",
    "D3DDP_DONOTCLIP",
    "D3DDP_DONOTUPDATEEXTENTS",
    "D3DDP_DONOTLIGHT",
])
# HRESULT type specialized for Direct3D: D3D_OK on success, plus the full set
# of D3DERR_* error codes so the tracer can pretty-print return values.
HRESULT = MAKE_HRESULT(ok = "D3D_OK", errors = [
    "D3DERR_BADMAJORVERSION",
    "D3DERR_BADMINORVERSION",
    "D3DERR_INVALID_DEVICE",
    "D3DERR_INITFAILED",
    "D3DERR_DEVICEAGGREGATED",
    "D3DERR_EXECUTE_CREATE_FAILED",
    "D3DERR_EXECUTE_DESTROY_FAILED",
    "D3DERR_EXECUTE_LOCK_FAILED",
    "D3DERR_EXECUTE_UNLOCK_FAILED",
    "D3DERR_EXECUTE_LOCKED",
    "D3DERR_EXECUTE_NOT_LOCKED",
    "D3DERR_EXECUTE_FAILED",
    "D3DERR_EXECUTE_CLIPPED_FAILED",
    "D3DERR_TEXTURE_NO_SUPPORT",
    "D3DERR_TEXTURE_CREATE_FAILED",
    "D3DERR_TEXTURE_DESTROY_FAILED",
    "D3DERR_TEXTURE_LOCK_FAILED",
    "D3DERR_TEXTURE_UNLOCK_FAILED",
    "D3DERR_TEXTURE_LOAD_FAILED",
    "D3DERR_TEXTURE_SWAP_FAILED",
    "D3DERR_TEXTURE_LOCKED",
    "D3DERR_TEXTURE_NOT_LOCKED",
    "D3DERR_TEXTURE_GETSURF_FAILED",
    "D3DERR_MATRIX_CREATE_FAILED",
    "D3DERR_MATRIX_DESTROY_FAILED",
    "D3DERR_MATRIX_SETDATA_FAILED",
    "D3DERR_MATRIX_GETDATA_FAILED",
    "D3DERR_SETVIEWPORTDATA_FAILED",
    "D3DERR_INVALIDCURRENTVIEWPORT",
    "D3DERR_INVALIDPRIMITIVETYPE",
    "D3DERR_INVALIDVERTEXTYPE",
    "D3DERR_TEXTURE_BADSIZE",
    "D3DERR_INVALIDRAMPTEXTURE",
    "D3DERR_MATERIAL_CREATE_FAILED",
    "D3DERR_MATERIAL_DESTROY_FAILED",
    "D3DERR_MATERIAL_SETDATA_FAILED",
    "D3DERR_MATERIAL_GETDATA_FAILED",
    "D3DERR_INVALIDPALETTE",
    "D3DERR_ZBUFF_NEEDS_SYSTEMMEMORY",
    "D3DERR_ZBUFF_NEEDS_VIDEOMEMORY",
    "D3DERR_SURFACENOTINVIDMEM",
    "D3DERR_LIGHT_SET_FAILED",
    "D3DERR_LIGHTHASVIEWPORT",
    "D3DERR_LIGHTNOTINTHISVIEWPORT",
    "D3DERR_SCENE_IN_SCENE",
    "D3DERR_SCENE_NOT_IN_SCENE",
    "D3DERR_SCENE_BEGIN_FAILED",
    "D3DERR_SCENE_END_FAILED",
    "D3DERR_INBEGIN",
    "D3DERR_NOTINBEGIN",
    "D3DERR_NOVIEWPORTS",
    "D3DERR_VIEWPORTDATANOTSET",
    "D3DERR_VIEWPORTHASNODEVICE",
    "D3DERR_NOCURRENTVIEWPORT",
    "D3DERR_INVALIDVERTEXFORMAT",
    "D3DERR_COLORKEYATTACHED",
    "D3DERR_VERTEXBUFFEROPTIMIZED",
    "D3DERR_VBUF_CREATE_FAILED",
    "D3DERR_VERTEXBUFFERLOCKED",
    "D3DERR_VERTEXBUFFERUNLOCKFAILED",
    "D3DERR_ZBUFFER_NOTPRESENT",
    "D3DERR_STENCILBUFFER_NOTPRESENT",
    "D3DERR_WRONGTEXTUREFORMAT",
    "D3DERR_UNSUPPORTEDCOLOROPERATION",
    "D3DERR_UNSUPPORTEDCOLORARG",
    "D3DERR_UNSUPPORTEDALPHAOPERATION",
    "D3DERR_UNSUPPORTEDALPHAARG",
    "D3DERR_TOOMANYOPERATIONS",
    "D3DERR_CONFLICTINGTEXTUREFILTER",
    "D3DERR_UNSUPPORTEDFACTORVALUE",
    "D3DERR_CONFLICTINGRENDERSTATE",
    "D3DERR_UNSUPPORTEDTEXTUREFILTER",
    "D3DERR_TOOMANYPRIMITIVES",
    "D3DERR_INVALIDMATRIX",
    "D3DERR_TOOMANYVERTICES",
    "D3DERR_CONFLICTINGTEXTUREPALETTE",
    "D3DERR_INVALIDSTATEBLOCK",
    "D3DERR_INBEGINSTATEBLOCK",
    "D3DERR_NOTINBEGINSTATEBLOCK",
])
# COM interface declarations for the legacy Direct3D (pre-DX8) API. Versioned
# interfaces (2/3/7) derive directly from IUnknown; only the viewport chain
# extends its predecessor. Method tables are attached further below.
IDirect3D = Interface("IDirect3D", IUnknown)
IDirect3D2 = Interface("IDirect3D2", IUnknown)
IDirect3D3 = Interface("IDirect3D3", IUnknown)
IDirect3D7 = Interface("IDirect3D7", IUnknown)
IDirect3DDevice = Interface("IDirect3DDevice", IUnknown)
IDirect3DDevice2 = Interface("IDirect3DDevice2", IUnknown)
IDirect3DDevice3 = Interface("IDirect3DDevice3", IUnknown)
IDirect3DDevice7 = Interface("IDirect3DDevice7", IUnknown)
IDirect3DExecuteBuffer = Interface("IDirect3DExecuteBuffer", IUnknown)
IDirect3DLight = Interface("IDirect3DLight", IUnknown)
IDirect3DMaterial = Interface("IDirect3DMaterial", IUnknown)
IDirect3DMaterial2 = Interface("IDirect3DMaterial2", IUnknown)
IDirect3DMaterial3 = Interface("IDirect3DMaterial3", IUnknown)
IDirect3DTexture = Interface("IDirect3DTexture", IUnknown)
IDirect3DTexture2 = Interface("IDirect3DTexture2", IUnknown)
IDirect3DViewport = Interface("IDirect3DViewport", IUnknown)
IDirect3DViewport2 = Interface("IDirect3DViewport2", IDirect3DViewport)
IDirect3DViewport3 = Interface("IDirect3DViewport3", IDirect3DViewport2)
IDirect3DVertexBuffer = Interface("IDirect3DVertexBuffer", IUnknown)
IDirect3DVertexBuffer7 = Interface("IDirect3DVertexBuffer7", IUnknown)
# LP* pointer typedefs mirroring the SDK's LPDIRECT3D* types.
LPUNKNOWN = ObjPointer(IUnknown)
LPDIRECT3D = ObjPointer(IDirect3D)
LPDIRECT3DDEVICE = ObjPointer(IDirect3DDevice)
LPDIRECT3DEXECUTEBUFFER = ObjPointer(IDirect3DExecuteBuffer)
LPDIRECT3DLIGHT = ObjPointer(IDirect3DLight)
LPDIRECT3DMATERIAL = ObjPointer(IDirect3DMaterial)
LPDIRECT3DTEXTURE = ObjPointer(IDirect3DTexture)
LPDIRECT3DVIEWPORT = ObjPointer(IDirect3DViewport)
LPDIRECT3D2 = ObjPointer(IDirect3D2)
LPDIRECT3DDEVICE2 = ObjPointer(IDirect3DDevice2)
LPDIRECT3DMATERIAL2 = ObjPointer(IDirect3DMaterial2)
LPDIRECT3DTEXTURE2 = ObjPointer(IDirect3DTexture2)
LPDIRECT3DVIEWPORT2 = ObjPointer(IDirect3DViewport2)
LPDIRECT3D3 = ObjPointer(IDirect3D3)
LPDIRECT3DDEVICE3 = ObjPointer(IDirect3DDevice3)
LPDIRECT3DMATERIAL3 = ObjPointer(IDirect3DMaterial3)
LPDIRECT3DVIEWPORT3 = ObjPointer(IDirect3DViewport3)
LPDIRECT3DVERTEXBUFFER = ObjPointer(IDirect3DVertexBuffer)
LPDIRECT3D7 = ObjPointer(IDirect3D7)
LPDIRECT3DDEVICE7 = ObjPointer(IDirect3DDevice7)
LPDIRECT3DVERTEXBUFFER7 = ObjPointer(IDirect3DVertexBuffer7)
# Method tables for the top-level IDirect3D* interfaces. Order must match the
# vtable layout in the SDK headers; Out(...) marks output parameters.
IDirect3D.methods += [
    StdMethod(HRESULT, "Initialize", [(REFCLSID, "riid")]),
    StdMethod(HRESULT, "EnumDevices", [(LPD3DENUMDEVICESCALLBACK, "lpEnumDevicesCallback"), (LPVOID, "lpUserArg")]),
    StdMethod(HRESULT, "CreateLight", [Out(Pointer(LPDIRECT3DLIGHT), "lplpDirect3DLight"), (LPUNKNOWN, "pUnkOuter")]),
    StdMethod(HRESULT, "CreateMaterial", [Out(Pointer(LPDIRECT3DMATERIAL), "lplpDirect3DMaterial"), (LPUNKNOWN, "pUnkOuter")]),
    StdMethod(HRESULT, "CreateViewport", [Out(Pointer(LPDIRECT3DVIEWPORT), "lplpD3DViewport"), (LPUNKNOWN, "pUnkOuter")]),
    StdMethod(HRESULT, "FindDevice", [(LPD3DFINDDEVICESEARCH, "lpD3DDFS"), (LPD3DFINDDEVICERESULT, "lplpD3DDevice")]),
]
# IDirect3D2 drops Initialize and gains CreateDevice.
IDirect3D2.methods += [
    StdMethod(HRESULT, "EnumDevices", [(LPD3DENUMDEVICESCALLBACK, "lpEnumDevicesCallback"), (LPVOID, "lpUserArg")]),
    StdMethod(HRESULT, "CreateLight", [Out(Pointer(LPDIRECT3DLIGHT), "lplpDirect3DLight"), (LPUNKNOWN, "pUnkOuter")]),
    StdMethod(HRESULT, "CreateMaterial", [Out(Pointer(LPDIRECT3DMATERIAL2), "lplpDirect3DMaterial2"), (LPUNKNOWN, "pUnkOuter")]),
    StdMethod(HRESULT, "CreateViewport", [Out(Pointer(LPDIRECT3DVIEWPORT2), "lplpD3DViewport2"), (LPUNKNOWN, "pUnkOuter")]),
    StdMethod(HRESULT, "FindDevice", [(LPD3DFINDDEVICESEARCH, "lpD3DDFS"), (LPD3DFINDDEVICERESULT, "lpD3DFDR")]),
    StdMethod(HRESULT, "CreateDevice", [(REFCLSID, "rclsid"), (LPDIRECTDRAWSURFACE, "lpDDS"), Out(Pointer(LPDIRECT3DDEVICE2), "lplpD3DDevice2")]),
]
# IDirect3D3 adds vertex buffers, z-buffer format enumeration and texture eviction.
IDirect3D3.methods += [
    StdMethod(HRESULT, "EnumDevices", [(LPD3DENUMDEVICESCALLBACK, "lpEnumDevicesCallback"), (LPVOID, "lpUserArg")]),
    StdMethod(HRESULT, "CreateLight", [Out(Pointer(LPDIRECT3DLIGHT), "lplpDirect3DLight"), (LPUNKNOWN, "pUnkOuter")]),
    StdMethod(HRESULT, "CreateMaterial", [Out(Pointer(LPDIRECT3DMATERIAL3), "lplpDirect3DMaterial3"), (LPUNKNOWN, "pUnkOuter")]),
    StdMethod(HRESULT, "CreateViewport", [Out(Pointer(LPDIRECT3DVIEWPORT3), "lplpD3DViewport3"), (LPUNKNOWN, "pUnkOuter")]),
    StdMethod(HRESULT, "FindDevice", [(LPD3DFINDDEVICESEARCH, "lpD3DDFS"), Out(LPD3DFINDDEVICERESULT, "lpD3DFDR")]),
    StdMethod(HRESULT, "CreateDevice", [(REFCLSID, "rclsid"), (LPDIRECTDRAWSURFACE4, "lpDDS"), Out(Pointer(LPDIRECT3DDEVICE3), "lplpD3DDevice3"), (LPUNKNOWN, "lpUnk")]),
    StdMethod(HRESULT, "CreateVertexBuffer", [(LPD3DVERTEXBUFFERDESC, "lpD3DVertBufDesc"), Out(Pointer(LPDIRECT3DVERTEXBUFFER), "lplpD3DVertBuf"), (DWORD, "dwFlags"), (LPUNKNOWN, "lpUnk")]),
    StdMethod(HRESULT, "EnumZBufferFormats", [(REFCLSID, "riidDevice"), (LPD3DENUMPIXELFORMATSCALLBACK, "lpEnumCallback"), (LPVOID, "lpContext")]),
    StdMethod(HRESULT, "EvictManagedTextures", []),
]
# IDirect3D7 drops lights/materials/viewports (now device-level state).
IDirect3D7.methods += [
    StdMethod(HRESULT, "EnumDevices", [(LPD3DENUMDEVICESCALLBACK7, "lpEnumDevicesCallback"), (LPVOID, "lpUserArg")]),
    StdMethod(HRESULT, "CreateDevice", [(REFCLSID, "rclsid"), (LPDIRECTDRAWSURFACE7, "lpDDS"), Out(Pointer(LPDIRECT3DDEVICE7), "lplpD3DDevice")]),
    StdMethod(HRESULT, "CreateVertexBuffer", [(LPD3DVERTEXBUFFERDESC, "lpD3DVertBufDesc"), Out(Pointer(LPDIRECT3DVERTEXBUFFER7), "lplpD3DVertBuf"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "EnumZBufferFormats", [(REFCLSID, "riidDevice"), (LPD3DENUMPIXELFORMATSCALLBACK, "lpEnumCallback"), (LPVOID, "lpContext")]),
    StdMethod(HRESULT, "EvictManagedTextures", []),
]
# Method tables for the device interfaces. The original device works through
# execute buffers; Device2/Device3 add immediate-mode Begin/End drawing; and
# Device7 folds lighting/material/viewport state into the device itself.
IDirect3DDevice.methods += [
    StdMethod(HRESULT, "Initialize", [(LPDIRECT3D, "lpDirect3D"), (LPGUID, "lpGUID"), (LPD3DDEVICEDESC, "lpD3DDVDesc")]),
    StdMethod(HRESULT, "GetCaps", [Out(LPD3DDEVICEDESC, "lpD3DHWDevDesc"), Out(LPD3DDEVICEDESC, "lpD3DHELDevDesc")]),
    StdMethod(HRESULT, "SwapTextureHandles", [(LPDIRECT3DTEXTURE, "lpD3Dtex1"), (LPDIRECT3DTEXTURE, "lpD3DTex2")]),
    StdMethod(HRESULT, "CreateExecuteBuffer", [(LPD3DEXECUTEBUFFERDESC, "lpDesc"), Out(Pointer(LPDIRECT3DEXECUTEBUFFER), "lplpDirect3DExecuteBuffer"), (LPUNKNOWN, "pUnkOuter")]),
    StdMethod(HRESULT, "GetStats", [(LPD3DSTATS, "lpD3DStats")]),
    StdMethod(HRESULT, "Execute", [(LPDIRECT3DEXECUTEBUFFER, "lpDirect3DExecuteBuffer"), (LPDIRECT3DVIEWPORT, "lpDirect3DViewport"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "AddViewport", [(LPDIRECT3DVIEWPORT, "lpDirect3DViewport")]),
    StdMethod(HRESULT, "DeleteViewport", [(LPDIRECT3DVIEWPORT, "lpDirect3DViewport")]),
    StdMethod(HRESULT, "NextViewport", [(LPDIRECT3DVIEWPORT, "lpDirect3DViewport"), Out(Pointer(LPDIRECT3DVIEWPORT), "lplpDirect3DViewport"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "Pick", [(LPDIRECT3DEXECUTEBUFFER, "lpDirect3DExecuteBuffer"), (LPDIRECT3DVIEWPORT, "lpDirect3DViewport"), (DWORD, "dwFlags"), (LPD3DRECT, "lpRect")]),
    StdMethod(HRESULT, "GetPickRecords", [(LPDWORD, "lpCount"), (LPD3DPICKRECORD, "lpD3DPickRec")]),
    StdMethod(HRESULT, "EnumTextureFormats", [(LPD3DENUMTEXTUREFORMATSCALLBACK, "lpD3DEnumTextureProc"), (LPVOID, "lpArg")]),
    StdMethod(HRESULT, "CreateMatrix", [Out(LPD3DMATRIXHANDLE, "lpD3DMatHandle")]),
    StdMethod(HRESULT, "SetMatrix", [(D3DMATRIXHANDLE, "D3DMatHandle"), (Const(LPD3DMATRIX), "lpD3DMatrix")]),
    StdMethod(HRESULT, "GetMatrix", [(D3DMATRIXHANDLE, "D3DMatHandle"), Out(LPD3DMATRIX, "lpD3DMatrix")]),
    StdMethod(HRESULT, "DeleteMatrix", [(D3DMATRIXHANDLE, "D3DMatHandle")]),
    StdMethod(HRESULT, "BeginScene", []),
    StdMethod(HRESULT, "EndScene", []),
    StdMethod(HRESULT, "GetDirect3D", [Out(Pointer(LPDIRECT3D), "lplpDirect3D")]),
]
# IDirect3DDevice2: immediate-mode drawing (Begin/End, DrawPrimitive) plus
# render/light/transform state.
IDirect3DDevice2.methods += [
    StdMethod(HRESULT, "GetCaps", [Out(LPD3DDEVICEDESC, "lpD3DHWDevDesc"), Out(LPD3DDEVICEDESC, "lpD3DHELDevDesc")]),
    StdMethod(HRESULT, "SwapTextureHandles", [(LPDIRECT3DTEXTURE2, "lpD3DTex1"), (LPDIRECT3DTEXTURE2, "lpD3DTex2")]),
    StdMethod(HRESULT, "GetStats", [Out(LPD3DSTATS, "lpD3DStats")]),
    StdMethod(HRESULT, "AddViewport", [(LPDIRECT3DVIEWPORT2, "lpDirect3DViewport2")]),
    StdMethod(HRESULT, "DeleteViewport", [(LPDIRECT3DVIEWPORT2, "lpDirect3DViewport2")]),
    StdMethod(HRESULT, "NextViewport", [(LPDIRECT3DVIEWPORT2, "lpDirect3DViewport2"), Out(Pointer(LPDIRECT3DVIEWPORT2), "lplpDirect3DViewport2"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "EnumTextureFormats", [(LPD3DENUMTEXTUREFORMATSCALLBACK, "lpD3DEnumTextureProc"), (LPVOID, "lpArg")]),
    StdMethod(HRESULT, "BeginScene", []),
    StdMethod(HRESULT, "EndScene", []),
    StdMethod(HRESULT, "GetDirect3D", [Out(Pointer(LPDIRECT3D2), "lplpDirect3D2")]),
    StdMethod(HRESULT, "SetCurrentViewport", [(LPDIRECT3DVIEWPORT2, "lpDirect3DViewport2")]),
    StdMethod(HRESULT, "GetCurrentViewport", [Out(Pointer(LPDIRECT3DVIEWPORT2), "lplpDirect3DViewport2")]),
    StdMethod(HRESULT, "SetRenderTarget", [(LPDIRECTDRAWSURFACE, "lpNewRenderTarget"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "GetRenderTarget", [Out(Pointer(LPDIRECTDRAWSURFACE), "lplpRenderTarget")]),
    StdMethod(HRESULT, "Begin", [(D3DPRIMITIVETYPE, "d3dpt"), (D3DVERTEXTYPE, "dwVertexTypeDesc"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "BeginIndexed", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (D3DVERTEXTYPE, "d3dvtVertexType"), (LPVOID, "lpvVertices"), (DWORD, "dwNumVertices"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "Vertex", [(LPVOID, "lpVertexType")]),
    StdMethod(HRESULT, "Index", [(WORD, "wVertexIndex")]),
    StdMethod(HRESULT, "End", [(DWORD, "dwFlags")]),
    StdMethod(HRESULT, "GetRenderState", [(D3DRENDERSTATETYPE, "dwRenderStateType"), Out(LPDWORD, "lpdwRenderState")]),
    StdMethod(HRESULT, "SetRenderState", [(D3DRENDERSTATETYPE, "dwRenderStateType"), (DWORD, "dwRenderState")]),
    StdMethod(HRESULT, "GetLightState", [(D3DLIGHTSTATETYPE, "dwLightStateType"), Out(LPDWORD, "lpdwLightState")]),
    StdMethod(HRESULT, "SetLightState", [(D3DLIGHTSTATETYPE, "dwLightStateType"), (DWORD, "dwLightState")]),
    StdMethod(HRESULT, "SetTransform", [(D3DTRANSFORMSTATETYPE, "dtstTransformStateType"), (LPD3DMATRIX, "lpD3DMatrix")]),
    StdMethod(HRESULT, "GetTransform", [(D3DTRANSFORMSTATETYPE, "dtstTransformStateType"), Out(LPD3DMATRIX, "lpD3DMatrix")]),
    StdMethod(HRESULT, "MultiplyTransform", [(D3DTRANSFORMSTATETYPE, "dtstTransformStateType"), (LPD3DMATRIX, "lpD3DMatrix")]),
    StdMethod(HRESULT, "DrawPrimitive", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (D3DVERTEXTYPE, "d3dvtVertexType"), (LPVOID, "lpvVertices"), (DWORD, "dwVertexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "DrawIndexedPrimitive", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (D3DVERTEXTYPE, "d3dvtVertexType"), (LPVOID, "lpvVertices"), (DWORD, "dwVertexCount"), (LPWORD, "dwIndices"), (DWORD, "dwIndexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "SetClipStatus", [(LPD3DCLIPSTATUS, "lpD3DClipStatus")]),
    StdMethod(HRESULT, "GetClipStatus", [(LPD3DCLIPSTATUS, "lpD3DClipStatus")]),
]
# IDirect3DDevice3: FVF vertex types (plain DWORD), strided draws, vertex
# buffers, multitexturing stage state, and ValidateDevice.
IDirect3DDevice3.methods += [
    StdMethod(HRESULT, "GetCaps", [Out(LPD3DDEVICEDESC, "lpD3DHWDevDesc"), Out(LPD3DDEVICEDESC, "lpD3DHELDevDesc")]),
    StdMethod(HRESULT, "GetStats", [Out(LPD3DSTATS, "lpD3DStats")]),
    StdMethod(HRESULT, "AddViewport", [(LPDIRECT3DVIEWPORT3, "lpDirect3DViewport3")]),
    StdMethod(HRESULT, "DeleteViewport", [(LPDIRECT3DVIEWPORT3, "lpDirect3DViewport3")]),
    StdMethod(HRESULT, "NextViewport", [(LPDIRECT3DVIEWPORT3, "lpDirect3DViewport3"), Out(Pointer(LPDIRECT3DVIEWPORT3), "lplpDirect3DViewport3"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "EnumTextureFormats", [(LPD3DENUMPIXELFORMATSCALLBACK, "lpD3DEnumPixelProc"), (LPVOID, "lpArg")]),
    StdMethod(HRESULT, "BeginScene", []),
    StdMethod(HRESULT, "EndScene", []),
    StdMethod(HRESULT, "GetDirect3D", [Out(Pointer(LPDIRECT3D3), "lplpDirect3D3")]),
    StdMethod(HRESULT, "SetCurrentViewport", [(LPDIRECT3DVIEWPORT3, "lpDirect3DViewport3")]),
    StdMethod(HRESULT, "GetCurrentViewport", [Out(Pointer(LPDIRECT3DVIEWPORT3), "lplpDirect3DViewport3")]),
    StdMethod(HRESULT, "SetRenderTarget", [(LPDIRECTDRAWSURFACE4, "lpNewRenderTarget"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "GetRenderTarget", [Out(Pointer(LPDIRECTDRAWSURFACE4), "lplpRenderTarget")]),
    StdMethod(HRESULT, "Begin", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (DWORD, "dwVertexTypeDesc"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "BeginIndexed", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (DWORD, "d3dvtVertexType"), (LPVOID, "lpvVertices"), (DWORD, "dwNumVertices"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "Vertex", [(LPVOID, "lpVertexType")]),
    StdMethod(HRESULT, "Index", [(WORD, "wVertexIndex")]),
    StdMethod(HRESULT, "End", [(DWORD, "dwFlags")]),
    StdMethod(HRESULT, "GetRenderState", [(D3DRENDERSTATETYPE, "dwRenderStateType"), Out(LPDWORD, "lpdwRenderState")]),
    StdMethod(HRESULT, "SetRenderState", [(D3DRENDERSTATETYPE, "dwRenderStateType"), (DWORD, "dwRenderState")]),
    StdMethod(HRESULT, "GetLightState", [(D3DLIGHTSTATETYPE, "dwLightStateType"), Out(LPDWORD, "lpdwLightState")]),
    StdMethod(HRESULT, "SetLightState", [(D3DLIGHTSTATETYPE, "dwLightStateType"), (DWORD, "dwLightState")]),
    StdMethod(HRESULT, "SetTransform", [(D3DTRANSFORMSTATETYPE, "dtstTransformStateType"), (LPD3DMATRIX, "lpD3DMatrix")]),
    StdMethod(HRESULT, "GetTransform", [(D3DTRANSFORMSTATETYPE, "dtstTransformStateType"), Out(LPD3DMATRIX, "lpD3DMatrix")]),
    StdMethod(HRESULT, "MultiplyTransform", [(D3DTRANSFORMSTATETYPE, "dtstTransformStateType"), (LPD3DMATRIX, "lpD3DMatrix")]),
    StdMethod(HRESULT, "DrawPrimitive", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (DWORD, "d3dvtVertexType"), (LPVOID, "lpvVertices"), (DWORD, "dwVertexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "DrawIndexedPrimitive", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (DWORD, "d3dvtVertexType"), (LPVOID, "lpvVertices"), (DWORD, "dwVertexCount"), (LPWORD, "dwIndices"), (DWORD, "dwIndexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "SetClipStatus", [(LPD3DCLIPSTATUS, "lpD3DClipStatus")]),
    StdMethod(HRESULT, "GetClipStatus", [Out(LPD3DCLIPSTATUS, "lpD3DClipStatus")]),
    StdMethod(HRESULT, "DrawPrimitiveStrided", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (DWORD, "dwVertexType"), (LPD3DDRAWPRIMITIVESTRIDEDDATA, "lpD3DDrawPrimStrideData"), (DWORD, "dwVertexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "DrawIndexedPrimitiveStrided", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (DWORD, "dwVertexType"), (LPD3DDRAWPRIMITIVESTRIDEDDATA, "lpD3DDrawPrimStrideData"), (DWORD, "dwVertexCount"), (LPWORD, "lpIndex"), (DWORD, "dwIndexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "DrawPrimitiveVB", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (LPDIRECT3DVERTEXBUFFER, "lpD3DVertexBuf"), (DWORD, "dwStartVertex"), (DWORD, "dwNumVertices"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "DrawIndexedPrimitiveVB", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (LPDIRECT3DVERTEXBUFFER, "lpD3DVertexBuf"), (LPWORD, "lpwIndices"), (DWORD, "dwIndexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "ComputeSphereVisibility", [(LPD3DVECTOR, "lpCenters"), (LPD3DVALUE, "lpRadii"), (DWORD, "dwNumSpheres"), (DWORD, "dwFlags"), (LPDWORD, "lpdwReturnValues")]),
    StdMethod(HRESULT, "GetTexture", [(DWORD, "dwStage"), Out(Pointer(LPDIRECT3DTEXTURE2), "lplpTexture2")]),
    StdMethod(HRESULT, "SetTexture", [(DWORD, "dwStage"), (LPDIRECT3DTEXTURE2, "lpTexture2")]),
    StdMethod(HRESULT, "GetTextureStageState", [(DWORD, "dwStage"), (D3DTEXTURESTAGESTATETYPE, "d3dTexStageStateType"), Out(LPDWORD, "lpdwState")]),
    StdMethod(HRESULT, "SetTextureStageState", [(DWORD, "dwStage"), (D3DTEXTURESTAGESTATETYPE, "d3dTexStageStateType"), (DWORD, "dwState")]),
    StdMethod(HRESULT, "ValidateDevice", [(LPDWORD, "lpdwPasses")]),
]
# IDirect3DDevice7: lighting/material/viewport become device state; textures
# are plain DirectDraw surfaces; adds state blocks, Clear, clip planes, Load.
IDirect3DDevice7.methods += [
    StdMethod(HRESULT, "GetCaps", [Out(LPD3DDEVICEDESC7, "lpD3DHELDevDesc")]),
    StdMethod(HRESULT, "EnumTextureFormats", [(LPD3DENUMPIXELFORMATSCALLBACK, "lpD3DEnumPixelProc"), (LPVOID, "lpArg")]),
    StdMethod(HRESULT, "BeginScene", []),
    StdMethod(HRESULT, "EndScene", []),
    # NOTE(review): the out-parameter is named "lplpDirect3D3" although it
    # yields an IDirect3D7 pointer -- presumably copied from the v3 table;
    # confirm against the SDK header before renaming.
    StdMethod(HRESULT, "GetDirect3D", [Out(Pointer(LPDIRECT3D7), "lplpDirect3D3")]),
    StdMethod(HRESULT, "SetRenderTarget", [(LPDIRECTDRAWSURFACE7, "lpNewRenderTarget"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "GetRenderTarget", [Out(Pointer(LPDIRECTDRAWSURFACE7), "lplpRenderTarget")]),
    StdMethod(HRESULT, "Clear", [(DWORD, "dwCount"), (LPD3DRECT, "lpRects"), (DWORD, "dwFlags"), (D3DCOLOR, "dwColor"), (D3DVALUE, "dvZ"), (DWORD, "dwStencil")]),
    StdMethod(HRESULT, "SetTransform", [(D3DTRANSFORMSTATETYPE, "dtstTransformStateType"), (LPD3DMATRIX, "lpD3DMatrix")]),
    StdMethod(HRESULT, "GetTransform", [(D3DTRANSFORMSTATETYPE, "dtstTransformStateType"), Out(LPD3DMATRIX, "lpD3DMatrix")]),
    StdMethod(HRESULT, "SetViewport", [(LPD3DVIEWPORT7, "lpData")]),
    StdMethod(HRESULT, "MultiplyTransform", [(D3DTRANSFORMSTATETYPE, "dtstTransformStateType"), (LPD3DMATRIX, "lpD3DMatrix")]),
    StdMethod(HRESULT, "GetViewport", [Out(LPD3DVIEWPORT7, "lpData")]),
    StdMethod(HRESULT, "SetMaterial", [(LPD3DMATERIAL7, "lpMat")]),
    StdMethod(HRESULT, "GetMaterial", [Out(LPD3DMATERIAL7, "lpMat")]),
    StdMethod(HRESULT, "SetLight", [(DWORD, "dwLightIndex"), (LPD3DLIGHT7, "lpLight")]),
    StdMethod(HRESULT, "GetLight", [(DWORD, "dwLightIndex"), (LPD3DLIGHT7, "lpLight")]),
    StdMethod(HRESULT, "SetRenderState", [(D3DRENDERSTATETYPE, "dwRenderStateType"), (DWORD, "dwRenderState")]),
    StdMethod(HRESULT, "GetRenderState", [(D3DRENDERSTATETYPE, "dwRenderStateType"), Out(LPDWORD, "lpdwRenderState")]),
    StdMethod(HRESULT, "BeginStateBlock", []),
    StdMethod(HRESULT, "EndStateBlock", [Out(LPDWORD, "lpdwBlockHandle")]),
    StdMethod(HRESULT, "PreLoad", [(LPDIRECTDRAWSURFACE7, "lpddsTexture")]),
    StdMethod(HRESULT, "DrawPrimitive", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (D3DFVF, "d3dvtVertexType"), (LPVOID, "lpvVertices"), (DWORD, "dwVertexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "DrawIndexedPrimitive", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (D3DFVF, "d3dvtVertexType"), (LPVOID, "lpvVertices"), (DWORD, "dwVertexCount"), (LPWORD, "dwIndices"), (DWORD, "dwIndexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "SetClipStatus", [(LPD3DCLIPSTATUS, "lpD3DClipStatus")]),
    StdMethod(HRESULT, "GetClipStatus", [Out(LPD3DCLIPSTATUS, "lpD3DClipStatus")]),
    StdMethod(HRESULT, "DrawPrimitiveStrided", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (D3DFVF, "dwVertexType"), (LPD3DDRAWPRIMITIVESTRIDEDDATA, "lpD3DDrawPrimStrideData"), (DWORD, "dwVertexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "DrawIndexedPrimitiveStrided", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (D3DFVF, "dwVertexType"), (LPD3DDRAWPRIMITIVESTRIDEDDATA, "lpD3DDrawPrimStrideData"), (DWORD, "dwVertexCount"), (LPWORD, "lpIndex"), (DWORD, "dwIndexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "DrawPrimitiveVB", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (LPDIRECT3DVERTEXBUFFER7, "lpD3DVertexBuf"), (DWORD, "dwStartVertex"), (DWORD, "dwNumVertices"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "DrawIndexedPrimitiveVB", [(D3DPRIMITIVETYPE, "d3dptPrimitiveType"), (LPDIRECT3DVERTEXBUFFER7, "lpD3DVertexBuf"), (DWORD, "dwStartVertex"), (DWORD, "dwNumVertices"), (LPWORD, "lpwIndices"), (DWORD, "dwIndexCount"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "ComputeSphereVisibility", [(LPD3DVECTOR, "lpCenters"), (LPD3DVALUE, "lpRadii"), (DWORD, "dwNumSpheres"), (DWORD, "dwFlags"), (LPDWORD, "lpdwReturnValues")]),
    StdMethod(HRESULT, "GetTexture", [(DWORD, "dwStage"), Out(Pointer(LPDIRECTDRAWSURFACE7), "lpTexture")]),
    StdMethod(HRESULT, "SetTexture", [(DWORD, "dwStage"), (LPDIRECTDRAWSURFACE7, "lpTexture")]),
    StdMethod(HRESULT, "GetTextureStageState", [(DWORD, "dwStage"), (D3DTEXTURESTAGESTATETYPE, "d3dTexStageStateType"), Out(LPDWORD, "lpdwState")]),
    StdMethod(HRESULT, "SetTextureStageState", [(DWORD, "dwStage"), (D3DTEXTURESTAGESTATETYPE, "d3dTexStageStateType"), (DWORD, "dwState")]),
    StdMethod(HRESULT, "ValidateDevice", [Out(LPDWORD, "lpdwPasses")]),
    StdMethod(HRESULT, "ApplyStateBlock", [(DWORD, "dwBlockHandle")]),
    StdMethod(HRESULT, "CaptureStateBlock", [(DWORD, "dwBlockHandle")]),
    StdMethod(HRESULT, "DeleteStateBlock", [(DWORD, "dwBlockHandle")]),
    StdMethod(HRESULT, "CreateStateBlock", [(D3DSTATEBLOCKTYPE, "d3dsbType"), Out(LPDWORD, "lpdwBlockHandle")]),
    StdMethod(HRESULT, "Load", [(LPDIRECTDRAWSURFACE7, "lpDestTex"), (LPPOINT, "lpDestPoint"), (LPDIRECTDRAWSURFACE7, "lpSrcTex"), (LPRECT, "lprcSrcRect"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "LightEnable", [(DWORD, "dwLightIndex"), (BOOL, "bEnable")]),
    StdMethod(HRESULT, "GetLightEnable", [(DWORD, "dwLightIndex"), Out(Pointer(BOOL), "pbEnable")]),
    StdMethod(HRESULT, "SetClipPlane", [(DWORD, "dwIndex"), (Pointer(D3DVALUE), "pPlaneEquation")]),
    StdMethod(HRESULT, "GetClipPlane", [(DWORD, "dwIndex"), Out(Pointer(D3DVALUE), "pPlaneEquation")]),
    StdMethod(HRESULT, "GetInfo", [(DWORD, "dwDevInfoID"), Out(LPVOID, "pDevInfoStruct"), (DWORD, "dwSize")]),
]
# Method tables for the helper objects: execute buffers, lights, materials,
# textures, viewports, and vertex buffers.
IDirect3DExecuteBuffer.methods += [
    StdMethod(HRESULT, "Initialize", [(LPDIRECT3DDEVICE, "lpDirect3DDevice"), (LPD3DEXECUTEBUFFERDESC, "lpDesc")]),
    StdMethod(HRESULT, "Lock", [(LPD3DEXECUTEBUFFERDESC, "lpDesc")]),
    StdMethod(HRESULT, "Unlock", []),
    StdMethod(HRESULT, "SetExecuteData", [(LPD3DEXECUTEDATA, "lpData")]),
    StdMethod(HRESULT, "GetExecuteData", [Out(LPD3DEXECUTEDATA, "lpData")]),
    StdMethod(HRESULT, "Validate", [(LPDWORD, "lpdwOffset"), (LPD3DVALIDATECALLBACK, "lpFunc"), (LPVOID, "lpUserArg"), (DWORD, "dwReserved")]),
    StdMethod(HRESULT, "Optimize", [(DWORD, "dwDummy")]),
]
IDirect3DLight.methods += [
    StdMethod(HRESULT, "Initialize", [(LPDIRECT3D, "lpDirect3D")]),
    StdMethod(HRESULT, "SetLight", [(LPD3DLIGHT, "lpLight")]),
    StdMethod(HRESULT, "GetLight", [Out(LPD3DLIGHT, "lpLight")]),
]
IDirect3DMaterial.methods += [
    StdMethod(HRESULT, "Initialize", [(LPDIRECT3D, "lpDirect3D")]),
    StdMethod(HRESULT, "SetMaterial", [(LPD3DMATERIAL, "lpMat")]),
    StdMethod(HRESULT, "GetMaterial", [Out(LPD3DMATERIAL, "lpMat")]),
    StdMethod(HRESULT, "GetHandle", [(LPDIRECT3DDEVICE, "lpDirect3DDevice"), Out(LPD3DMATERIALHANDLE, "lpHandle")]),
    StdMethod(HRESULT, "Reserve", []),
    StdMethod(HRESULT, "Unreserve", []),
]
# Material2/3 drop Initialize and Reserve/Unreserve.
IDirect3DMaterial2.methods += [
    StdMethod(HRESULT, "SetMaterial", [(LPD3DMATERIAL, "lpMat")]),
    StdMethod(HRESULT, "GetMaterial", [Out(LPD3DMATERIAL, "lpMat")]),
    StdMethod(HRESULT, "GetHandle", [(LPDIRECT3DDEVICE2, "lpDirect3DDevice2"), Out(LPD3DMATERIALHANDLE, "lpHandle")]),
]
IDirect3DMaterial3.methods += [
    StdMethod(HRESULT, "SetMaterial", [(LPD3DMATERIAL, "lpMat")]),
    StdMethod(HRESULT, "GetMaterial", [Out(LPD3DMATERIAL, "lpMat")]),
    StdMethod(HRESULT, "GetHandle", [(LPDIRECT3DDEVICE3, "lpDirect3DDevice3"), Out(LPD3DMATERIALHANDLE, "lpHandle")]),
]
IDirect3DTexture.methods += [
    StdMethod(HRESULT, "Initialize", [(LPDIRECT3DDEVICE, "lpDirect3DDevice"), (LPDIRECTDRAWSURFACE, "lpDDSurface")]),
    StdMethod(HRESULT, "GetHandle", [(LPDIRECT3DDEVICE, "lpDirect3DDevice"), Out(LPD3DTEXTUREHANDLE, "lpHandle")]),
    StdMethod(HRESULT, "PaletteChanged", [(DWORD, "dwStart"), (DWORD, "dwCount")]),
    StdMethod(HRESULT, "Load", [(LPDIRECT3DTEXTURE, "lpD3DTexture")]),
    StdMethod(HRESULT, "Unload", []),
]
IDirect3DTexture2.methods += [
    StdMethod(HRESULT, "GetHandle", [(LPDIRECT3DDEVICE2, "lpDirect3DDevice2"), Out(LPD3DTEXTUREHANDLE, "lpHandle")]),
    StdMethod(HRESULT, "PaletteChanged", [(DWORD, "dwStart"), (DWORD, "dwCount")]),
    StdMethod(HRESULT, "Load", [(LPDIRECT3DTEXTURE2, "lpD3DTexture2")]),
]
IDirect3DViewport.methods += [
    StdMethod(HRESULT, "Initialize", [(LPDIRECT3D, "lpDirect3D")]),
    StdMethod(HRESULT, "GetViewport", [Out(LPD3DVIEWPORT, "lpData")]),
    StdMethod(HRESULT, "SetViewport", [(LPD3DVIEWPORT, "lpData")]),
    StdMethod(HRESULT, "TransformVertices", [(DWORD, "dwVertexCount"), (LPD3DTRANSFORMDATA, "lpData"), (DWORD, "dwFlags"), (LPDWORD, "lpOffScreen")]),
    StdMethod(HRESULT, "LightElements", [(DWORD, "dwElementCount"), (LPD3DLIGHTDATA, "lpData")]),
    StdMethod(HRESULT, "SetBackground", [(D3DMATERIALHANDLE, "hMat")]),
    StdMethod(HRESULT, "GetBackground", [Out(LPD3DMATERIALHANDLE, "lphMat"), Out(LPBOOL, "lpValid")]),
    StdMethod(HRESULT, "SetBackgroundDepth", [(LPDIRECTDRAWSURFACE, "lpDDSurface")]),
    StdMethod(HRESULT, "GetBackgroundDepth", [Out(Pointer(LPDIRECTDRAWSURFACE), "lplpDDSurface"), Out(LPBOOL, "lpValid")]),
    StdMethod(HRESULT, "Clear", [(DWORD, "dwCount"), (LPD3DRECT, "lpRects"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "AddLight", [(LPDIRECT3DLIGHT, "lpDirect3DLight")]),
    StdMethod(HRESULT, "DeleteLight", [(LPDIRECT3DLIGHT, "lpDirect3DLight")]),
    StdMethod(HRESULT, "NextLight", [(LPDIRECT3DLIGHT, "lpDirect3DLight"), Out(Pointer(LPDIRECT3DLIGHT), "lplpDirect3DLight"), (DWORD, "dwFlags")]),
]
# Viewport2/3 inherit from their predecessors, so only new methods are listed.
IDirect3DViewport2.methods += [
    StdMethod(HRESULT, "GetViewport2", [Out(LPD3DVIEWPORT2, "lpData")]),
    StdMethod(HRESULT, "SetViewport2", [(LPD3DVIEWPORT2, "lpData")]),
]
IDirect3DViewport3.methods += [
    StdMethod(HRESULT, "SetBackgroundDepth2", [(LPDIRECTDRAWSURFACE4, "lpDDS")]),
    StdMethod(HRESULT, "GetBackgroundDepth2", [Out(Pointer(LPDIRECTDRAWSURFACE4), "lplpDDS"), (LPBOOL, "lpValid")]),
    StdMethod(HRESULT, "Clear2", [(DWORD, "dwCount"), (LPD3DRECT, "lpRects"), (DWORD, "dwFlags"), (D3DCOLOR, "dwColor"), (D3DVALUE, "dvZ"), (DWORD, "dwStencil")]),
]
IDirect3DVertexBuffer.methods += [
    StdMethod(HRESULT, "Lock", [(DWORD, "dwFlags"), Out(Pointer(LPVOID), "lplpData"), (LPDWORD, "lpdwSize")]),
    StdMethod(HRESULT, "Unlock", []),
    StdMethod(HRESULT, "ProcessVertices", [(DWORD, "dwVertexOp"), (DWORD, "dwDestIndex"), (DWORD, "dwCount"), (LPDIRECT3DVERTEXBUFFER, "lpSrcBuffer"), (DWORD, "dwSrcIndex"), (LPDIRECT3DDEVICE3, "lpD3DDevice"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "GetVertexBufferDesc", [Out(LPD3DVERTEXBUFFERDESC, "lpD3DVertexBufferDesc")]),
    StdMethod(HRESULT, "Optimize", [(LPDIRECT3DDEVICE3, "lpD3DDevice"), (DWORD, "dwFlags")]),
]
IDirect3DVertexBuffer7.methods += [
    StdMethod(HRESULT, "Lock", [(DWORD, "dwFlags"), Out(Pointer(LPVOID), "lplpData"), (LPDWORD, "lpdwSize")]),
    StdMethod(HRESULT, "Unlock", []),
    StdMethod(HRESULT, "ProcessVertices", [(DWORD, "dwVertexOp"), (DWORD, "dwDestIndex"), (DWORD, "dwCount"), (LPDIRECT3DVERTEXBUFFER7, "lpSrcBuffer"), (DWORD, "dwSrcIndex"), (LPDIRECT3DDEVICE7, "lpD3DDevice"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "GetVertexBufferDesc", [Out(LPD3DVERTEXBUFFERDESC, "lpD3DVertexBufferDesc")]),
    StdMethod(HRESULT, "Optimize", [(LPDIRECT3DDEVICE7, "lpD3DDevice"), (DWORD, "dwFlags")]),
    StdMethod(HRESULT, "ProcessVerticesStrided", [(DWORD, "dwVertexOp"), (DWORD, "dwDestIndex"), (DWORD, "dwCount"), (LPD3DDRAWPRIMITIVESTRIDEDDATA, "lpStrideData"), (DWORD, "dwVertexTypeDesc"), (LPDIRECT3DDEVICE7, "lpD3DDevice"), (DWORD, "dwFlags")]),
]
# Register the wrapped interfaces with the ddraw module: Direct3D interfaces
# are obtained via QueryInterface on DirectDraw objects, so they are traced
# as part of ddraw.dll.
interfaces = [
    IDirectDraw,
    IDirectDraw2,
    IDirectDraw4,
    IDirectDraw7,
    IDirect3D,
    IDirect3D2,
    IDirect3D3,
    IDirect3D7,
]
ddraw.addInterfaces(interfaces)
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module adds support for multiple processes in the dev_appserver.
Each instance of the application is started as a separate process on a unique
port, for state isolation and parallel execution. A load-balancing process is
also created for each Backend. An API Server process is launched to handle all
memcache and datastore API requests, so that persistent state is shared across
all processes. Application and Backend instances forward their memcache and
datastore requests to the API Server using the remote_api interface.
The base process is considered the Master. It manages all subprocesses, assigns
them ports, and issues /_ah/start requests to Backend instances. Ports are
either fixed (using the base value of --multiprocess_min_port) or randomly
chosen. The Master listens on the --port specified by the user, and forwards
all requests to an App Instance process.
Each balancer forwards incoming requests to the next free instance,
or returns an HTTP 503 error if no free instance is available.
"""
import BaseHTTPServer
import copy
import cStringIO
import errno
import httplib
import logging
import os
import Queue
import signal
import socket
import subprocess
import sys
import threading
import time
import weakref
from google.appengine.api import backendinfo
from google.appengine.api.backends import backends as backends_api
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.tools import api_server
# Names of the dev_appserver command-line options that the master process
# and its subprocesses use to communicate roles and ports (see
# ChildProcess.Start and DevProcess.Init).
ARG_ADDRESS = 'address'
ARG_PORT = 'port'
ARG_BACKENDS = 'backends'
ARG_MULTIPROCESS = 'multiprocess'
ARG_MULTIPROCESS_MIN_PORT = 'multiprocess_min_port'
ARG_MULTIPROCESS_API_PORT = 'multiprocess_api_port'
ARG_MULTIPROCESS_API_SERVER = 'multiprocess_api_server'
ARG_MULTIPROCESS_APP_INSTANCE_ID = 'multiprocess_app_instance'
ARG_MULTIPROCESS_BACKEND_ID = 'multiprocess_backend_id'
ARG_MULTIPROCESS_BACKEND_INSTANCE_ID = 'multiprocess_backend_instance_id'
ARG_MULTIPROCESS_FRONTEND_PORT = 'multiprocess_frontend_port'
# Host on which the shared API Server listens; subprocesses forward their
# datastore/memcache calls there via remote_api.
API_SERVER_HOST = 'localhost'
PATH_DEV_API_SERVER = '/_ah/dev_api_server'
# Upper bound on the instance count a single backend may configure.
BACKEND_MAX_INSTANCES = 20
class Error(Exception):
  """Base exception raised for multiprocess dev_appserver failures."""
def SetThreadName(thread, name):
  """Sets the name a of thread, including the GlobalProcess name."""
  process_type = GlobalProcess().Type()
  thread.setName('[%s: %s]' % (process_type, name))
class StartInstance(threading.Thread):
  """Daemon thread that periodically attempts to start a backend instance.

  SendStartRequest is a no-op once the child reports itself started, so
  this loop also restarts resident backends that have been shut down.
  """

  def __init__(self, child):
    threading.Thread.__init__(self)
    self.setDaemon(True)
    self.child = child
    SetThreadName(self, 'Start %s' % child)

  def run(self):
    """Pokes the child with a start request once per second, forever."""
    while True:
      self.child.SendStartRequest()
      time.sleep(1)
class ChildProcess(object):
  """A subprocess managed by the master dev_appserver.

  Depending on which constructor args are set, the child represents an
  app instance, a backend load balancer, or a backend instance.
  """

  def __init__(self,
               host,
               port,
               app_instance=None,
               backend_id=None,
               instance_id=None,
               frontend_port=None):
    """Creates an object representing a child process.
    Only one of the given args should be provided (except for instance_id when
    backend_id is specified).
    Args:
      host: Host name or address the child listens on.
      port: Port the child listens on.
      app_instance: (int) The process represents the indicated app instance.
      backend_id: (string) The process represents a backend.
      instance_id: (int) The process represents the given backend instance.
      frontend_port: (int) for backends, the frontend port.
    """
    self.app_instance = app_instance
    self.backend_id = backend_id
    self.instance_id = instance_id
    # subprocess.Popen handle, set by Start().
    self.process = None
    self.argv = []
    # Becomes True once /_ah/start succeeds (see SendStartRequest).
    self.started = False
    self.connection_handler = httplib.HTTPConnection
    self.SetHostPort(host, port)
    self.frontend_port = frontend_port

  def __str__(self):
    # Human-readable label used in log output.
    if self.app_instance is not None:
      string = 'App Instance'
    elif self.instance_id is not None:
      string = 'Backend Instance: %s.%d' % (self.backend_id, self.instance_id)
    elif self.backend_id:
      string = 'Backend Balancer: %s' % self.backend_id
    else:
      string = 'Unknown'
    return '%s [%s]' % (string, self.Address())

  def SetHostPort(self, host, port):
    """Sets the host and port that this process listens on."""
    self.host = host
    self.port = port
    if self.backend_id:
      # Publish the port so the Backends API can resolve this backend
      # (or backend instance) in the dev environment.
      backends_api._set_dev_port(self.port,
                                 self.backend_id,
                                 self.instance_id)

  def Address(self):
    """Returns the URL for this process."""
    return 'http://%s:%d' % self.HostPort()

  def HostPort(self):
    """Returns the address of this process as a (host, port) pair."""
    return (self.host, self.port)

  def Start(self, argv, api_port):
    """Starts the child process.
    Args:
      argv: The argv of the parent process. When starting the subprocess,
        we make a copy of the parent's argv, then modify it in accordance with
        how the ChildProcess is configured, to represent different processes in
        the multiprocess dev_appserver.
      api_port: The port on which the API Server listens.
    """
    self.argv = copy.deepcopy(argv)
    self.api_port = api_port
    self.SetFlag('--multiprocess')
    self.SetFlag('--address', short_flag='-a', value=self.host)
    self.SetFlag('--port', short_flag='-p', value=self.port)
    self.SetFlag('--multiprocess_api_port', value=self.api_port)
    if self.frontend_port is not None:
      self.SetFlag('--multiprocess_frontend_port', value=self.frontend_port)
    if self.app_instance is not None:
      self.SetFlag('--multiprocess_app_instance_id', value=0)
    if self.backend_id is not None:
      self.SetFlag('--multiprocess_backend_id', value=self.backend_id)
    if self.instance_id is not None:
      self.SetFlag('--multiprocess_backend_instance_id', value=self.instance_id)
    # If the parent was launched as 'script.py ...', prepend the
    # interpreter so the child starts the same way.
    if self.argv[0].endswith('.py'):
      self.argv.insert(0, sys.executable)
    logging.debug('Starting %s with args: %s', self, self.argv)
    self.process = subprocess.Popen(self.argv)

  def EnableStartRequests(self):
    """Starts a thread to periodically send /_ah/start to this instance.
    We need a thread to do this because we want to restart any resident Backends
    that have been shutdown, and because a backend instance is not considered
    to be ready for serving until it has successfully responded to /_ah/start.
    """
    # Only backend instances receive start requests; app instances and
    # balancers serve immediately.
    if self.backend_id and self.instance_id is not None:
      self.start_thread = StartInstance(self)
      self.start_thread.start()

  def Connect(self):
    """Attempts to connect to the child process.
    Returns:
      bool: Whether a connection was made.
    """
    logging.debug('Attempting connection to %s', self)
    sock = None
    result = True
    try:
      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      sock.connect(self.HostPort())
    # NOTE(review): bare except deliberately treats any failure (refused,
    # timeout, DNS) as 'not up yet'; it also swallows KeyboardInterrupt.
    except:
      result = False
    if sock:
      sock.close()
    return result

  def WaitForConnection(self, timeout_s=30.0, poll_period_s=0.5):
    """Blocks until the child process has started.
    This method repeatedly attempts to connect to the process on its HTTP server
    port. Returns when a connection has been successfully established or the
    timeout has been reached.
    Args:
      timeout_s: Amount of time to wait, in seconds.
      poll_period_s: Time to wait between connection attempts.
    Returns:
      bool: True if a connection was made, False on timeout.
    """
    finish_time = time.time() + timeout_s
    while time.time() < finish_time:
      if self.Connect():
        return True
      time.sleep(poll_period_s)
    logging.info('%s took more than %d seconds to start.', self, timeout_s)
    return False

  def SendStartRequest(self):
    """If the process has not been started, sends a request to /_ah/start."""
    if self.started:
      return
    try:
      response = self.SendRequest('GET', '/_ah/start')
      rc = response.status
      # 2xx means the start handler ran; 404 means the app has no start
      # handler configured -- both count as successfully started.
      if (rc >= 200 and rc < 300) or rc == 404:
        self.started = True
    except KeyboardInterrupt:
      pass
    except Exception, e:
      logging.error('Failed start request to %s: %s', self, e)

  def SendRequest(self, command, path, payload=None, headers=None):
    """Sends an HTTP request to this process.
    Args:
      command: The HTTP command (e.g., GET, POST)
      path: The URL path for the request.
      payload: Optional request body.
      headers: A dictionary containing headers as key-value pairs.
    Returns:
      The httplib response object.
    """
    logging.debug('send request: %s %s to %s' % (command, path, self))
    connection = self.connection_handler('%s:%d' % self.HostPort())
    connection.request(command, path, payload, headers or {})
    response = connection.getresponse()
    return response

  def SetFlag(self, flag, short_flag=None, value=None):
    """Add a flag to self.argv, replacing the existing value if set.
    Args:
      flag: flag to add.
      short_flag: one letter short version of the flag (optional)
      value: Value of the flag (optional)
    """
    self.RemoveFlag(flag, short_flag=short_flag, has_value=(value is not None))
    if value is None:
      self.argv.append(flag)
    else:
      self.argv.append(flag + '=' + str(value))

  def RemoveFlag(self, flag, short_flag=None, has_value=False):
    """Removes an argument from self.argv.
    Args:
      flag: flag to remove.
      short_flag: one letter short version of the flag
      has_value: True if the next argument after the short flag is the value.
    """
    new_argv = []
    index = 0
    while index < len(self.argv):
      value = self.argv[index]
      index += 1
      if flag == value:
        if has_value:
          # Two-token form '--flag value': skip the value token too.
          index += 1
        continue
      # One-token form '--flag=value'.
      if has_value and value.startswith(flag + '='):
        continue
      if short_flag == value:
        if has_value:
          index += 1
        continue
      new_argv.append(value)
    self.argv = new_argv
class DevProcess(object):
  """Represents a process in the multiprocess dev_appserver."""
  TYPE_MASTER = 'Master'
  TYPE_APP_INSTANCE = 'App Instance'
  TYPE_BACKEND_BALANCER = 'Backend Balancer'
  TYPE_BACKEND_INSTANCE = 'Backend Instance'
  TYPES = frozenset([TYPE_MASTER,
                     TYPE_APP_INSTANCE,
                     TYPE_BACKEND_BALANCER,
                     TYPE_BACKEND_INSTANCE])

  def __init__(self):
    """Creates a DevProcess with a default configuration."""
    self.process_type = None
    self.desc = None
    self.http_server = None
    self.app_id = None
    self.backends = None
    self.app_instance = None
    self.backend_id = None
    self.instance_id = None
    self.backend_entry = None
    self.host = None
    self.port = None
    self.api_port = None
    self.multiprocess_min_port = 9000
    self.children = []
    self.child_app_instance = None
    self.child_api_server = None
    self.balance_set = None
    # True once this instance has answered /_ah/start successfully.
    self.started = False

  def Init(self, appinfo, backends, options):
    """Configures this process from parsed options and backend config.
    Args:
      appinfo: An AppInfoExternal object.
      backends: List of BackendEntry objects.
      options: Dictionary of command-line options.
    """
    self.backends = backends
    self.options = options
    self.app_id = appinfo.application
    self.host = options[ARG_ADDRESS]
    self.port = options[ARG_PORT]
    if ARG_MULTIPROCESS_APP_INSTANCE_ID in options:
      self.SetType(DevProcess.TYPE_APP_INSTANCE)
      self.app_instance = options[ARG_MULTIPROCESS_APP_INSTANCE_ID]
      self.desc = str(self.app_instance)
    if ARG_MULTIPROCESS_BACKEND_ID in options:
      self.backend_id = options[ARG_MULTIPROCESS_BACKEND_ID]
      self.desc = self.backend_id
      if ARG_MULTIPROCESS_BACKEND_INSTANCE_ID in options:
        self.SetType(DevProcess.TYPE_BACKEND_INSTANCE)
        self.instance_id = int(options[ARG_MULTIPROCESS_BACKEND_INSTANCE_ID])
        self.desc += '.%d' % self.instance_id
      else:
        self.SetType(DevProcess.TYPE_BACKEND_BALANCER)
    if ARG_MULTIPROCESS_API_PORT in options:
      self.api_port = int(options[ARG_MULTIPROCESS_API_PORT])
    if ARG_MULTIPROCESS_MIN_PORT in options:
      self.multiprocess_min_port = int(options[ARG_MULTIPROCESS_MIN_PORT])
    if self.IsBackend():
      self.InitBackendEntry()
    # No role flag on the command line means this is the master process.
    if not self.Type():
      self.SetType(DevProcess.TYPE_MASTER)

  def InitBackendEntry(self):
    """Finds the entry for the backend this process represents, if any.

    Raises:
      Error: If no configured backend matches self.backend_id.
    """
    for backend in self.backends:
      if backend.name == self.backend_id:
        self.backend_entry = backend
    if not self.backend_entry:
      # BUG FIX: the original used "'No backend entry found for: ' % self",
      # a format string with no placeholder, which raised TypeError
      # instead of the intended Error.
      raise Error('No backend entry found for: %s' % self)

  def HttpServer(self):
    """Returns the HTTPServer used by this process."""
    return self.http_server

  def Address(self):
    """Returns the address of this process."""
    return 'http://%s:%d' % (self.host, self.port)

  def SetHttpServer(self, http_server):
    """Sets the http_server to be used when handling requests.
    Args:
      http_server: An HTTPServer that receives requests.
    """
    self.http_server = http_server
    # Instances serve one request at a time via this queue-draining thread.
    self.handle_requests = HandleRequestThread()
    self.handle_requests.start()

  def StartChildren(self, argv, options):
    """Starts the set of child processes.

    Allocates a port block per backend (100 ports each, balancer on the
    block's last port), launches the shared API Server, then starts and
    waits for every child.
    """
    self.children = []
    base_port = self.multiprocess_min_port
    self.frontend_port = base_port
    next_port = base_port
    self.child_app_instance = ChildProcess(self.host, next_port,
                                           app_instance=0)
    self.children.append(self.child_app_instance)
    next_port += 1
    for backend in self.backends:
      base_port += 100
      next_port = base_port
      for i in xrange(backend.instances):
        self.children.append(ChildProcess(self.host, next_port,
                                          backend_id=backend.name,
                                          instance_id=i,
                                          frontend_port=self.frontend_port))
        next_port += 1
      # The balancer for this backend always occupies the block's top port.
      self.children.append(ChildProcess(self.host, base_port + 99,
                                        backend_id=backend.name))
    base_port += 100
    next_port = base_port
    self.child_api_server = api_server.APIServerProcess(
        executable=sys.executable,
        script=os.path.join(os.path.dirname(argv[0]), 'api_server.py'),
        host=self.host,
        port=next_port,
        app_id=self.app_id,
        application_host=options['address'],
        application_port=options['port'],
        application_root=options['root_path'],
        auto_id_policy=options['auto_id_policy'],
        blobstore_path=options['blobstore_path'],
        clear_datastore=options['clear_datastore'],
        clear_prospective_search=options['clear_prospective_search'],
        datastore_path=options['datastore_path'],
        enable_sendmail=options['enable_sendmail'],
        enable_task_running=not options['disable_task_running'],
        high_replication=options['high_replication'],
        logs_path=options['logs_path'],
        prospective_search_path=options['prospective_search_path'],
        require_indexes=options['require_indexes'],
        show_mail_body=options['show_mail_body'],
        smtp_host=options['smtp_host'],
        smtp_password=options['smtp_password'],
        smtp_port=options['smtp_port'],
        smtp_user=options['smtp_user'],
        task_retry_seconds=options['task_retry_seconds'],
        trusted=options['trusted'],
        use_sqlite=options['use_sqlite'],
        )
    self.child_api_server.Start()
    if self.multiprocess_min_port == 0:
      self.AssignPortsRandomly()
    self.api_port = next_port
    for child in self.children:
      child.Start(argv, self.api_port)
    self.child_api_server.WaitUntilServing()
    for child in self.children:
      child.WaitForConnection()
    message = '\n\nMultiprocess Setup Complete:'
    message += '\n  Remote API Server [%s]' % self.child_api_server.url
    for child in self.children:
      message += '\n  %s' % child
    message += '\n'
    logging.info(message)
    for child in self.children:
      child.EnableStartRequests()

  def AssignPortsRandomly(self):
    """Acquires a random port for each child process."""
    # Bind all sockets before closing any, so the kernel cannot hand the
    # same ephemeral port to two children.
    bound = []
    for child in self.children:
      sock = socket.socket()
      sock.bind(('localhost', 0))
      bound.append(sock)
      child.SetHostPort(self.host, sock.getsockname()[1])
    for sock in bound:
      sock.close()

  def __str__(self):
    result = '[%s]' % self.Type()
    if self.desc:
      result += ' [%s]' % self.desc
    return result

  def SetType(self, process_type):
    """Sets the role of this process; may only be called once.

    Raises:
      Error: On an unknown type or an attempt to change the type.
    """
    if process_type not in DevProcess.TYPES:
      raise Error('Unknown process type: %s' % process_type)
    if self.process_type is not None:
      raise Error('Process type cannot be set more than once.')
    self.process_type = process_type

  def Type(self):
    """Returns the role of this process, or None if not yet set."""
    return self.process_type

  def IsDefault(self):
    """Indicates whether this is the default dev_appserver process."""
    return self.Type() is None

  def IsMaster(self):
    """Indicates whether this is the master process."""
    return self.Type() == DevProcess.TYPE_MASTER

  def IsSubprocess(self):
    """Indicates that this is a subprocess of the dev_appserver."""
    return not (self.IsDefault() or self.IsMaster())

  def IsAppInstance(self):
    """Indicates whether this process represents an application instance."""
    return self.Type() == DevProcess.TYPE_APP_INSTANCE

  def IsBackend(self):
    """Indicates whether this process represents a backend."""
    return self.IsBackendBalancer() or self.IsBackendInstance()

  def IsBackendBalancer(self):
    """Indicates whether this process represents a backend load balancer."""
    return self.Type() == DevProcess.TYPE_BACKEND_BALANCER

  def IsBackendInstance(self):
    """Indicates whether this process represents a backend instance."""
    return self.Type() == DevProcess.TYPE_BACKEND_INSTANCE

  def IsBalancer(self):
    """Indicates whether this process represents a load balancer."""
    return self.IsMaster() or self.IsBackendBalancer()

  def IsInstance(self):
    """Indicates whether this process represents an instance."""
    return self.IsAppInstance() or self.IsBackendInstance()

  def InitBalanceSet(self):
    """Construct a list of instances to balance traffic over."""
    if self.IsMaster():
      self.balance_set = [ self.child_app_instance.port ]
    if self.IsBackendBalancer():
      self.balance_set = []
      for instance in xrange(self.backend_entry.instances):
        port = backends_api._get_dev_port(self.backend_id, instance)
        self.balance_set.append(port)

  def GetBalanceSet(self):
    """Return the set of ports over which this process balances requests."""
    return self.balance_set

  def FailFast(self):
    """Indicates whether this process has fail-fast behavior."""
    if not self.backend_entry:
      return False
    if self.backend_entry.failfast:
      return True
    return False

  def PrintStartMessage(self, app_id, host, port):
    """Print the start message for processes that are started automatically."""
    url = 'http://%s:%d' % (host, port)
    admin_url = '%s/_ah/admin' % url
    if not self.IsSubprocess():
      logging.info('Running application %s on port %d: %s',
                   app_id, port, url)
      logging.info('Admin console is available at: %s',
                   admin_url)

  def Children(self):
    """Returns the children of this process."""
    return self.children

  def MaybeConfigureRemoteDataApis(self):
    """Set up stubs using remote_api as appropriate.
    If this is the API server (or is not multiprocess), return False.
    Otherwise, set up the stubs for data based APIs as remote stubs pointing at
    the API server and return True.
    """
    if self.IsDefault():
      return False
    services = (
        'app_identity_service',
        'capability_service',
        'datastore_v3',
        'mail',
        'memcache',
        'taskqueue',
        'urlfetch',
        'xmpp',
        )
    remote_api_stub.ConfigureRemoteApi(
        self.app_id, PATH_DEV_API_SERVER, lambda: ('', ''),
        servername='%s:%d' % (API_SERVER_HOST, self.api_port),
        services=services, use_remote_datastore=False)
    return True

  def NewAppInfo(self, appinfo):
    """Called when a new appinfo is read from disk on each request.
    The only action we take is to apply backend settings, such as the 'start'
    directive, which adds a handler for /_ah/start.
    Args:
      appinfo: An AppInfoExternal to be used on the next request.
    """
    if self.backends:
      appinfo.backends = self.backends
    if self.IsBackend():
      appinfo.ApplyBackendSettings(self.backend_id)

  def UpdateEnv(self, env_dict):
    """Copies backend port information to the supplied environment dictionary.
    This information is used by the Backends API to resolve backend and instance
    addresses in the dev_appserver.
    User-supplied code has no access to the default environment. This method
    will copy the environment variables needed for the backends api from the
    default environment to the environment where user supplied code runs.
    Args:
      env_dict: Dictionary with the new environment.
    """
    if self.backend_id:
      env_dict['BACKEND_ID'] = self.backend_id
    if self.instance_id is not None:
      env_dict['INSTANCE_ID'] = str(self.instance_id)
    for key in os.environ:
      if key.startswith('BACKEND_PORT'):
        env_dict[key] = os.environ[key]

  def ProcessRequest(self, request, client_address):
    """Handles the SocketServer process_request call.
    If the request is to a backend the request will be handled by a separate
    thread. If the backend is busy a 503 response will be sent.
    If this is a balancer instance each incoming request will be forwarded to
    its own thread and handled there.
    If no backends are configured this override has no effect.
    Args:
      request: the request to process
      client_address: the client address
    """
    assert not self.IsDefault()
    if self.IsBalancer():
      ForwardRequestThread(request, client_address).start()
      return
    assert self.IsAppInstance() or self.IsBackendInstance()
    if self.handle_requests.Active():
      if self.FailFast():
        logging.info('respond busy')
        RespondBusyHandler(request, client_address)
        return
    self.handle_requests.Enqueue(request, client_address)

  def HandleRequest(self, request):
    """Hook that allows the DevProcess a chance to respond to requests.
    This hook is invoked just before normal request dispatch occurs in
    dev_appserver.py.
    Args:
      request: The request to be handled.
    Returns:
      bool: Indicates whether the request was handled here. If False, normal
        request handling should proceed.
    """
    # Until /_ah/start succeeds, a backend instance only accepts the
    # start request itself.
    if self.IsBackendInstance() and not self.started:
      if request.path != '/_ah/start':
        request.send_response(httplib.FORBIDDEN,
                              'Waiting for start request to finish.')
        return True
    return False

  def RequestComplete(self, request, response):
    """Invoked when the process has finished handling a request."""
    rc = response.status_code
    if request.path == '/_ah/start':
      # 2xx or 404 (no start handler configured) both mean 'started'.
      if (rc >= 200 and rc < 300) or rc == 404:
        self.started = True

  def UpdateSystemStub(self, system_service_stub):
    """Copies info about the backends into the system stub."""
    if self.IsDefault():
      return
    system_service_stub.set_backend_info(self.backends)
class HandleRequestThread(threading.Thread):
  """Thread for handling HTTP requests.
  Instances need to be able to respond with 503 when busy with other requests,
  therefore requests are accepted in the main thread and forwarded to this
  serving thread for processing (see DevProcess.ProcessRequest).
  """
  def __init__(self):
    threading.Thread.__init__(self)
    self.setDaemon(True)
    SetThreadName(self, 'HandleRequestThread')
    # True only while a request is actively being handled in run().
    self.active = False
    self.pending = Queue.Queue()
  def Active(self):
    """Indicates whether this thread is busy handling a request."""
    return self.active
  def Enqueue(self, request, client_address):
    """Adds the indicated request to the pending request queue."""
    self.pending.put_nowait((request, client_address))
  def run(self):
    """Takes requests from the queue and handles them."""
    while True:
      request, client_address = self.pending.get()
      self.active = True
      try:
        HandleRequestDirectly(request, client_address)
      except Exception, e:
        logging.info('Exception in HandleRequestThread', exc_info=1)
      finally:
        # Always clear the busy flag so the next request is not rejected.
        self.active = False
class RespondBusyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Handler that answers every request with a 503 Service Unavailable."""

  def __init__(self, request, client_address):
    BaseHTTPServer.BaseHTTPRequestHandler.__init__(
        self, request, client_address, HttpServer())

  def handle_one_request(self):
    """Override: parse the request line, then unconditionally reply 503."""
    self.raw_requestline = self.rfile.readline()
    if not self.raw_requestline:
      self.close_connection = 1
      return
    if self.parse_request():
      self.send_error(httplib.SERVICE_UNAVAILABLE, 'Busy.')
class ForwardRequestThread(threading.Thread):
  """Daemon thread that proxies one incoming request to a free instance."""

  def __init__(self, request, client_address):
    threading.Thread.__init__(self)
    self.setDaemon(True)
    SetThreadName(self, 'ForwardRequestThread')
    self.request = request
    self.client_address = client_address

  def run(self):
    """Hands the captured request off to the balancing handler."""
    ForwardRequestHandler(self.request, self.client_address)
class ForwardRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Forwards the incoming request to next free backend instance."""
  def __init__(self,
               request,
               client_address,
               connection_handler=httplib.HTTPConnection):
    """Constructor extending BaseHTTPRequestHandler.
    Args:
      request: The incoming request.
      client_address: A (ip, port) tuple with the address of the client.
      connection_handler: http library to use when forwarding the connection to
        the next available backend instance. Used for dependency injection.
    """
    self.connection_handler = connection_handler
    # The base constructor synchronously invokes handle_one_request below.
    BaseHTTPServer.BaseHTTPRequestHandler.__init__(self,
                                                   request,
                                                   client_address,
                                                   HttpServer())
  def handle_one_request(self):
    """Override. Invoked from BaseHTTPRequestHandler constructor."""
    self.raw_requestline = self.rfile.readline()
    if not self.raw_requestline:
      self.close_connection = 1
      return
    if not self.parse_request():
      return
    process = GlobalProcess()
    balance_set = process.GetBalanceSet()
    # Read the whole body up front so it can be replayed to each candidate.
    request_size = int(self.headers.get('content-length', 0))
    payload = self.rfile.read(request_size)
    # Try each instance port in order; the first one not replying 503 wins.
    for port in balance_set:
      logging.debug('balancer to port %d', port)
      connection = self.connection_handler(process.host, port=port)
      connection.response_class = ForwardResponse
      connection.request(self.command, self.path, payload, dict(self.headers))
      try:
        response = connection.getresponse()
      except httplib.HTTPException, e:
        self.send_error(httplib.INTERNAL_SERVER_ERROR, str(e))
        return
      if response.status != httplib.SERVICE_UNAVAILABLE:
        self.wfile.write(response.data)
        return
    # Every instance reported busy.
    self.send_error(httplib.SERVICE_UNAVAILABLE, 'Busy')
class ForwardResponse(httplib.HTTPResponse):
  """HTTPResponse variant that eagerly reads and retains the raw body.

  Balancer processes install this as the connection's response_class so a
  backend's reply bytes are available in .data for replay to the client.
  """

  def __init__(self, sock, debuglevel=0, strict=0, method=None):
    httplib.HTTPResponse.__init__(self, sock, debuglevel, strict, method)
    body = self.fp.read()
    self.data = body
    # Re-wrap the consumed stream so any later reads still succeed.
    self.fp = cStringIO.StringIO(body)
# Module-level singleton describing the role of this dev_appserver process.
# Init() configures it; the rest of the module reads it via GlobalProcess().
_dev_process = DevProcess()
def GlobalProcess():
  """Returns a global DevProcess object representing the current process."""
  return _dev_process
def Enabled():
  """Indicates whether the dev_appserver is running in multiprocess mode."""
  process = GlobalProcess()
  return not process.IsDefault()
def HttpServer():
  """Fetches the HTTPServer instance owned by the global DevProcess."""
  process = GlobalProcess()
  return process.HttpServer()
def HandleRequestDirectly(request, client_address):
  """Dispatches a request through the stock HTTPServer machinery,
  bypassing any multiprocess queueing or balancing."""
  server = HttpServer()
  BaseHTTPServer.HTTPServer.process_request(server, request, client_address)
def PosixShutdown():
  """Kills a posix process with os.kill.

  Sends SIGTERM to every live child, waits briefly, escalates to SIGKILL
  for any survivors, then reaps the children to avoid zombies.
  """
  dev_process = GlobalProcess()
  children = dev_process.Children()
  for term_signal in (signal.SIGTERM, signal.SIGKILL):
    for child in children:
      if child.process is None:
        continue
      if child.process.returncode is not None:
        continue
      pid = child.process.pid
      try:
        logging.debug('posix kill %d with signal %d', pid, term_signal)
        os.kill(pid, term_signal)
      except OSError, err:
        logging.error('Error encountered sending pid %d signal %d:%s\n',
                      pid, term_signal, err)
      # NOTE(review): this break exits the signal loop after the first
      # round of SIGTERMs, so SIGKILL is never sent -- confirm intended.
      break
    time.sleep(0.2)
  for child in children:
    if child.process is None:
      continue
    if child.process.returncode is not None:
      continue
    try:
      child.process.wait()
    except OSError, e:
      # ECHILD means the child was already reaped; anything else is real.
      if e.errno != errno.ECHILD:
        raise e
def Shutdown():
  """Terminates all child processes; a no-op unless this is the master."""
  process = GlobalProcess()
  if not process.IsMaster():
    return
  if os.name != 'nt':
    PosixShutdown()
  else:
    import ctypes
    for child in process.Children():
      logging.debug('windows kill ' + str(child.process.pid))
      ctypes.windll.kernel32.TerminateProcess(int(child.process._handle), -1)
  process.child_api_server.Quit()
def SetLogPrefix(prefix):
  """Installs a log formatter that tags every record with the given prefix.

  Args:
    prefix: Object whose str() is prepended to each log line.
  """
  pattern = ' [%(filename)s:%(lineno)d] %(levelname)s %(message)s'
  formatter = logging.Formatter(str(prefix) + pattern)
  logging._acquireLock()
  try:
    for entry in logging._handlerList:
      # Newer logging versions store weak references in _handlerList.
      handler = entry() if isinstance(entry, weakref.ref) else entry
      if handler:
        handler.setFormatter(formatter)
  finally:
    logging._releaseLock()
def Init(argv, options, root_path, appinfo):
  """Enter multiprocess mode, if required.
  The dev_appserver runs in multiprocess mode if any Backends are configured.
  The initial process becomes a "master" which acts as a router for the app, and
  centralized memcache/datastore API server for sharing persistent state.
  This method works by configuring the global DevProcess object, which is
  referenced by other files in the dev_appserver when necessary. The DevProcess
  contains state indicating which role the current process plays in the
  multiprocess architecture.
  The master process creates and shuts down subprocesses. A separate process is
  created to represent an instance of the application, and a separate process is
  created for each backend (to act as a load balancer) and for each backend
  instance.
  On shutdown, the master process kills all subprocesses before exiting.
  Args:
    argv: The command line arguments used when starting the main application.
    options: Parsed dictionary of the command line arguments.
    root_path: Root directory of the application.
    appinfo: An AppInfoExternal object representing a parsed app.yaml file.
  Raises:
    Error: On duplicate backend names or too many configured instances.
  """
  if ARG_BACKENDS not in options:
    return
  backends_path = os.path.join(root_path, 'backends.yaml')
  if not os.path.exists(backends_path):
    backends = []
  else:
    backends_fh = open(backends_path)
    try:
      backend_info = backendinfo.LoadBackendInfo(backends_fh.read())
    finally:
      backends_fh.close()
    backends = backend_info.backends
  # Validate the backend configuration before starting anything.
  backend_set = set()
  for backend in backends:
    if backend.name in backend_set:
      raise Error('Duplicate backend: %s' % backend.name)
    if backend.instances is None:
      backend.instances = 1
    elif backend.instances > BACKEND_MAX_INSTANCES:
      # BUG FIX: the original passed BACKEND_MAX_INSTANCES as a second
      # exception argument instead of formatting it into the message.
      raise Error('Maximum number of instances is %d' % BACKEND_MAX_INSTANCES)
    backend_set.add(backend.name)
  process = _dev_process
  process.Init(appinfo, backends, options)
  if process.IsDefault():
    logging.info('Default process')
    return
  SetLogPrefix(process)
  if process.IsMaster():
    process.StartChildren(argv, options)
  process.InitBalanceSet()
  if process.IsMaster():
    options['require_indexes'] = False
  else:
    # Subprocesses share persistent state through the API Server; they
    # must not re-clear it or re-require indexes independently.
    options['require_indexes'] = True
    options['clear_datastore'] = False
    options['clear_prospective_search'] = False
| |
# -*- coding: utf-8 -*-
u"""zgoubi datafile parser
:copyright: Copyright (c) 2018 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcompat
from pykern import pkio
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
from sirepo import simulation_db
from sirepo.template.lattice import LatticeUtil
from sirepo.template import template_common
from sirepo.template import zgoubi_parser
from sirepo.template.template_common import ModelUnits
import glob
import os.path
import re
import sirepo.sim_data
import zipfile
# Populated lazily by _init_model_units() with the zgoubi unit conversions.
MODEL_UNITS = None
_SIM_DATA, SIM_TYPE, SCHEMA = sirepo.sim_data.template_globals('zgoubi')
# Set via import_file(unit_test_mode=True) to alter behavior under tests.
_UNIT_TEST_MODE = False
def import_file(text, unit_test_mode=False):
    """Builds sirepo simulation data from the text of a zgoubi input file."""
    if unit_test_mode:
        global _UNIT_TEST_MODE
        _UNIT_TEST_MODE = unit_test_mode
    data = simulation_db.default_data(SIM_TYPE)
    #TODO(pjm): need a common way to clean-up/uniquify a simulation name from imported text
    title, elements, unhandled_elements = zgoubi_parser.parse_file(text, 1)
    # Collapse internal whitespace, then trim the ends.
    title = re.sub(r'^\s+|\s+$', '', re.sub(r'\s+', ' ', title))
    data.models.simulation.name = title or 'zgoubi'
    if unhandled_elements:
        data.models.simulation.warnings = 'Unsupported Zgoubi elements: {}'.format(', '.join(unhandled_elements))
    info = _validate_and_dedup_elements(data, elements)
    _validate_element_names(data, info)
    LatticeUtil(data, SCHEMA).sort_elements_and_beamlines()
    if 'missingFiles' in info and info.missingFiles:
        data.error = 'Missing data files'
        data.missingFiles = info.missingFiles
    _update_report_parameters(data)
    return data
def is_zip_file(path):
    """Returns a truthy regex match when path ends in .zip, else None."""
    # TODO(e-carlin): use zipfile.is_zip_file
    name = str(path)
    return re.search(r'\.zip$', name, re.IGNORECASE)
def tosca_info(tosca):
    """Resolves a TOSCA model's magnet file into file list and length.

    Returns a PKDict with either an 'error' message or a 'toscaInfo' dict
    containing toscaLength, fileList and magnetFile.
    """
    # determine the list of available files (from zip if necessary)
    # compute the tosca length from datafile
    #TODO(pjm): keep a cache on the tosca model?
    n = _SIM_DATA.lib_file_name_with_model_field('TOSCA', 'magnetFile', tosca.magnetFile)
    if not _SIM_DATA.lib_file_exists(n):
        return PKDict(
            error='missing or invalid file: {}'.format(tosca.magnetFile),
        )
    error = None
    length = None
    datafile = _SIM_DATA.lib_file_abspath(n)
    if is_zip_file(n):
        with zipfile.ZipFile(str(datafile), 'r') as z:
            filenames = []
            if 'fileNames' not in tosca or not tosca.fileNames:
                tosca.fileNames = []
            for info in z.infolist():
                filenames.append(info.filename)
                # Only compute the length once, from the first archive
                # member that the model has selected.
                if not length and info.filename in tosca.fileNames:
                    length, error = _tosca_length(
                        tosca,
                        pkcompat.from_bytes(z.read(info)).splitlines(),
                    )
                    if length:
                        error = None
    else:
        filenames = [tosca.magnetFile]
        with pkio.open_text(datafile) as f:
            length, error = _tosca_length(tosca, f)
    if error:
        return PKDict(error=error)
    return PKDict(
        toscaInfo=PKDict(
            toscaLength=length,
            fileList=sorted(filenames) if filenames else None,
            magnetFile=tosca.magnetFile,
        ),
    )
def _init_model_units():
    """Build the ModelUnits registry for zgoubi element fields.

    Maps each model's fields to a unit conversion between display units
    (m, rad) and zgoubi native units (cm, mrad, deg). Values are either
    conversion names understood by ModelUnits (e.g. 'cm_to_m') or
    callables taking (value, is_native) for fields with custom handling.
    """
    # Convert element units (m, rad) to the required zgoubi units (cm, mrad, degrees)
    def _changref2(transforms, is_native):
        # Scale each CHANGREF2 transform in place: shifts (XS/YS/ZS) are
        # lengths, rotations (XR/YR/ZR) are angles.
        # list of cm or deg values
        for t in transforms:
            if t.transformType == 'none':
                continue
            if t.transformType in ('XS', 'YS', 'ZS'):
                t.transformValue = ModelUnits.scale_value(t.transformValue, 'cm_to_m', is_native)
            elif t.transformType in ('XR', 'YR', 'ZR'):
                t.transformValue = ModelUnits.scale_value(t.transformValue, 'deg_to_rad', is_native)
            else:
                assert False, 'invalid transformType: {}'.format(t.transformType)
        return transforms
    def _il(v, is_native):
        # Translate the IL output flag between schema value '2' and
        # native value '1' ('0' passes through unchanged).
        if v == '0':
            return v
        if is_native:
            return '1' if v == '2' else '0'
        return '2' if v == '1' else '0'
    def _marker_plot(v, is_native):
        # MARKER plot flag: native keeps the raw value; otherwise emit the
        # quoted '.plt' (or empty) string expected by the zgoubi input file.
        if is_native:
            return v
        else:
            return '"{}"'.format('.plt' if int(v) else '')
    def _xpas(v, is_native):
        # XPAS (integration step) may be a plain length, or a '#a|b|c'
        # step-triple which must not be unit-scaled.
        if is_native:
            if re.search(r'\#', v):
                return v
            v2 = zgoubi_parser.parse_float(v)
            if v2 > 1e10:
                # old step size format
                m = re.search(r'^0*(\d+)\.0*(\d+)', v)
                assert m, 'XPAS failed to parse step size: {}'.format(v)
                return '#{}|{}|{}'.format(m.group(2), m.group(1), m.group(2))
        else:
            if re.search(r'\#', str(v)):
                v = re.sub(r'^#', '', v)
                return '[{}]'.format(','.join(v.split('|')))
        return ModelUnits.scale_value(v, 'cm_to_m', is_native)
    return ModelUnits(PKDict(
        bunch=PKDict(
            YR='cm_to_m',
            TR='mrad_to_rad',
            ZR='cm_to_m',
            PR='mrad_to_rad',
            SR='cm_to_m',
        ),
        AUTOREF=PKDict(
            XCE='cm_to_m',
            YCE='cm_to_m',
            ALE='mrad_to_rad',
        ),
        BEND=PKDict(
            l='cm_to_m',
            IL=_il,
            X_E='cm_to_m',
            LAM_E='cm_to_m',
            X_S='cm_to_m',
            LAM_S='cm_to_m',
            XPAS=_xpas,
            XCE='cm_to_m',
            YCE='cm_to_m',
        ),
        CAVITE=PKDict(
        ),
        CHANGREF=PKDict(
            ALE='deg_to_rad',
            XCE='cm_to_m',
            YCE='cm_to_m',
        ),
        CHANGREF2=PKDict(
            subElements=_changref2,
        ),
        COLLIMA=PKDict(
            C1='cm_to_m',
            C2='cm_to_m',
            C3='cm_to_m',
            C4='cm_to_m',
        ),
        DIPOLE=PKDict(
            AT='deg_to_rad',
            ACN='deg_to_rad',
            RM='cm_to_m',
            LAM_E='cm_to_m',
            SHIFT_E='cm_to_m',
            OMEGA_E='deg_to_rad',
            THETA_E='deg_to_rad',
            R1_E='cm_to_m',
            U1_E='cm_to_m',
            U2_E='cm_to_m',
            R2_E='cm_to_m',
            LAM_S='cm_to_m',
            SHIFT_S='cm_to_m',
            OMEGA_S='deg_to_rad',
            THETA_S='deg_to_rad',
            R1_S='cm_to_m',
            U1_S='cm_to_m',
            U2_S='cm_to_m',
            R2_S='cm_to_m',
            LAM_L='cm_to_m',
            SHIFT_L='cm_to_m',
            OMEGA_L='deg_to_rad',
            THETA_L='deg_to_rad',
            R1_L='cm_to_m',
            U1_L='cm_to_m',
            U2_L='cm_to_m',
            R2_L='cm_to_m',
            R3_L='cm_to_m',
            RE='cm_to_m',
            TE='deg_to_rad',
            RS='cm_to_m',
            TS='deg_to_rad',
        ),
        DRIFT=PKDict(
            l='cm_to_m',
        ),
        ffaDipole=PKDict(
            ACN='deg_to_rad',
            DELTA_RM='cm_to_m',
            G0_E='cm_to_m',
            SHIFT_E='cm_to_m',
            OMEGA_E='deg_to_rad',
            THETA_E='deg_to_rad',
            R1_E='cm_to_m',
            U1_E='cm_to_m',
            U2_E='cm_to_m',
            R2_E='cm_to_m',
            G0_S='cm_to_m',
            SHIFT_S='cm_to_m',
            OMEGA_S='deg_to_rad',
            THETA_S='deg_to_rad',
            R1_S='cm_to_m',
            U1_S='cm_to_m',
            U2_S='cm_to_m',
            R2_S='cm_to_m',
            G0_L='cm_to_m',
            SHIFT_L='cm_to_m',
            OMEGA_L='deg_to_rad',
            THETA_L='deg_to_rad',
            R1_L='cm_to_m',
            U1_L='cm_to_m',
            U2_L='cm_to_m',
            R2_L='cm_to_m',
        ),
        ffaSpiDipole=PKDict(
            ACN='deg_to_rad',
            DELTA_RM='cm_to_m',
            G0_E='cm_to_m',
            SHIFT_E='cm_to_m',
            OMEGA_E='deg_to_rad',
            XI_E='deg_to_rad',
            G0_S='cm_to_m',
            SHIFT_S='cm_to_m',
            OMEGA_S='deg_to_rad',
            XI_S='deg_to_rad',
            G0_L='cm_to_m',
            SHIFT_L='cm_to_m',
            OMEGA_L='deg_to_rad',
            XI_L='deg_to_rad',
        ),
        FFA=PKDict(
            IL=_il,
            AT='deg_to_rad',
            RM='cm_to_m',
            XPAS='cm_to_m',
            RE='cm_to_m',
            RS='cm_to_m',
        ),
        FFA_SPI=PKDict(
            IL=_il,
            AT='deg_to_rad',
            RM='cm_to_m',
            XPAS='cm_to_m',
            RE='cm_to_m',
            RS='cm_to_m',
        ),
        MARKER=PKDict(
            plt=_marker_plot,
        ),
        MULTIPOL=PKDict(
            l='cm_to_m',
            IL=_il,
            R_0='cm_to_m',
            X_E='cm_to_m',
            LAM_E='cm_to_m',
            X_S='cm_to_m',
            LAM_S='cm_to_m',
            XPAS=_xpas,
            XCE='cm_to_m',
            YCE='cm_to_m',
        ),
        particleCoordinate=PKDict(
            Y='cm_to_m',
            Z='cm_to_m',
            S='cm_to_m',
            T='mrad_to_rad',
            P='mrad_to_rad',
        ),
        QUADRUPO=PKDict(
            l='cm_to_m',
            IL=_il,
            R_0='cm_to_m',
            X_E='cm_to_m',
            LAM_E='cm_to_m',
            X_S='cm_to_m',
            XPAS=_xpas,
            LAM_S='cm_to_m',
            XCE='cm_to_m',
            YCE='cm_to_m',
        ),
        SEXTUPOL=PKDict(
            l='cm_to_m',
            IL=_il,
            R_0='cm_to_m',
            X_E='cm_to_m',
            LAM_E='cm_to_m',
            X_S='cm_to_m',
            XPAS=_xpas,
            LAM_S='cm_to_m',
            XCE='cm_to_m',
            YCE='cm_to_m',
        ),
        SOLENOID=PKDict(
            l='cm_to_m',
            IL=_il,
            R_0='cm_to_m',
            X_E='cm_to_m',
            X_S='cm_to_m',
            XPAS=_xpas,
            XCE='cm_to_m',
            YCE='cm_to_m',
        ),
        TOSCA=PKDict(
            IL=_il,
            A='cm_to_m',
            B='cm_to_m',
            C='cm_to_m',
            XPAS=_xpas,
            XCE='cm_to_m',
            YCE='cm_to_m',
            RE='cm_to_m',
            RS='cm_to_m',
        ),
    ))
def _tosca_length(tosca, lines):
    """Compute the magnet length from column 2 of a TOSCA data file.

    Skips tosca.headerLineCount header lines, parses the third whitespace
    column of every remaining line, and returns
    ``((max - min) / 100 * tosca.XN, None)`` or ``(None, error_message)``
    when no column data was found.
    """
    col2 = []
    for lineno, line in enumerate(lines, start=1):
        if lineno <= tosca.headerLineCount:
            continue
        # some columns may not have spaces between values, ex:
        #  -1.2000E+02 0.0000E+00-3.5000E+01 3.1805E-03-1.0470E+01 2.0089E-03-2.4481E-15
        parts = re.sub(r'(E[+\-]\d+)(\-)', r'\1 \2', line, flags=re.IGNORECASE).split()
        if len(parts) > 2:
            try:
                col2.append(zgoubi_parser.parse_float(parts[2]))
            except ValueError:
                pass
    if not col2:
        return None, 'missing column 2 data in file: {}'.format(tosca.magnetFile)
    # scaled by unit conversion XN
    return (max(col2) - min(col2)) / 100.0 * tosca.XN, None
def _update_report_parameters(data):
if data.models.bunch.method == 'OBJET2.1':
for name in ('bunchAnimation', 'bunchAnimation2', 'energyAnimation'):
m = data.models[name]
m.showAllFrames = '1'
mparticleSelector = '1'
def _validate_and_dedup_elements(data, elements):
    """Validate parsed elements, de-duplicate them, and build one beamline.

    Creates a single beamline 'BL1' in data.models, appends an item id for
    every named element (reusing ids for duplicate element bodies), and
    merges un-named elements into data.models as command/settings models.

    Returns a PKDict with parallel lists ``ids``, ``names``, ``elements``
    and a ``missingFiles`` list collected by _validate_model.
    """
    beamline = []
    current_id = 1
    data.models.beamlines = [
        PKDict(
            name='BL1',
            id=current_id,
            items=beamline,
        ),
    ]
    data.models.simulation.activeBeamlineId = current_id
    data.models.simulation.visualizationBeamlineId = current_id
    info = PKDict(
        ids=[],
        names=[],
        elements=[],
        missingFiles=[],
    )
    for el in elements:
        _validate_model(el.type, el, info.missingFiles)
        if 'name' in el:
            name = el.name
            #TODO(pjm): don't de-duplicate certain types
            # remove the name before comparison so identical bodies with
            # different names collapse into one element
            if el.type != 'MARKER' and not re.search(r'^DUMMY ', name):
                del el['name']
            if el not in info.elements:
                current_id += 1
                info.ids.append(current_id)
                info.names.append(name)
                info.elements.append(el)
            # beamline item references the (possibly shared) element id
            beamline.append(info.ids[info.elements.index(el)])
        else:
            # un-named entries are global models (commands/settings)
            if el.type in data.models:
                pkdlog('updating existing {} model', el.type)
                data.models[el.type].update(el)
            else:
                _SIM_DATA.update_model_defaults(el, el.type)
                data.models[el.type] = el
    return info
def _validate_element_names(data, info):
    """Assign cleaned, unique names to the de-duplicated elements.

    Backslashes become underscores, trailing '_' or '#' are stripped,
    empty names fall back to the first two characters of the element
    type, and collisions get a numeric suffix (name2, name3, ...).
    The finished elements are appended to data.models.elements.
    """
    used = PKDict()
    for idx, el in enumerate(info.elements):
        _SIM_DATA.update_model_defaults(el, el.type)
        el._id = info.ids[idx]
        name = re.sub(r'(\_|\#)$', '', re.sub(r'\\', '_', info.names[idx]))
        if not name:
            name = el.type[:2]
        if name in used:
            suffix = 2
            while '{}{}'.format(name, suffix) in used:
                suffix += 1
            name = '{}{}'.format(name, suffix)
        el.name = name
        used[name] = True
        data.models.elements.append(el)
def _validate_field(model, field, model_info):
    """Coerce and validate one model field against its schema entry.

    Floats and integers are parsed in place; FileNameArray fields are
    delegated to _validate_file_names (whose return value — a missing
    file description or None — is propagated); enum fields falling
    outside the schema are logged and reset to the schema default.
    """
    if field in ('_id', 'type'):
        return
    assert field in model_info, \
        'unknown model field: {}.{}, value: {}'.format(model.type, field, model[field])
    field_info = model_info[field]
    field_type = field_info[1]
    if field_type == 'Float':
        model[field] = zgoubi_parser.parse_float(model[field])
        return
    if field_type == 'Integer':
        model[field] = int(model[field])
        return
    if field_type == 'FileNameArray':
        return _validate_file_names(model, model[field])
    if field_type in SCHEMA.enum:
        if any(v[0] == model[field] for v in SCHEMA.enum[field_type]):
            return
        pkdlog('invalid enum value, {}.{} {}: {}', model.type, field, field_type, model[field])
        # fall back to the schema default value
        model[field] = field_info[2]
def _validate_file_names(model, file_names):
    """Resolve a TOSCA element's data files against the simulation lib.

    Flattens the names to basenames, then looks for either a single lib
    file or a zip archive containing all of the named files. On success
    sets model.magnetFile (and model.l from the computed tosca length);
    otherwise returns a PKDict mapping model.type to the missing names.
    """
    if _UNIT_TEST_MODE:
        return
    #TODO(pjm): currently specific to TOSCA element, but could be generalizaed on model.type
    # flatten filenames, search indiviual and zip files which contains all files, set magnetFile if found
    for idx in range(len(file_names)):
        file_names[idx] = os.path.basename(file_names[idx])
    # lib file prefix, e.g. "TOSCA-magnetFile"
    file_type = '{}-{}'.format(model.type, 'magnetFile')
    magnet_file = None
    if len(file_names) == 1:
        name = file_names[0]
        target = _SIM_DATA.lib_file_name_with_model_field(model.type, 'magnetFile', name)
        if _SIM_DATA.lib_file_exists(target):
            magnet_file = name
    # otherwise look for a zip archive holding every named file
    for f in _SIM_DATA.zgoubi_lib_files_with_zip():
        zip_has_files = True
        zip_names = []
        with zipfile.ZipFile(str(f), 'r') as z:
            for info in z.infolist():
                zip_names.append(info.filename)
        for name in file_names:
            if name not in zip_names:
                zip_has_files = False
                break
        if zip_has_files:
            # strip the "<type>-magnetFile-" prefix from the lib filename
            magnet_file = os.path.basename(str(f))[len(file_type) + 1:]
            break
    if magnet_file:
        model.magnetFile = magnet_file
        info = tosca_info(model)
        if 'toscaInfo' in info:
            model.l = info.toscaInfo.toscaLength
        return
    # not found: report the missing file names keyed by element type
    return PKDict({
        model.type: sorted(file_names),
    })
def _validate_model(model_type, model, missing_files):
    """Validate one parsed model against the schema.

    Scales native units into display units, recursively validates any
    sub-model lists, validates every scalar field, and collects missing
    file reports into ``missing_files``.
    """
    assert model_type in SCHEMA.model, \
        'element type missing from schema: {}'.format(model_type)
    model_info = SCHEMA.model[model_type]
    if 'name' in model_info and 'name' not in model:
        model.name = ''
    MODEL_UNITS.scale_from_native(model_type, model)
    for field in list(model.keys()):
        value = model[field]
        if isinstance(value, list) and value and 'type' in value[0]:
            # a list of sub-models: validate each one recursively
            for sub_model in value:
                _validate_model(sub_model.type, sub_model, missing_files)
        else:
            err = _validate_field(model, field, model_info)
            if err:
                missing_files.append(err)
MODEL_UNITS = _init_model_units()
| |
#Cancer Sim
from numpy import *
import scipy as sp
import pylab as py
import math
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.colorbar as cb
import matplotlib.pyplot as plt
import cPickle as pickle
from scipy.spatial.distance import euclidean
from math import pow
from scipy.spatial import Delaunay
#from scipy.spatial import KDTree
from scipy.spatial import cKDTree
from hybridKDTree import KDTree
import random
import time
import pprint
#XSIZE = 20
#YSIZE = 20
from neighborlist import NeighborList
from helper import norm, unitize, disp_func, unitize_arr
import links, cells
from logger import logger
base_logger = logger.getChild('cancer')
base_logger.info('Inside the cancer.py module')
########################################################
### Simulation Class ###################################
########################################################
#try to speed things up a little bit
from scipy import zeros_like, nan_to_num, allclose
import numexpr as ne
import os
if 'CANCERC' in os.environ:
CANCERC = True
#import pyximport
#pyximport.install()
from forcefunccelltypes import force_func_hertz, force_func_basal, norm, disp_func
base_logger.info('CYTHON SUPPORT')
else:
CANCERC = False
force_func_basal = None
force_func_hertz = None
base_logger.info('NO CYTHON SUPPORT')
class CancerSim:
"""
The main Cancer Simulation Class.
Creates an array of Cells, allows for the designation of cancer cells
And the evolution of the cells thereafter.
"""
    def __init__(self,config):
        """ Initialize the simulation from a config dict.

        The config supplies the box size, RNG seed, force parameters,
        basal-membrane geometry, output filenames and per-cell-type
        parameter dicts.
        """
        #load the configs
        self.config = config
        self.XSIZE = config['XSIZE']
        self.YSIZE = config['YSIZE']
        self.boxsize = (self.XSIZE,self.YSIZE)
        # fall back to a wall-clock seed so runs differ unless pinned
        if config['seed'] is None:
            self.seed = int(time.time())
        else:
            self.seed = config['seed']
        self.xi = config['force_cutoff']
        self.a = config['force_magnitude']
        self.basalstrength = config['force_magnitude_basal']
        self.basalcutoff = config['force_cutoff_basal']
        self.basal_height = config['basal_height']
        self.basal_wavenumber = config['basal_wavenumber']
        self.basal_amplitude = config['basal_amplitude']
        self.pressure_filename = config['pressure_filename']
        self.cancer_evolution_filename = config['cancer_evolution_filename']
        # seed both the scipy/numpy RNG and the stdlib RNG
        sp.random.seed(self.seed)
        random.seed(self.seed)
        #KDTree
        #self._kdtree = None
        #self._kdtree_cache_T = -1
        self._updated = True
        self.T = 0
        # cell types (should be arguments)
        self.cancer = cells.CellType(**config['cancer_cell_params'])
        self.epidermal = cells.CellType(**config['epidermal_cell_params'])
        self.basal = cells.CellType(**config['basal_cell_params'])
        self.dermal = cells.CellType(**config['dermal_cell_params'])
        self.corneum = cells.CellType(**config['stratum_corneum_cell_params'])
        self.num_cells = 0
        # containers
        self.links = links.Links()
        self._cell_arr = sp.array([])
        self.cells = []
        # ghost cells mirror cells near the periodic x boundary
        self._ghosts = []
        self._ghost_cutoff = 4
        self._ghost_offset = sp.array([self.boxsize[0],0.])
        self.cancer_cells = []
        self.logger = base_logger.getChild('CancerSim')
        self.logger.info('Initializing CancerSim')
        self.neighs = None
    def _setup(self):
        """Build and initialize the tissue.

        Creates the triangular lattice, jiggles positions, links
        neighbors via Delaunay triangulation, freezes link rest lengths,
        then seeds the first cancer cell near the basal membrane curve.
        """
        self._triang_lattice()
        self.jiggle(sigma=self.config['jiggle_sigma'])
        self.delaunay()
        self._freeze_links()
        XSIZE, YSIZE = self.boxsize
        # spatial frequency of the sinusoidal basal membrane
        period = 2*3.141592*self.basal_wavenumber/XSIZE
        self.add_cancer_cell([self.XSIZE/2.+self.config['first_cancer_cell_xoffset'], self.basal_height + self.basal_amplitude*sin((self.XSIZE/2+self.config['first_cancer_cell_xoffset'])*period) + self.config['first_cancer_cell_yoffset']], self.config['first_cancer_cell_radius'])
    def _triang_lattice(self):
        """ Create a triangular grid of points """
        XSIZE, YSIZE = self.boxsize
        # spatial frequency of the sinusoidal basal membrane
        period = 2*3.141592*self.basal_wavenumber/XSIZE
        self.logger.info('Setting up the Triangular Lattice...')
        #setup the epicells
        # epidermal cells fill the region above the basal curve; each row
        # of the triangular lattice has an offset half-spacing partner row
        epispacing = self.epidermal.L
        xspace,yspace = epispacing , epispacing * sp.sqrt(3)
        for i in sp.arange(0,XSIZE,xspace):
            for ind,j in enumerate(sp.arange(self.basal_height-self.basal_amplitude+5.0*self.basalcutoff,YSIZE,yspace)):
                if ind:
                    pass
                if j >= self.basal_height+self.basal_amplitude*sin(i*period)+5.0*self.basalcutoff :
                    cell1 = cells.Cell([i,j],self.epidermal,self.num_cells)
                    #print 'added epicell at', i, j
                    self.add_cell(cell1)
                if (j+0.5*yspace) > self.basal_height+self.basal_amplitude*sin((i+0.5*xspace)*period) :
                    cell2 = cells.Cell([i+0.5*xspace,j+0.5*yspace],self.epidermal,self.num_cells)
                    #print 'added epicell at', i+0.5*xspace, j+0.5*yspace
                    self.add_cell(cell2)
                #add ghosts for first few layers
                # ghosts duplicate boundary cells across the periodic x edge
                if i<self._ghost_cutoff:
                    if ind:
                        if j >= self.basal_height+self.basal_amplitude*sin(i*period)+5.0*self.basalcutoff :
                            ghost1 = cells.GhostCell(cell1,XSIZE,1)
                            self._ghosts.append(ghost1)
                        if (j+0.5*yspace) > self.basal_height+self.basal_amplitude*sin((i+0.5*xspace)*period) :
                            ghost2 = cells.GhostCell(cell2,XSIZE,1)
                            self._ghosts.append(ghost2)
                #add ghosts for last few layers
                if i>(XSIZE-self._ghost_cutoff):
                    if ind:
                        if j >= self.basal_height+self.basal_amplitude*sin(i*period)+5.0*self.basalcutoff :
                            ghost1 = cells.GhostCell(cell1,XSIZE,-1)
                            self._ghosts.append(ghost1)
                        if (j+0.5*yspace) > self.basal_height+self.basal_amplitude*sin((i+0.5*xspace)*period) :
                            ghost2 = cells.GhostCell(cell2,XSIZE,-1)
                            self._ghosts.append(ghost2)
        #setup the bottom cells
        # dermal cells fill the region below the basal curve, built top-down
        dermalspacing = self.dermal.L
        xspace,yspace = dermalspacing , dermalspacing*sp.sqrt(3)
        for i in sp.arange(0,XSIZE,xspace):
            for ind,j in enumerate(sp.arange(self.basal_height+self.basal_amplitude-5.0*self.basalcutoff,0,-yspace)):
                if j<= self.basal_height+self.basal_amplitude*sin(i*period)-5.0*self.basalcutoff :
                    cell1 = cells.Cell([i,j],self.dermal,self.num_cells)
                    #print 'added dermacell at', i, j
                    self.add_cell(cell1)
                if ind and (j+0.5*yspace) <= self.basal_height+self.basal_amplitude*sin((i+0.5*xspace)*period)-5.0*self.basalcutoff:
                    cell2 = cells.Cell([i+0.5*xspace,j+0.5*yspace],self.dermal,self.num_cells)
                    #print 'added dermacell at', i+0.5*xspace, j+0.5*yspace
                    self.add_cell(cell2)
                #add ghosts for first few layers
                # NOTE(review): cell1/cell2 here may be carried over from a
                # previous iteration if the conditions above were false
                if i<self._ghost_cutoff:
                    if j<= self.basal_height+self.basal_amplitude*sin(i*period)-5*self.basalcutoff :
                        ghost1 = cells.GhostCell(cell1,XSIZE,1)
                        ghost2 = cells.GhostCell(cell2,XSIZE,1)
                        self._ghosts.extend([ghost1,ghost2])
                #add ghosts for last few layers
                if i>(XSIZE-self._ghost_cutoff):
                    if j<= self.basal_height+self.basal_amplitude*sin(i*period)-5.0*self.basalcutoff :
                        ghost1 = cells.GhostCell(cell1,XSIZE,-1)
                        ghost2 = cells.GhostCell(cell2,XSIZE,-1)
                        self._ghosts.extend([ghost1,ghost2])
        #setup the middle cells
        # basal membrane cells sit exactly on the sinusoidal curve
        basalspacing = self.basal.L
        for i in sp.arange(0,XSIZE,basalspacing/2):
            cell = cells.Cell([i,self.basal_height+self.basal_amplitude*sin(i*period)],self.basal,self.num_cells)
            #print 'added basalcell at', i, self.basal_height+self.basal_amplitude*sin(i*period)
            self.add_cell(cell)
            if i<self._ghost_cutoff:
                ghost = cells.GhostCell(cell,XSIZE,1)
                self._ghosts.append(ghost)
            if i>(XSIZE-self._ghost_cutoff):
                ghost = cells.GhostCell(cell,XSIZE,-1)
                self._ghosts.append(ghost)
        #setup the corneum cells
        # stratum corneum cells cap the tissue just above YSIZE
        corneumspacing = self.corneum.L
        for i in sp.arange(0,XSIZE,corneumspacing):
            cell = cells.Cell([i,YSIZE+2.0*self.basalcutoff],self.corneum,self.num_cells)
            #print 'added corneumcell at', i, YSIZE
            self.add_cell(cell)
            if i<self._ghost_cutoff:
                ghost = cells.GhostCell(cell,XSIZE,1)
                self._ghosts.append(ghost)
            if i>(XSIZE-self._ghost_cutoff):
                ghost = cells.GhostCell(cell,XSIZE,-1)
                self._ghosts.append(ghost)
        self.logger.info('Set up the Triangular Lattice')
def get_pos_arr(self,force=False):
""" Get an array of all of the cell positions """
#if self._updated is False or force:
# return self._cell_arr
self._cell_arr = sp.zeros((len(self.cells),2))
for (i,cell) in enumerate(self.cells):
self._cell_arr[i] = cell.pos
self._updated = False
return self._cell_arr
def get_radius_arr(self):
rad_arr=sp.zeros(len(self.cells))
for (i,cell) in enumerate(self.cells):
rad_arr[i] = cell.radius
return rad_arr
def _get_kdtree(self,force=False,new=True):
""" Generate a KDTree for the cells,
allows for efficient geometric neighbor computation """
#if new or self._kdtree_cache_T != self.T or self._updated:
pos = self.get_pos_arr(force).copy()
_kdtree = KDTree(pos)
return _kdtree
def _get_ckdtree(self,force=False):
""" Generate a cKDTree """
pos = self.get_pos_arr(force).copy()
return cKDTree(pos)
def _query_point(self,x,r,eps=None):
""" Get all of the cell inds near point, with radius r """
kdtree = self._get_kdtree()
if eps:
cell_inds = kdtree.query_ball_point(x,r,eps)
else:
cell_inds = kdtree.query_ball_point(x,r)
cells = [ self.cells[ind] for ind in cell_inds ]
return cells
def _get_vel_arr(self):
""" Get an array of all of the cell velocities """
vel_arr = sp.zeros((self.num_cells,2))
for (i,cell) in enumerate(self.cells):
vel_arr[i] = cell.vel
return vel_arr
def _update_pos(self,pos_arr):
""" Update all of the cell positions with an array """
for (pos,cell) in zip(pos_arr,self.cells):
#enact the periodic boundary conditions
pos[0] = pos[0]%self.XSIZE
cell.pos = pos
self._cell_arr = pos_arr
#self._updated = True
def _update_vel(self,vel_arr):
""" Update all of the cell velocities with an array """
for (vel,cell) in zip(vel_arr,self.cells):
cell.vel = vel
def _get_ghost_pos_arr(self):
""" Get all of the ghost positions """
arr = sp.zeros((len(self._ghosts),2))
for ind,cell in enumerate(self._ghosts):
arr[ind] = cell.pos
return arr
def _update_ghosts(self):
""" Update the positions of all of the ghost cells """
for ghost in self._ghosts:
ghost.update()
def jiggle(self,sigma=0.1,ghosts=True):
""" Jiggle the atom positions """
pos = self.get_pos_arr()
sigarr = sp.array([cell.type.L for cell in self.cells])
randn = sp.randn(self.num_cells,2)
newpos = pos + sigma*(sigarr*randn.T).T
self._update_pos(newpos)
self._updated = True
if ghosts:
self._update_ghosts()
self.logger.info('Jiggled the atoms')
def _set_radii(self):
""" set radii as the average of the links starting from each cell """
for cell in [cell for cell in self.cells if cell.type == self.epidermal]:
average_length=0.0
count=0.
for neigh in self.links.get_neighbors(cell):
average_length += self.links.get_link(cell,neigh).L/2.0
count += 1.
if count:
cell.radius=average_length/count
for cell in [cell for cell in self.cells if cell.type == self.dermal]:
cell.radius=self.epidermal.L/2.0
def _set_radii_min(self):
""" set radii as the smallest link size """
for cell in [cell for cell in self.cells if cell.type == self.epidermal]:
min_length = min([link.L/2. for link in self.links.get_links(cell)])
#rint min_length
cell.radius=min_length
for cell in [cell for cell in self.cells if cell.type == self.dermal]:
cell.radius=self.epidermal.L/2.0
    def _freeze_links(self):
        """ Adjust all of the links to be their current extension """
        # Re-baseline every link's rest length to the current (jiggled)
        # geometry so the lattice starts in mechanical equilibrium.
        for link in self.links:
            link.L = link.extension_without_breaking()
            # Debug output for links that cross the basal membrane.
            if (link.one.type.name == 'Dermal'):
                if (link.two.type.name == 'Basal') :
                    print link.one, link.two, link.L
            if (link.one.type.name == 'Epidermal'):
                if (link.two.type.name == 'Basal') :
                    print link.one, link.two, link.L
        self._set_radii_min()
        self.logger.info('Froze the links in place')
def _filter_ghosts(self,one,two):
if isinstance(one,cells.GhostCell) and isinstance(two,cells.GhostCell):
raise Exception("DoubleGhost")
elif isinstance(one,cells.GhostCell):
return one.original,two
elif isinstance(two,cells.GhostCell):
return one,two.original
else:
return one,two
    def _clear_links(self):
        """ Clear all Links """
        # Drop every existing link by replacing the container outright.
        self.links = links.Links()
def delaunay(self):
""" Delaunay routine, sets the initial links """
self.logger.debug('Running the Delaunay routine')
#first get the positions of all the cells and the ghosts
num_cells = len(self.cells)
num_ghosts = len(self._ghosts)
fulllist = self.cells + self._ghosts
num_full = len(fulllist)
arr = sp.zeros((num_full,2))
for ind,cell in enumerate(fulllist):
arr[ind] = cell.pos
#get the Delaunay construction
tri = Delaunay(arr)
#add the links
for i,j,k in tri.vertices:
cellone = fulllist[i]
celltwo = fulllist[j]
cellthree = fulllist[k]
length_of_bond = norm(cellone.pos - celltwo.pos)
expected_length = 0.5*(cellone.type.L + celltwo.type.L)
if length_of_bond < 2*expected_length:
try:
one,two = self._filter_ghosts(cellone,celltwo)
self.add_bond(one,two)
except Exception, e:
if e.message=="DoubleGhost":
pass
else:
raise
try:
one,two = self._filter_ghosts(celltwo,cellthree)
self.add_bond(one,two)
except Exception, e:
if e.message=="DoubleGhost":
pass
else:
raise
try:
one,two = self._filter_ghosts(cellthree,cellone)
self.add_bond(one,two)
except Exception, e:
if e.message=="DoubleGhost":
pass
else:
raise
def add_cell(self,cell):
""" Add the cell: cell """
self.cells.append(cell)
self.num_cells += 1
self._updated = True
self.logger.debug('Adding the cell {cell}'.format(cell=cell))
def add_bond(self,one,two):
""" Add a bond between cells one and two """
self.links.add_link(one,two,xsize=self.XSIZE)
self.logger.debug('Adding a bond between {one} and {two}'.format(one=one,two=two))
def remove_bond(self,one,two):
""" Remove a bond between cells one and two """
self.links.remove_link(one,two)
self.logger.debug('Removed the link between {one} and {two}'.format(one=one,two=two))
def remove_cell(self,cell):
""" Remove the cell: cell, and all bonds for that cell """
self.cells.remove(cell)
self.links.remove_cell(cell)
self.logger.debug('Removed the cell {cell}'.format(cell=cell))
    def get_neighbors(self,cell):
        """ Get the linked neighbor cells of cell """
        # Delegates to the Links container (link-topology neighbors, not
        # geometric neighbors).
        return self.links.get_neighbors(cell)
def add_cancer_cell(self,x,r,eps=None):
file=open(self.cancer_evolution_filename,'a')
""" randomly make a cell a cancer cell """
cells = self._query_point(x,r,eps)
cells = [cell for cell in cells if cell.type != self.basal]
if cells:
cell = random.choice(cells)
self.cancer_cells.append(cell)
self.links.remove_cell(cell)
cell.type = self.cancer
s = str(cell.pos[0]) + ' ' + str(cell.pos[1]) + '\n'
file.write(s)
self.logger.info('Added a cancer cell: {cell}'.format(cell=cell))
self._updated = True
else:
raise Exception("No targets found at {} within radius {}".format(x,r))
file.close
def duplicate_cancer_cell(self,cancer=None,disp_frac = 0.01):
""" Duplicate the cancer cell: cancer """
if cancer is None:
cancer = random.choice(self.cancer_cells)
file=open(self.cancer_evolution_filename,'a')
self.logger.info('Duplicating a cancer cell...')
#need to choose a random direction and do the relaxation
L = disp_frac * cancer.type.L
theta = sp.rand()*2*sp.pi
disp = L * sp.array([sp.sin(theta),sp.cos(theta)])
newcell = cells.Cell(cancer.pos + disp,self.cancer,self.num_cells)
newcell.radius = cancer.radius
cancer.pos = cancer.pos - disp
s = str(cancer.pos[0]) + ' ' + str(cancer.pos[1]) + '\n'
file.write(s)
self.cancer_cells.append(newcell)
self.add_cell(newcell)
"""
neighs = self.links.get_neighbors(cancer).copy()
for neigh in neighs:
link_disp = neigh.pos - cancer.pos
if sp.vdot(link_disp,disp) >= 0:
#remove old link, create new one.
self.links.remove_link(cancer,neigh)
self.links.add_link(newcell,neigh)
"""
#self.links.add_link(newcell,cancer)
self._updated = True
file.close
    def time_step(self):
        """ Run a time step, duplicate a cancer cell,
            do a FIRE relaxation, and plot """
        self.logger.info('Running a time step')
        self.duplicate_cancer_cell()
        # relax the configuration after the division
        self.fire()
        self.plot_sized_cells()
        # advance the simulation clock
        self.T += 1
    def plot_cells(self,clf=True,fignum=1,ghosts=False,*args,**kwargs):
        """ Plot the current configuration

        Scatter-plots every cell colored by its type; when ``ghosts`` is
        True the ghost cells are overlaid semi-transparently. Extra
        args/kwargs are forwarded to pylab.scatter.
        """
        self.logger.info('Plotting the cells')
        pos_arr = self.get_pos_arr()
        py.figure(fignum)
        if clf:
            py.clf()
        py.scatter(pos_arr[:,0],pos_arr[:,1],
                    c=[i.type.color for i in self.cells],
                    s=50,
                    zorder=10,
                    *args,**kwargs)
        if ghosts:
            ghost_arr = self._get_ghost_pos_arr()
            py.scatter(ghost_arr[:,0],ghost_arr[:,1],
                    c = [i.original.type.color for i in self._ghosts],
                    s = 30,
                    zorder=10,
                    alpha = 0.3,
                    *args,**kwargs)
        py.axis('equal')
def my_circle_scatter(self, axes, x_array, y_array, rad_array, col_array, **kwargs):
for x, y, R, c in zip(x_array, y_array , rad_array, col_array):
circle = py.Circle((x,y), radius=R, color = c, **kwargs)
axes.add_patch(circle)
return True
    def plot_sized_cells_old(self,clf=True,fignum=1,ghosts=False,*args, **kwargs):
        """ Plot the current configuration using circles

        Superseded by plot_sized_cells; draws each cell as a circle of
        its own radius, colored by type, with optional ghost overlay.
        """
        self.logger.info('Plotting Sized Cells')
        pos_arr = self.get_pos_arr()
        rad_arr = self.get_radius_arr()
        col_arr = [i.type.color for i in self.cells]
        py.figure(fignum)
        if clf:
            py.clf()
        axes=py.axes()
        self.my_circle_scatter(axes,
                    pos_arr[:,0],
                    pos_arr[:,1],
                    rad_arr, col_arr, alpha=0.6,**kwargs)
        if ghosts:
            ghost_arr = self._get_ghost_pos_arr()
            py.scatter(ghost_arr[:,0],ghost_arr[:,1],
                    c = [i.original.type.color for i in self._ghosts],
                    s = 30,
                    zorder=10,
                    alpha = 0.3,
                    *args,**kwargs)
        py.xlim((0,self.XSIZE))
        py.axis('equal')
def plot_sized_cells(self,clf=True,fignum=1,ghosts=False,*args, **kwargs):
""" Plot the current configuration using circles"""
self.logger.info('Plotting Sized Cells')
pos_arr = self.get_pos_arr()
rad_arr = self.get_radius_arr()
pos = self.get_pos_arr(force=True)
pressure_arr = zeros_like(pos)
#kdtree = self._get_kdtree(force=True)
for i,j in self._get_npairs(): #kdtree.query_pairs(self.xi*1.0):
force = self.force_func_celltypes(self.cells[i], self.cells[j] )
pressure_arr[i] += fabs(force)
pressure_arr[j] += fabs(force)
pressure_arr = nan_to_num(pressure_arr)
#print "\n"
#print pressure_arr
#print "\n"
cancer_cell_pressures = empty(len(self.cancer_cells))
numero_cancer = 0
numero_cell = 0
for i in self.cells:
if i.type.name == 'Cancer' :
cancer_cell_pressures[numero_cancer]=norm(pressure_arr[numero_cell])/(3.141592*rad_arr[numero_cell]*rad_arr[numero_cell])
numero_cancer = numero_cancer + 1
numero_cell = numero_cell + 1
#printing stress on file
file=open(self.pressure_filename,'a')
#factor is 4/3( E/(1-nu^2)) = 3/2 kPa
factor = 1.5
for i in range(0,len(cancer_cell_pressures)):
s = str(i) + ' ' + str(cancer_cell_pressures[i]*factor) +'\n'
file.write(s)
s = '\n'
file.write(s)
#s = str(numero_cancer) + ' ' + str(cancer_cell_pressures.mean()) +'\n'
#file.write(s)
#s = '\n'
file.close
if len(cancer_cell_pressures)>1 :
cancer_cell_pressures = (cancer_cell_pressures-cancer_cell_pressures.min())/(cancer_cell_pressures.max()-cancer_cell_pressures.min())*0.9+0.1
#print "\n"
#print cancer_cell_pressures
#print "\n"
else :
cancer_cell_pressures[0] = 0.5
#print '\n'
#print cancer_cell_pressures
#print '\n'
col_arr = []
numero_cancer = 0
for i in self.cells:
if i.type.name == 'Cancer' :
rgb_color = cm.hot(1-cancer_cell_pressures[numero_cancer],1.0)
col_arr.append(rgb_color)
#print '\n'
#print rgb_color , cancer_cell_forces[numero_cancer]
#print '\n'
numero_cancer = numero_cancer + 1
else :
col_arr.append(i.type.color)
#cb.Colorbar(col_arr,kwargs)
#print '\n'
#print col_arr
#print '\n'
#file=open(self.screenshot_filename,'a')
#for i in range(0, len(pos_arr)):
# s = self.cells[i].type.name + ' ' + str(pos_arr[i][0]) + ' ' + str(pos_arr[i][1]) + ' ' + str(rad_arr[i]) + ' ' + str(col_arr[i]) +'\n'
# file.write(s)
#file.close
py.figure(fignum)
if clf:
py.clf()
axes=py.axes()
self.my_circle_scatter(axes,
pos_arr[:,0],
pos_arr[:,1],
rad_arr, col_arr, alpha=0.6,**kwargs)
if ghosts:
ghost_arr = self._get_ghost_pos_arr()
py.scatter(ghost_arr[:,0],ghost_arr[:,1],
c = [i.original.type.color for i in self._ghosts],
s = 30,
zorder=10,
alpha = 0.3,
*args,**kwargs)
py.xlim((0,self.XSIZE))
py.axis('equal')
    def plot_links(self,clf=False,cutoff=None,fignum=1,ghosts=False,*args,**kwargs):
        """ Plot the links between cells

        Draws each active (C_10 > 0) link as a line colored by its
        energy; links longer than ``cutoff`` (default XSIZE/2, i.e.
        spans across the periodic boundary) are skipped.
        """
        self.logger.info('Plotting Links')
        if cutoff is None:
            cutoff = self.XSIZE/2.
        py.figure(fignum)
        if clf:
            py.clf()
        #file=open(self.screenshot_filename,'a')
        for link in self.links:
            if link.C_10 > 0:
                #s = 'Link' + ' ' + str(link.one.pos[0]) + ' ' + str(link.one.pos[1]) + ' ' + str(link.two.pos[0]) + ' ' + str(link.two.pos[1]) +'\n'
                #file.write(s)
                d12=link.one.pos-link.two.pos
                abs_d12=norm(d12)
                if abs_d12 < cutoff:
                    data = sp.array([ link.one.pos, link.two.pos ])
                    # color saturates at energy >= 1/30
                    py.plot(data[:,0],data[:,1],
                            c=py.cm.jet( min(link.energy*30.,1.) ),
                            alpha=0.6,
                            *args, **kwargs )
        #file.close
def _get_pairs(self):
kdtree = self._get_kdtree(force=True)
return kdtree.query_pairs(self.xi*1.0)
    def _get_cpairs(self,num=100):
        """Collect candidate interaction pairs via cKDTree.query.

        Queries up to ``num`` nearest neighbors of every cell within the
        force cutoff xi and returns a de-duplicated set of index pairs.
        """
        pos = self.get_pos_arr(force=True)
        ckdtree = self._get_ckdtree(force=False)
        ds,neighs = ckdtree.query(pos,num,distance_upper_bound=self.xi)
        pairs = set()
        N = len(neighs)
        for (i,j),k in sp.ndenumerate(neighs):
        #    if cmp(i,k) < 1:
        #        pairs.add((i,k))
        #    else:
        #        pairs.add((k,i))
            # k == N is cKDTree's "no neighbor found" sentinel; skip it
            # and avoid adding the same pair in both orientations
            if k < N and (i,k) not in pairs and (k,i) not in pairs:
                pairs.add((i,k))
        return pairs
def _get_npairs(self):
if self.neighs is None:
self.neighs = NeighborList([self.xi]*self.num_cells)
self.neighs.update(self)
return ((i,j) for i in range(self.num_cells) for j in self.neighs.get_neighbors(i) )
    @property
    def forces(self):
        """ get the forces between cells, as array, both from links
            and from the native force_func
        """
        self.logger.info('Computing forces')
        pos = self.get_pos_arr(force=True)
        force_arr = zeros_like(pos)
        # spring forces from explicit links, applied equal and opposite
        for link in self.links:
            force = link.force
            force_arr[link.one.index] += force
            force_arr[link.two.index] -= force
        #kdtree = self._get_kdtree(force=True)
        # pairwise contact forces between geometric neighbors
        for i,j in self._get_npairs(): #kdtree.query_pairs(self.xi*1.0):
            force = self.force_func_celltypes(self.cells[i], self.cells[j] )
            #disp = self.cells[i].pos - self.cells[j].pos
            #L = norm(disp)
            #force = 2 * self.a**4 * ( 2 * self.xi**2 - 3 * self.xi * L + L**2 )/( self.xi**2 * L**6 ) * disp
            force_arr[i] += force
            force_arr[j] -= force
        # NaNs (e.g. from coincident cells) are flushed to zero
        return nan_to_num(force_arr)
def force_func(self,cell1,cell2):
""" the native force function between two positions """
x1 = cell1.pos
x2 = cell2.pos
disp = x1 - x2
mod_disp = norm(disp)
force = 2 * self.a**4 * ( 2 * self.xi**2 - 3 * self.xi * mod_disp + mod_disp**2 )/( self.xi**2 * mod_disp**6 ) * disp
return force
def force_func2(self,cell1,cell2):
""" the native force function between two positions, second attempt """
x1 = cell1.pos
x2 = cell2.pos
r1 = cell1.radius
r2 = cell2.radius
disp = x1 - x2
mod_disp = norm(disp)
a1=self.a*(r1+r2)
xi1=self.xi*(r1+r2)
force = 2 * a1**4 * ( 2 * xi1**2 - 3 * xi1 * mod_disp + mod_disp**2 )/( xi1**2 * mod_disp**6 ) * disp
return force
def force_func_hertz(self,cell1,cell2):
""" the Hertz force between two cells """
x1 = cell1.pos
x2 = cell2.pos
r1 = cell1.radius
r2 = cell2.radius
disp = x1 - x2
mod_disp = norm(disp)
delta=(r1+r2)-mod_disp
if delta > 0.0:
force = self.a*delta**1.5*disp/mod_disp
else:
force= 0.0
return force
    def force_func_celltypes_old(self,cell1,cell2):
        """ Try to case out the cell types

        Superseded by force_func_celltypes. Basal-basal pairs feel no
        force; pairs with one basal cell use the native potential inside
        basalcutoff; all other pairs use a Hertz contact force. The
        numexpr strings are evaluated with ne.evaluate for speed.
        """
        x1 = cell1.pos
        x2 = cell2.pos
        #use the Cython dispfunc
        disp = disp_func(x1,x2,self.XSIZE)
        mod_disp = norm(disp)
        force = 0.0
        if cell1.type==self.basal and cell2.type==self.basal:
            #We have two basal cells
            force = 0.0
        elif cell1.type==self.basal or cell2.type==self.basal:
            #We have one basal cell
            if mod_disp <= self.basalcutoff:
                oldexpr = '2 * self.basalstrength**4 * ( 2 * self.basalcutoff**2 - 3 * self.basalcutoff * mod_disp + mod_disp**2 )/( self.basalcutoff**2 * mod_disp**6 ) * disp'
                # numexpr cannot resolve attribute lookups; bind locals first
                basalstrength = self.basalstrength
                basalcutoff = self.basalcutoff
                forcestr = '2 * basalstrength**4 * ( 2 * basalcutoff**2 - 3 * basalcutoff * mod_disp + mod_disp**2 )/( basalcutoff**2 * mod_disp**6 ) * disp'
                force = ne.evaluate(forcestr)
        else:
            #We have some other situation
            r1 = cell1.radius
            r2 = cell2.radius
            delta=(r1+r2)-mod_disp
            if delta > 0:
                a = self.a
                oldexp = 'sqrt(r1*r2/(r1+r2)) * self.a * delta**1.5*disp/mod_disp'
                forcestr = 'sqrt(r1*r2/(r1+r2)) * a * delta**1.5*disp/mod_disp'
                force = ne.evaluate(forcestr)
        #print 'force', force
        return force
    def force_func_celltypes(self,cell1,cell2):
        """ Try to case out the cell types

        Basal-basal pairs feel no force. Overlapping pairs get a
        corrected Hertz-like contact force (polynomial in the reduced
        overlap omega); non-overlapping cancer-cancer pairs closer than
        2 units get a weak linear attraction. Expressions are evaluated
        via numexpr.
        """
        x1 = cell1.pos
        x2 = cell2.pos
        #use the Cython dispfunc
        disp = disp_func(x1,x2,self.XSIZE)
        mod_disp = norm(disp)
        force = 0.0
        if cell1.type==self.basal and cell2.type==self.basal:
            #We have two basal cells
            force = 0.0
        #elif cell1.type==self.basal or cell2.type==self.basal:
            #We have one basal cell
        #    if mod_disp <= self.basalcutoff:
        #        oldexpr = '2 * self.basalstrength**4 * ( 2 * self.basalcutoff**2 - 3 * self.basalcutoff * mod_disp + mod_disp**2 )/( self.basalcutoff**2 * mod_disp**6 ) * disp'
        #        basalstrength = self.basalstrength
        #        basalcutoff = self.basalcutoff
        #        forcestr = '2 * basalstrength**4 * ( 2 * basalcutoff**2 - 3 * basalcutoff * mod_disp + mod_disp**2 )/( basalcutoff**2 * mod_disp**6 ) * disp'
        #        force = ne.evaluate(forcestr)
        else:
            #We have some other situation
            r1 = cell1.radius
            r2 = cell2.radius
            min_radius = min(r1,r2)
            # reduced (harmonic-mean-like) radius for the contact model
            renormalized_r = r1*r2/(r1+r2)
            delta=(r1+r2)-mod_disp
            if delta > 0:
                omega = pow(delta/renormalized_r,1.5)
                a = self.a
                forcestr = 'sqrt(renormalized_r) * a * delta**1.5*(1 + 1.15*omega**0.34 +9.5*omega + 9.288*omega**2)/(1+2.3*omega)*disp/mod_disp'
                force = ne.evaluate(forcestr)
                #print cell1.type, cell2.type, 'delta:', delta
            else :
                if cell1.type==self.cancer and cell2.type==self.cancer:
                    #alpha = 0.1
                    #forcestr = '-6*(alpha/(-delta))**7'
                    # weak linear pull between nearby separated cancer cells
                    if -delta<2.0 :
                        forcestr = '0.001*delta'
                    else :
                        forcestr = '0.0'
                    force = ne.evaluate(forcestr)
                    print '\n'
                    print cell1.type, cell2.type, 'delta<0:', delta, 'force:', force
                    print '\n'
        return force
def force_func_celltypes_cython(self, cell1, cell2):
    """Dispatch the pair force to the appropriate Cython kernel by cell type.

    basal/basal -> 0.0; exactly one basal -> force_func_basal;
    otherwise the Hertz contact kernel.
    """
    pos1, pos2 = cell1.pos, cell2.pos
    basal = self.basal
    if cell1.type == basal and cell2.type == basal:
        # Basal cells do not interact with each other.
        return 0.0
    if cell1.type == basal or cell2.type == basal:
        return force_func_basal(pos1, pos2, self.basalstrength, self.XSIZE)
    # Neither cell is basal: Hertz contact force between the two radii.
    return force_func_hertz(pos1, pos2, cell1.radius, cell2.radius, self.a, self.XSIZE)
@property
def energy(self):
    """Total energy of the current configuration: sum of all link energies."""
    return sum(link.energy for link in self.links)
def fire(self):
""" Do a fire relaxation """
# FIRE-style minimization: velocities are steered toward the force
# direction and the timestep adapts based on the power P = F.v.
# NOTE(review): parameter names (fmax, Nmin, finc, fdec, alphastart, fa)
# appear to follow the FIRE paper's conventions -- confirm in config docs.
#load params
fmax = self.config['fmax']
Nmin = self.config['Nmin']
finc = self.config['finc']
fdec = self.config['fdec']
alphastart = self.config['alphastart']
fa = self.config['fa']
deltatmax = self.config['deltatmax']
maxsteps = self.config['maxsteps']
alpha = alphastart
deltat = 0.1
pos = self.get_pos_arr(force=True)
v = sp.zeros_like(pos)
self._update_vel(v)
v = self._get_vel_arr()
steps_since_negative = 0
# Older local helpers kept for reference; norm_arr is rebound to the
# module-level norm below.
def norm_arr_old(vec):
return sp.sqrt(sp.sum(vec**2,1))
def unitize_arr_old(vec):
return nan_to_num(((vec.T)/norm_arr(vec)).T)
norm_arr = norm
# Seed with infinite forces so the loop body runs at least once.
forces = nan_to_num(sp.array([ [sp.inf,sp.inf]]))
step_num = 0
self.logger.info("Beginning FIRE Relaxation -- fmax={}".format(fmax))
maxdpos = 100000.0
# Iterate until the largest force drops below fmax or we hit maxsteps.
while max(norm_arr(forces)) > fmax and step_num < maxsteps:
forces = self.forces
self.logger.debug("Computed forces: {forces}".format(forces=pprint.pformat(forces)))
power = sp.vdot(forces,v)
self.logger.info("Step: {}, max_force: {}, power: {}".format(step_num,
max(norm_arr(forces)),
power))
#DEBUG PRINTING
#print "Step: {}, max_force: {}, power: {}, deltat: {}".format(step_num,
# max(norm_arr(forces)),
# power, deltat)
# Steer velocity toward the unit force direction (FIRE mixing step).
# NOTE(review): unitize_arr looks like a module-level helper (only
# unitize_arr_old is defined locally) -- confirm it exists at import time.
v = nan_to_num( (1.0 - alpha)*v + alpha*(norm_arr(v)*unitize_arr(forces).T).T )
if power>0.:
# Moving downhill: after Nmin good steps, grow dt and damp alpha.
if steps_since_negative > Nmin:
deltat = min(deltat * finc, deltatmax)
alpha = alpha*fa
steps_since_negative += 1
else:
# Moving uphill: freeze velocities, shrink dt, reset the mixing factor.
steps_since_negative = 0
deltat = deltat * fdec
v *= 0.
alpha = alphastart
# Explicit Euler update of velocities and positions.
v += forces*deltat
pos += v*deltat
self._update_pos(pos)
step_num += 1
#maxdpos = max(norm_arr(v*deltat))
#DEBUG PRINTING
#print "Maximum position change = {}".format(maxdpos)
#DEBUG_PLOT
#self.plot_sized_cells()
#self.plot_links()
#self.plot_forces()
#py.draw()
self._update_pos(pos)
self._update_vel(v)
self.logger.info("Relaxation finished...")
def save(self, filename):
    """Pickle the simulation state (config, cells, links, ghosts, time) to filename."""
    self.logger.info("SAVING state to {}".format(filename))
    # Pickle streams are binary data: open the file in binary mode so the
    # dump is portable (text mode corrupts pickle output on some platforms
    # and fails outright on Python 3).
    with open(filename, 'wb') as f:
        pickle.dump((self.config, self.cells, self.links, self._ghosts, self.T), f)
def vmd_out(self, filename):
    """Write a VMD compatible file to filename.

    One line per cell: the cell-type index (used as the color) followed by
    the x, y coordinates and a constant z of 0 (the simulation is 2D).
    """
    formatstring = "{color} {x} {y} {z}\n"
    with open(filename, 'w') as out:
        for index, position in enumerate(self.get_pos_arr(force=True)):
            out.write(formatstring.format(
                x=position[0], y=position[1], z=0,
                color=self.cells[index].type.type_ind))
def plot_forces(self, factor=5):
    """Quiver-plot the current force vector at each cell position."""
    xs, ys = self.get_pos_arr().T
    fxs, fys = self.forces.T
    py.quiver(xs, ys, fxs, fys, scale=factor)
#Some code for ASE neighborlist functionality
def get_positions(self):
    """Return an (N, 3) position array with a zero z-column (ASE neighborlist API)."""
    zeros_column = sp.zeros((self.num_cells, 1))
    return sp.hstack((self.get_pos_arr(), zeros_column))
def get_pbc(self):
    """Periodic boundary flags for ASE: periodic along x only."""
    return sp.array([True, False, False])
def get_cell(self):
    """Simulation cell matrix for ASE: an XSIZE x YSIZE x 1 box."""
    return sp.array([
        [self.XSIZE, 0, 0],
        [0, self.YSIZE, 0],
        [0, 0, 1],
    ])
def __len__(self):
    """Number of cells, so len(sim) works (required by the ASE neighborlist)."""
    return self.num_cells
def load_from_file(filename):
    """Reconstruct a CancerSim from a state file written by CancerSim.save().

    Returns the rebuilt simulation object with its cell/link lists and
    derived fields (cancer_cells, num_cells) repopulated.
    """
    # Pickle data is binary; 'rb' matches the binary dump and is required
    # for correctness on Python 3 and on Windows.
    with open(filename, 'rb') as f:
        config, cells, links, ghosts, T = pickle.load(f)
    Q = CancerSim(config)
    Q.cells = cells
    Q.ghosts = ghosts
    Q.T = T
    Q.links = links
    # Rebuild the derived cancer-cell list rather than trusting stale state.
    Q.cancer_cells = [cell for cell in cells if cell.type.name == "Cancer"]
    Q.num_cells = len(Q.cells)
    return Q
if __name__ == "__main__":
# Smoke-test driver: build a triangular lattice, triangulate, freeze the
# lattice links, seed a single cancer cell, and plot the result.
Q = CancerSim()
Q._triang_lattice()
Q.delaunay()
Q._freeze_links()
# NOTE(review): XSIZE/YSIZE look like module-level constants -- confirm
# they are defined at import time.
Q.add_cancer_cell([XSIZE/2.,YSIZE/2 + 3],1)
Q.plot_cells()
# Convenience alias for interactive (python -i) sessions.
self = Q
"""
TODO: have links know about periodic boundary conditions (maybe)
freeze links (DONE)
Ghost cells need update method. (DONE)
fire relaxation (DONE)
set and divide cancer cells (DONE)
long range forces (DONE)
cache the link calcs
cache the KDTree calcs?
allow more transparent custimization
expose CellTypes
use logging module
"""
| |
import functools
import json
import sys
import types
import re
import uuid
import warnings
from datetime import date, datetime
import pytz
from neomodel import config
from neomodel.exceptions import InflateError, DeflateError, RequiredProperty
# Python 3 removed the separate ``unicode`` type; alias it to ``str`` so the
# property classes below can call ``unicode(value)`` on either major version.
if sys.version_info >= (3, 0):
unicode = str
def display_for(key):
    """Build an unbound ``get_<key>_display``-style method for a choices property.

    The returned callable reads the instance's current value of ``key`` and
    maps it through the class-level property's ``choices`` dict.
    """
    def display_choice(self):
        prop = getattr(self.__class__, key)
        return prop.choices[getattr(self, key)]
    return display_choice
class PropertyManager(object):
"""
Common methods for handling properties on node and relationship objects.
"""
def __init__(self, **kwargs):
# Resolve the declared property map once; fall back to scanning the
# class hierarchy when __all_properties__ has not been precomputed.
properties = getattr(self, "__all_properties__", None)
if properties is None:
properties = \
self.defined_properties(rels=False, aliases=False).items()
for name, property in properties:
if kwargs.get(name) is None:
# Absent or None: use the property's default (or None).
if getattr(property, 'has_default', False):
setattr(self, name, property.default_value())
else:
setattr(self, name, None)
else:
setattr(self, name, kwargs[name])
if getattr(property, 'choices', None):
# Attach a bound get_<name>_display() helper for choice lookups.
setattr(self, 'get_{0}_display'.format(name),
types.MethodType(display_for(name), self))
if name in kwargs:
del kwargs[name]
# Same treatment for aliases, consuming their kwargs as we go.
aliases = getattr(self, "__all_aliases__", None)
if aliases is None:
aliases = self.defined_properties(
aliases=True, rels=False, properties=False).items()
for name, property in aliases:
if name in kwargs:
setattr(self, name, kwargs[name])
del kwargs[name]
# undefined properties (for magic @prop.setters etc)
for name, property in kwargs.items():
setattr(self, name, property)
@property
def __properties__(self):
# Snapshot of plain data attributes: skips private names, callables,
# and relationship/alias descriptors.
from .relationship_manager import RelationshipManager
return dict((name, value) for name, value in vars(self).items()
if not name.startswith('_')
and not callable(value)
and not isinstance(value,
(RelationshipManager, AliasProperty,))
)
@classmethod
def deflate(cls, properties, obj=None, skip_empty=False):
# deflate dict ready to be stored
deflated = {}
for name, property \
in cls.defined_properties(aliases=False, rels=False).items():
db_property = property.db_property or name
if properties.get(name) is not None:
deflated[db_property] = property.deflate(properties[name], obj)
elif property.has_default:
deflated[db_property] = property.deflate(
property.default_value(), obj
)
elif property.required or property.unique_index:
# Required/unique properties may never be stored as missing.
raise RequiredProperty(name, cls)
elif not skip_empty:
deflated[db_property] = None
return deflated
@classmethod
def defined_properties(cls, aliases=True, properties=True, rels=True):
# Walk the MRO base-first so subclass definitions override inherited ones.
from .relationship_manager import RelationshipDefinition
props = {}
for baseclass in reversed(cls.__mro__):
props.update(dict(
(name, property) for name, property in vars(baseclass).items()
if (aliases and isinstance(property, AliasProperty))
or (properties and isinstance(property, Property)
and not isinstance(property, AliasProperty))
or (rels and isinstance(property, RelationshipDefinition))
))
return props
def validator(fn):
    """Wrap a Property ``inflate``/``deflate`` method so any failure is
    re-raised as InflateError/DeflateError carrying the property name/owner.

    Passing rethrow=False (used by ArrayProperty for per-item conversion)
    propagates the original exception unchanged.
    """
    fn_name = fn.func_name if hasattr(fn, 'func_name') else fn.__name__
    if fn_name == 'inflate':
        exc_class = InflateError
    elif fn_name == 'deflate':
        exc_class = DeflateError
    else:
        raise Exception("Unknown Property method " + fn_name)

    @functools.wraps(fn)
    def _validator(self, value, obj=None, rethrow=True):
        if not rethrow:
            # For using with ArrayProperty where we don't want an Inflate/Deflate error.
            return fn(self, value)
        try:
            return fn(self, value)
        except Exception as e:
            raise exc_class(self.name, self.owner, str(e), obj)
    return _validator
class Property(object):
    """
    Base class for object properties.

    :param unique_index: Create a unique index for this property
        (default ``False``). Mutually exclusive with ``index``.
    :type unique_index: :class:`bool`
    :param index: Create a (non-unique) index for this property
        (default ``False``).
    :type index: :class:`bool`
    :param required: Mark the property as required (default ``False``).
        Mutually exclusive with ``default``.
    :type required: :class:`bool`
    :param default: A default value, or a callable producing one, applied
        when a node is initialized without this property.
    :param db_property: Name this property maps to in the database;
        defaults to the model's attribute name.
    :type db_property: :class:`str`
    :param label: Optional, used by ``django_neomodel``.
    :type label: :class:`str`
    :param help_text: Optional, used by ``django_neomodel``.
    :type help_text: :class:`str`
    """
    form_field_class = 'CharField'

    def __init__(self, unique_index=False, index=False, required=False, default=None,
                 db_property=None, label=None, help_text=None, **kwargs):
        if required and default is not None:
            raise ValueError(
                "The arguments `required` and `default` are mutually exclusive."
            )
        if index and unique_index:
            raise ValueError(
                "The arguments `unique_index` and `index` are mutually exclusive."
            )
        self.required = required
        self.unique_index = unique_index
        self.index = index
        self.default = default
        self.has_default = self.default is not None
        self.db_property = db_property
        self.label = label
        self.help_text = help_text

    def default_value(self):
        """
        Generate a default value: call the default if it is callable,
        otherwise return it as-is. Raises when no default was configured.
        """
        if not self.has_default:
            raise Exception("No default value specified")
        return self.default() if callable(self.default) else self.default

    @property
    def is_indexed(self):
        # True when either kind of index was requested.
        return self.unique_index or self.index
class NormalizedProperty(Property):
    """
    Base class for properties whose inflate and deflate both funnel through
    a single ``normalize`` hook, which subclasses must override.
    """

    @validator
    def inflate(self, value):
        return self.normalize(value)

    @validator
    def deflate(self, value):
        return self.normalize(value)

    def default_value(self):
        # The default is normalized exactly like any stored value.
        return self.normalize(super(NormalizedProperty, self).default_value())

    def normalize(self, value):
        raise NotImplementedError('Specialize normalize method')
## TODO remove this with the next major release
def _warn_NormalProperty_renamed():
# Emitted whenever the deprecated NormalProperty alias is used.
warnings.warn(
'The class NormalProperty was renamed to NormalizedProperty. '
'Use that one as base class. The former will be removed in the next '
'major release.', DeprecationWarning)
# Python >= 3.6 can hook subclass creation; older versions warn on __init__
# instead, so the deprecation fires as late as instantiation there.
if sys.version_info >= (3, 6):
class NormalProperty(NormalizedProperty):
def __init_subclass__(cls, **kwargs):
_warn_NormalProperty_renamed()
else:
class NormalProperty(NormalizedProperty):
def __init__(self, *args, **kwargs):
_warn_NormalProperty_renamed()
super(NormalProperty, self).__init__(*args, **kwargs)
##
class RegexProperty(NormalizedProperty):
    r"""
    Validates a property against a regular expression.

    If sub-classing set:

        expression = r'[^@]+@[^@]+\.[^@]+'
    """
    form_field_class = 'RegexField'
    expression = None

    def __init__(self, expression=None, **kwargs):
        """
        Initializes new property with an expression.

        :param str expression: regular expression validating this property
        """
        super(RegexProperty, self).__init__(**kwargs)
        # An explicit argument wins over the class-level expression.
        chosen = expression or self.expression
        if chosen is None:
            raise ValueError('expression is undefined')
        self.expression = chosen

    def normalize(self, value):
        normal = unicode(value)
        if re.match(self.expression, normal) is None:
            raise ValueError(
                '{0!r} does not matches {1!r}'.format(value, self.expression)
            )
        return normal
class EmailProperty(RegexProperty):
    """
    Validates and stores email addresses.
    """
    form_field_class = 'EmailField'
    expression = r'[^@]+@[^@]+\.[^@]+'
class StringProperty(NormalizedProperty):
    """
    Stores a unicode string.

    :param choices: Mapping from valid stored strings to display labels.
                    When left as ``None`` (the default) any string is valid.
    :type choices: Any type that can be used to initiate a :class:`dict`.
    """

    def __init__(self, choices=None, **kwargs):
        super(StringProperty, self).__init__(**kwargs)
        self.choices = None
        if choices is not None:
            try:
                self.choices = dict(choices)
            except Exception:
                raise ValueError("The choices argument must be convertable to "
                                 "a dictionary.")
            # Choices imply a select-style form field in django_neomodel.
            self.form_field_class = 'TypedChoiceField'

    def normalize(self, value):
        if self.choices is not None and value not in self.choices:
            raise ValueError("Invalid choice: {0}".format(value))
        return unicode(value)

    def default_value(self):
        # Defaults are validated against the choices like any other value.
        return self.normalize(super(StringProperty, self).default_value())
class IntegerProperty(Property):
    """
    Stores an integer value.
    """
    form_field_class = 'IntegerField'

    @validator
    def inflate(self, value):
        return int(value)

    @validator
    def deflate(self, value):
        return int(value)

    def default_value(self):
        base_default = super(IntegerProperty, self).default_value()
        return int(base_default)
class ArrayProperty(Property):
    """
    Stores a list of items.
    """

    def __init__(self, base_property=None, **kwargs):
        """
        Store a list of values, optionally of a specific type.

        :param base_property: List item type e.g StringProperty for string
        :type: Property
        """
        if base_property is not None:
            # Validate the item-type property before accepting it.
            if not isinstance(base_property, Property):
                raise TypeError('Expecting neomodel Property')
            if isinstance(base_property, ArrayProperty):
                raise TypeError('Cannot have nested ArrayProperty')
            for illegal_attr in ['default', 'index', 'unique_index', 'required']:
                if getattr(base_property, illegal_attr, None):
                    raise ValueError('ArrayProperty base_property cannot have "{0}" set'.format(illegal_attr))
        self.base_property = base_property
        super(ArrayProperty, self).__init__(**kwargs)

    @validator
    def inflate(self, value):
        item_property = self.base_property
        if item_property:
            # rethrow=False: per-item errors propagate raw, not wrapped.
            return [item_property.inflate(item, rethrow=False) for item in value]
        return list(value)

    @validator
    def deflate(self, value):
        item_property = self.base_property
        if item_property:
            return [item_property.deflate(item, rethrow=False) for item in value]
        return list(value)

    def default_value(self):
        return list(super(ArrayProperty, self).default_value())
class FloatProperty(Property):
    """
    Stores a floating point value.
    """
    form_field_class = 'FloatField'

    @validator
    def inflate(self, value):
        return float(value)

    @validator
    def deflate(self, value):
        return float(value)

    def default_value(self):
        base_default = super(FloatProperty, self).default_value()
        return float(base_default)
class BooleanProperty(Property):
    """
    Stores a boolean value.
    """
    form_field_class = 'BooleanField'

    @validator
    def inflate(self, value):
        return bool(value)

    @validator
    def deflate(self, value):
        return bool(value)

    def default_value(self):
        base_default = super(BooleanProperty, self).default_value()
        return bool(base_default)
class DateProperty(Property):
    """
    Stores a calendar date, serialized as an ISO-8601 (%Y-%m-%d) string.
    """
    form_field_class = 'DateField'

    @validator
    def inflate(self, value):
        # Parse the stored ISO string back into a datetime.date.
        return datetime.strptime(unicode(value), "%Y-%m-%d").date()

    @validator
    def deflate(self, value):
        if not isinstance(value, date):
            raise ValueError('datetime.date object expected, got {0}'.format(repr(value)))
        return value.isoformat()
class DateTimeFormatProperty(Property):
    """
    Store a datetime rendered through a custom strftime/strptime format.

    :param default_now: If ``True``, the creation time (Local) will be used as default.
                        Defaults to ``False``.
    :param format: Date format string, default is %Y-%m-%d
    :type default_now: :class:`bool`
    :type format: :class:`str`
    """
    form_field_class = 'DateTimeFormatField'

    def __init__(self, default_now=False, format="%Y-%m-%d", **kwargs):
        self.format = format
        if default_now:
            if 'default' in kwargs:
                raise ValueError('too many defaults')
            kwargs['default'] = lambda: datetime.now()
        super(DateTimeFormatProperty, self).__init__(**kwargs)

    @validator
    def inflate(self, value):
        return datetime.strptime(unicode(value), self.format)

    @validator
    def deflate(self, value):
        if not isinstance(value, datetime):
            raise ValueError('datetime object expected, got {0}.'.format(type(value)))
        return datetime.strftime(value, self.format)
class DateTimeProperty(Property):
    """A property representing a :class:`datetime.datetime` object stored
    as a unix epoch float.

    :param default_now: If ``True``, the creation time (UTC) will be used as default.
                        Defaults to ``False``.
    :type default_now: :class:`bool`
    """
    form_field_class = 'DateTimeField'

    def __init__(self, default_now=False, **kwargs):
        if default_now:
            if 'default' in kwargs:
                raise ValueError('too many defaults')
            kwargs['default'] = lambda: datetime.utcnow().replace(tzinfo=pytz.utc)
        super(DateTimeProperty, self).__init__(**kwargs)

    @validator
    def inflate(self, value):
        # Stored form is epoch seconds; convert back to an aware UTC datetime.
        try:
            epoch = float(value)
        except ValueError:
            raise ValueError("Float or integer expected, got {0} can't inflate to "
                             "datetime.".format(type(value)))
        return datetime.utcfromtimestamp(epoch).replace(tzinfo=pytz.utc)

    @validator
    def deflate(self, value):
        if not isinstance(value, datetime):
            raise ValueError('datetime object expected, got {0}.'.format(type(value)))
        if value.tzinfo:
            # Aware: normalize to UTC and diff against an aware epoch origin.
            value = value.astimezone(pytz.utc)
            epoch_date = datetime(1970, 1, 1, tzinfo=pytz.utc)
        elif config.FORCE_TIMEZONE:
            raise ValueError("Error deflating {0}: No timezone provided.".format(value))
        else:
            # No timezone specified on datetime object.. assuming UTC
            epoch_date = datetime(1970, 1, 1)
        return float((value - epoch_date).total_seconds())
class JSONProperty(Property):
    """
    Store a data structure serialized as a JSON string; the structure is
    parsed back into Python objects when a node is retrieved.
    """

    def __init__(self, *args, **kwargs):
        super(JSONProperty, self).__init__(*args, **kwargs)

    @validator
    def inflate(self, value):
        return json.loads(value)

    @validator
    def deflate(self, value):
        return json.dumps(value)
class AliasProperty(property, Property):
    """
    Make one attribute name an alias for another existing property.
    """

    def __init__(self, to=None):
        """
        Create new alias

        :param to: name of property aliasing
        :type: str
        """
        self.target = to
        # Aliases themselves are never required and never carry a default.
        self.required = False
        self.has_default = False

    def aliased_to(self):
        return self.target

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if not obj:
            return self
        return getattr(obj, self.aliased_to())

    def __set__(self, obj, value):
        setattr(obj, self.aliased_to(), value)

    @property
    def index(self):
        # Index settings are delegated to the aliased property.
        return getattr(self.owner, self.aliased_to()).index

    @property
    def unique_index(self):
        return getattr(self.owner, self.aliased_to()).unique_index
class UniqueIdProperty(Property):
    """
    A unique identifier: a randomly generated uuid4 hex with a unique index.
    The index/default settings are fixed and may not be overridden.
    """

    def __init__(self, **kwargs):
        for reserved in ('required', 'unique_index', 'index', 'default'):
            if reserved in kwargs:
                raise ValueError('{0} argument ignored by {1}'.format(reserved, self.__class__.__name__))
        kwargs['unique_index'] = True
        kwargs['default'] = lambda: uuid.uuid4().hex
        super(UniqueIdProperty, self).__init__(**kwargs)

    @validator
    def inflate(self, value):
        return unicode(value)

    @validator
    def deflate(self, value):
        return unicode(value)
| |
"""AWS Image Creation, Management, Testing"""
import json
from copy import deepcopy
from typing import Tuple
import boto3
import botocore.exceptions
import pkg_resources
import yaml
from pkg_resources import resource_string
from retrying import retry
import gen
import gen.build_deploy.util as util
import pkgpanda.util
from gen.internals import Late, Source
from pkgpanda.util import logger, split_by_token
def get_ip_detect(name):
    """Load gen/ip-detect/<name>.sh and return it as a YAML-quoted string."""
    script = resource_string('gen', 'ip-detect/{}.sh'.format(name)).decode()
    return yaml.dump(script)
def calculate_ip_detect_public_contents(aws_masters_have_public_ip):
    """Pick the public-IP detect script based on the 'true'/'false' flag.

    Any other flag value raises KeyError, matching gen's strict inputs.
    """
    script_by_flag = {'true': 'aws_public', 'false': 'aws'}
    return get_ip_detect(script_by_flag[aws_masters_have_public_ip])
def validate_provider(provider):
# gen's validation machinery surfaces AssertionErrors as configuration
# errors, so a bare assert is the expected idiom for validate_* hooks.
assert provider == 'aws'
# Base gen Source shared by every AWS template: fixed platform defaults plus
# values resolved late (at stack-creation time) via CloudFormation intrinsics.
aws_base_source = Source(entry={
'validate': [
validate_provider
],
'default': {
'platform': 'aws',
'resolvers': '["169.254.169.253"]',
'num_private_slaves': '5',
'num_public_slaves': '1',
'os_type': '',
'aws_masters_have_public_ip': 'true',
'enable_docker_gc': 'true'
},
'must': {
'aws_region': Late('{ "Ref" : "AWS::Region" }'),
'aws_stack_id': Late('{ "Ref" : "AWS::StackId" }'),
'aws_stack_name': Late('{ "Ref" : "AWS::StackName" }'),
'ip_detect_contents': get_ip_detect('aws'),
'ip_detect_public_contents': calculate_ip_detect_public_contents,
'ip6_detect_contents': get_ip_detect('aws6'),
'exhibitor_explicit_keys': 'false',
'cluster_name': Late('{ "Ref" : "AWS::StackName" }'),
'master_discovery': 'master_http_loadbalancer',
# The cloud_config template variables pertaining to "cloudformation.json"
'master_cloud_config': '{{ master_cloud_config }}',
'agent_private_cloud_config': '{{ slave_cloud_config }}',
'agent_public_cloud_config': '{{ slave_public_cloud_config }}',
# template variable for the generating advanced template cloud configs
'cloud_config': '{{ cloud_config }}',
'rexray_config_preset': 'aws',
'fault_domain_detect_contents': yaml.dump(
pkg_resources.resource_string('gen', 'fault-domain-detect/cloud.sh').decode())
},
'conditional': {
'oauth_available': {
'true': {
'must': {
'oauth_enabled': Late('{ "Ref" : "OAuthEnabled" }'),
'adminrouter_auth_enabled': Late('{ "Ref" : "OAuthEnabled" }'),
}
},
'false': {}
},
'licensing_enabled': {
'true': {
'must': {
'license_key_contents': Late('{ "Ref" : "LicenseKey" }'),
},
# License keys are sensitive and must never be logged in cleartext.
'secret': [
'license_key_contents',
],
},
'false': {},
}
}
})
# Human-readable display names for the AWS regions the templates support.
aws_region_names = [
{
'name': 'US West (N. California)',
'id': 'us-west-1'
},
{
'name': 'US West (Oregon)',
'id': 'us-west-2'
},
{
'name': 'US East (N. Virginia)',
'id': 'us-east-1'
},
{
'name': 'South America (Sao Paulo)',
'id': 'sa-east-1'
},
{
'name': 'EU (Ireland)',
'id': 'eu-west-1'
},
{
'name': 'EU (Frankfurt)',
'id': 'eu-central-1'
},
{
'name': 'Asia Pacific (Tokyo)',
'id': 'ap-northeast-1'
},
{
'name': 'Asia Pacific (Singapore)',
'id': 'ap-southeast-1'
},
{
'name': 'Asia Pacific (Sydney)',
'id': 'ap-southeast-2'
}]
# Per-region AMI IDs: 'coreos'/'stable' (CoreOS stable channel), 'el7' and
# 'el7prereq' (CentOS 7, with prerequisites preinstalled), and 'natami'
# (the NAT instance image, exposed as 'default' by gen_ami_mapping).
region_to_ami_map = {
'ap-northeast-1': {
'coreos': 'ami-93f2baf4',
'stable': 'ami-93f2baf4',
'el7': 'ami-965345f8',
'el7prereq': 'ami-72f93314',
'natami': 'ami-55c29e54'
},
'ap-southeast-1': {
'coreos': 'ami-aacc7dc9',
'stable': 'ami-aacc7dc9',
'el7': 'ami-8af586e9',
'el7prereq': 'ami-cac2b2a9',
'natami': 'ami-b082dae2'
},
'ap-southeast-2': {
'coreos': 'ami-9db0b0fe',
'stable': 'ami-9db0b0fe',
'el7': 'ami-427d9c20',
'el7prereq': 'ami-a0d736c2',
'natami': 'ami-996402a3'
},
'eu-central-1': {
'coreos': 'ami-903df7ff',
'stable': 'ami-903df7ff',
'el7': 'ami-2d0cbc42',
'el7prereq': 'ami-b371c1dc',
'natami': 'ami-204c7a3d'
},
'eu-west-1': {
'coreos': 'ami-abcde0cd',
'stable': 'ami-abcde0cd',
'el7': 'ami-e46ea69d',
'el7prereq': 'ami-4d4f8634',
'natami': 'ami-3760b040'
},
'sa-east-1': {
'coreos': 'ami-c11573ad',
'stable': 'ami-c11573ad',
'el7': 'ami-a5acd0c9',
'el7prereq': 'ami-1264187e',
'natami': 'ami-b972dba4'
},
'us-east-1': {
'coreos': 'ami-1ad0000c',
'stable': 'ami-1ad0000c',
'el7': 'ami-771beb0d',
'el7prereq': 'ami-b05aadca',
'natami': 'ami-4c9e4b24'
},
'us-gov-west-1': {
'coreos': 'ami-e441fb85',
'stable': 'ami-e441fb85',
'el7': 'ami-9923a1f8',
'el7prereq': 'ami-9923a1f8',
'natami': 'ami-fe991b9f'
},
'us-west-1': {
'coreos': 'ami-b31d43d3',
'stable': 'ami-b31d43d3',
'el7': 'ami-866151e6',
'el7prereq': 'ami-63cafb03',
'natami': 'ami-2b2b296e'
},
'us-west-2': {
'coreos': 'ami-444dcd24',
'stable': 'ami-444dcd24',
'el7': 'ami-a9b24bd1',
'el7prereq': 'ami-1de01e65',
'natami': 'ami-bb69128b'
}
}
late_services = """- name: dcos-cfn-signal.service
command: start
no_block: true
content: |
[Unit]
Description=AWS Setup: Signal CloudFormation Success
ConditionPathExists=!/var/lib/dcos-cfn-signal
[Service]
Type=simple
Restart=on-failure
StartLimitInterval=0
RestartSec=15s
EnvironmentFile=/opt/mesosphere/environment
EnvironmentFile=/opt/mesosphere/etc/cfn_signal_metadata
Environment="AWS_CFN_SIGNAL_THIS_RESOURCE={{ report_name }}"
ExecStartPre=/bin/ping -c1 leader.mesos
ExecStartPre=/opt/mesosphere/bin/dcos-diagnostics check node-poststart
ExecStartPre=/opt/mesosphere/bin/cfn-signal
ExecStart=/usr/bin/touch /var/lib/dcos-cfn-signal"""
# Per-node-class template parameters: the CloudFormation resource to signal
# (report_name) and the DC/OS roles each instance group carries.
cf_instance_groups = {
'master': {
'report_name': 'MasterServerGroup',
'roles': ['master', 'aws_master']
},
'slave': {
'report_name': 'SlaveServerGroup',
'roles': ['slave']
},
'slave_public': {
'report_name': 'PublicSlaveServerGroup',
'roles': ['slave_public']
}
}
# TODO(cmaloney): this and cf_instance_groups should be the _same_ dictionary
# this just being accessing the report-name key.
# Maps advanced-template node types to the AutoScaling group to signal.
aws_advanced_report_names = {
'master': 'MasterServerGroup',
'pub-agent': 'PublicAgentServerGroup',
'priv-agent': 'PrivateAgentServerGroup'
}
# Advanced-template node types -> (instance-group key, gen Source overrides).
# Masters run exhibitor backed by S3; agents point at the masters' internal
# load balancer and run no exhibitor of their own.
groups = {
'master': (
'master', Source(entry={'must': {
's3_bucket': Late('{ "Ref" : "ExhibitorS3Bucket" }'),
's3_prefix': Late('{ "Ref" : "AWS::StackName" }'),
'exhibitor_storage_backend': 'aws_s3',
'master_role': Late('{ "Ref" : "MasterRole" }'),
'agent_role': '',
'exhibitor_address': Late('{ "Fn::GetAtt" : [ "InternalMasterLoadBalancer", "DNSName" ] }'),
'has_master_external_loadbalancer': 'true',
'master_external_loadbalancer': Late('{ "Fn::GetAtt" : [ "ElasticLoadBalancer", "DNSName" ] }'),
}})),
'pub-agent': (
'slave_public', Source(entry={'must': {
'master_role': '',
'agent_role': Late('{ "Ref" : "PublicAgentRole" }'),
'exhibitor_storage_backend': 'agent_only_group_no_exhibitor',
'exhibitor_address': Late('{ "Ref" : "InternalMasterLoadBalancerDnsName" }'),
}})),
'priv-agent': (
'slave', Source(entry={'must': {
'master_role': '',
'agent_role': Late('{ "Ref" : "PrivateAgentRole" }'),
'exhibitor_storage_backend': 'agent_only_group_no_exhibitor',
'exhibitor_address': Late('{ "Ref" : "InternalMasterLoadBalancerDnsName" }'),
}}))
}
def gen_ami_mapping(mappings):
    """Project region_to_ami_map down to the requested AMI keys, as JSON.

    Every region is included; the 'natami' key is renamed to 'default'
    in the output mapping.
    """
    projected = {
        region: {
            ('default' if key == 'natami' else key): amis[key]
            for key in mappings
        }
        for region, amis in region_to_ami_map.items()
    }
    return json.dumps(projected, indent=4, sort_keys=True)
def transform(line):
    """Turn one cloud-config line into a CloudFormation Fn::Join fragment.

    Literal text is JSON-quoted while '{ ... }' parameter references pass
    through verbatim; a trailing '"\\n",' preserves the line break.
    """
    pieces = []
    for part, is_ref in split_by_token('{ ', ' }', line):
        pieces.append(part if is_ref else json.dumps(part))
    return ', '.join(pieces) + ', "\\n",\n'
def render_cloudformation_transform(cf_template, transform_func=lambda x: x, **kwds):
    """Render a CloudFormation jinja template, stamp build metadata into it,
    and return it re-serialized as JSON.

    Each keyword value is passed through transform_func before rendering.
    """
    # TODO(cmaloney): There has to be a cleaner way to do this transformation.
    # For now just moved from cloud_config_cf.py
    # TODO(cmaloney): Move with the logic that does this same thing in Azure
    rendered = gen.template.parse_str(cf_template).render(
        {key: transform_func(value) for key, value in kwds.items()}
    )
    template = json.loads(rendered)
    template['Metadata']['DcosImageCommit'] = util.dcos_image_commit
    template['Metadata']['TemplateGenerationDate'] = util.template_generation_date
    return json.dumps(template)
def render_cloudformation(cf_template, **kwds):
    """Render a CloudFormation template whose cloud-config values must be
    converted line-by-line into Fn::Join fragments."""
    def transform_lines(text):
        fragments = ''.join(transform(line) for line in text.splitlines())
        # Drop the trailing ', "\n",' left by the final line's fragment.
        return fragments.rstrip(',\n')
    return render_cloudformation_transform(cf_template, transform_func=transform_lines, **kwds)
@retry(stop_max_attempt_number=5, wait_exponential_multiplier=1000)
def validate_cf(template_body):
    """Ask the CloudFormation API to validate template_body.

    On a client error the template is pretty-printed for debugging before
    re-raising; transient failures are retried up to five times.
    """
    cf_client = boto3.session.Session().client('cloudformation')
    try:
        cf_client.validate_template(TemplateBody=template_body)
    except botocore.exceptions.ClientError as err:
        print(json.dumps(json.loads(template_body), indent=4))
        raise err
def _as_cf_artifact(filename, cloudformation):
    """Wrap rendered CloudFormation JSON as a channel artifact dict."""
    return {
        'channel_path': 'cloudformation/{}'.format(filename),
        'local_content': cloudformation,
        'content_type': 'application/json; charset=utf-8',
    }
def _as_artifact_and_pkg(variant_prefix, filename, bundle: Tuple):
    """Yield the CF artifact for a rendered bundle, then one artifact per
    stable package path referenced by the gen results."""
    cloudformation, results = bundle
    yield _as_cf_artifact("{}{}".format(variant_prefix, filename), cloudformation)
    for artifact_path in results.stable_artifacts:
        # Packages are content-addressed: reproducible and local paths match.
        yield {
            'reproducible_path': artifact_path,
            'local_path': artifact_path,
        }
def gen_supporting_template():
    """Render, validate, and yield the shared advanced-template infra stack."""
    for template_key in ['infra.json']:
        cf_template = 'aws/templates/advanced/{}'.format(template_key)
        rendered = render_cloudformation_transform(
            resource_string("gen", cf_template).decode(),
            nat_ami_mapping=gen_ami_mapping({'natami'}))
        print("Validating CloudFormation: {}".format(cf_template))
        validate_cf(rendered)
        yield _as_cf_artifact(template_key, rendered)
def make_advanced_bundle(variant_args, extra_sources, template_name, cc_params):
# Build one advanced-template bundle: run gen, assemble the node
# cloud-config (services, signal unit, roles), and splice it into the
# CloudFormation template. Returns (cloudformation_json, gen_results).
extra_templates = [
'aws/dcos-config.yaml',
'aws/templates/advanced/{}'.format(template_name)
]
supported_os = ('coreos', 'el7')
if cc_params['os_type'] not in supported_os:
raise RuntimeError('Unsupported os_type: {}'.format(cc_params['os_type']))
elif cc_params['os_type'] == 'coreos':
extra_templates += ['coreos-aws/cloud-config.yaml', 'coreos/cloud-config.yaml']
cloud_init_implementation = 'coreos'
elif cc_params['os_type'] == 'el7':
cloud_init_implementation = 'canonical'
# el7 ships its prerequisites baked into a separate AMI.
cc_params['os_type'] = 'el7prereq'
results = gen.generate(
arguments=variant_args,
extra_templates=extra_templates,
extra_sources=extra_sources + [aws_base_source],
# TODO(cmaloney): Merge this with dcos_installer/backend.py::get_aws_advanced_target()
extra_targets=[gen.internals.Target(variables={'cloudformation_s3_url_full'})])
cloud_config = results.templates['cloud-config.yaml']
# Add general services
cloud_config = results.utils.add_services(cloud_config, cloud_init_implementation)
cc_variant = deepcopy(cloud_config)
# Add the CloudFormation signalling unit (dcos-cfn-signal.service).
cc_variant = results.utils.add_units(
cc_variant,
yaml.safe_load(gen.template.parse_str(late_services).render(cc_params)),
cloud_init_implementation)
# Add roles
cc_variant = results.utils.add_roles(cc_variant, cc_params['roles'] + ['aws'])
# NOTE: If this gets printed in string stylerather than '|' the AWS
# parameters which need to be split out for the cloudformation to
# interpret end up all escaped and undoing it would be hard.
variant_cloudconfig = results.utils.render_cloudconfig(cc_variant)
# Render the cloudformation
cloudformation = render_cloudformation(
results.templates[template_name],
cloud_config=variant_cloudconfig)
print("Validating CloudFormation: {}".format(template_name))
validate_cf(cloudformation)
return (cloudformation, results)
def gen_advanced_template(arguments, variant_prefix, reproducible_artifact_path, os_type):
# Generate every advanced-template artifact for one OS: per-node-type
# templates (masters once per supported cluster size, plus matching zen
# templates) and single templates for public/private agents.
for node_type in ['master', 'priv-agent', 'pub-agent']:
# TODO(cmaloney): This forcibly overwriting arguments might overwrite a user set argument
# without noticing (such as exhibitor_storage_backend)
node_template_id, node_source = groups[node_type]
local_source = Source()
local_source.add_must('os_type', os_type)
local_source.add_must('region_to_ami_mapping', gen_ami_mapping({"coreos", "el7", "el7prereq"}))
params = cf_instance_groups[node_template_id]
params['report_name'] = aws_advanced_report_names[node_type]
params['os_type'] = os_type
params['node_type'] = node_type
template_key = 'advanced-{}'.format(node_type)
template_name = template_key + '.json'
def _as_artifact(filename, bundle):
yield from _as_artifact_and_pkg(variant_prefix, filename, bundle)
if node_type == 'master':
# Masters get one template per supported (odd) cluster size.
for num_masters in [1, 3, 5, 7]:
master_tk = '{}-{}-{}'.format(os_type, template_key, num_masters)
print('Building {} {} for num_masters = {}'.format(os_type, node_type, num_masters))
num_masters_source = Source()
num_masters_source.add_must('num_masters', str(num_masters))
bundle = make_advanced_bundle(arguments,
[node_source, local_source, num_masters_source],
template_name,
deepcopy(params))
yield from _as_artifact('{}.json'.format(master_tk), bundle)
# Zen template corresponding to this number of masters
yield _as_cf_artifact(
'{}{}-zen-{}.json'.format(variant_prefix, os_type, num_masters),
render_cloudformation_transform(
resource_string("gen", "aws/templates/advanced/zen.json").decode(),
variant_prefix=variant_prefix,
reproducible_artifact_path=reproducible_artifact_path,
**bundle[1].arguments))
else:
# Agents are master-count agnostic; num_masters is a placeholder.
local_source.add_must('num_masters', '1')
local_source.add_must('nat_ami_mapping', gen_ami_mapping({"natami"}))
bundle = make_advanced_bundle(arguments,
[node_source, local_source],
template_name,
deepcopy(params))
yield from _as_artifact('{}-{}'.format(os_type, template_name), bundle)
# Source overlay for the "simple" single-stack CloudFormation templates.
# Most values are CloudFormation intrinsics (Ref / Fn::GetAtt) wrapped in
# Late() so they are resolved at stack-create time rather than at build time;
# exhibitor state is stored in the stack's S3 bucket.
aws_simple_source = Source({
    'must': {
        'exhibitor_address': Late('{ "Fn::GetAtt" : [ "InternalMasterLoadBalancer", "DNSName" ] }'),
        's3_bucket': Late('{ "Ref" : "ExhibitorS3Bucket" }'),
        'exhibitor_storage_backend': 'aws_s3',
        'master_role': Late('{ "Ref" : "MasterRole" }'),
        'agent_role': Late('{ "Ref" : "SlaveRole" }'),
        's3_prefix': Late('{ "Ref" : "AWS::StackName" }'),
        'region_to_ami_mapping': gen_ami_mapping({"stable"}),
        'nat_ami_mapping': gen_ami_mapping({"natami"}),
        'has_master_external_loadbalancer': 'true',
        'master_external_loadbalancer': Late('{ "Fn::GetAtt" : [ "ElasticLoadBalancer", "DNSName" ] }'),
    }
})
def gen_simple_template(variant_prefix, filename, arguments, extra_source):
    """Generate one "simple" CloudFormation template and yield its artifacts.

    Renders the shared cloud-config once, specializes it per instance group
    (master / slave / slave_public), splices the results into the
    cloudformation.json template, validates it, and yields the artifact and
    package entries via _as_artifact_and_pkg.
    """
    results = gen.generate(
        arguments=arguments,
        extra_templates=[
            'aws/templates/cloudformation.json',
            'aws/dcos-config.yaml',
            'coreos-aws/cloud-config.yaml',
            'coreos/cloud-config.yaml'],
        extra_sources=[aws_base_source, aws_simple_source, extra_source])
    cloud_config = results.templates['cloud-config.yaml']
    # Add general services
    cloud_config = results.utils.add_services(cloud_config, 'coreos')
    # Specialize for master, slave, slave_public
    variant_cloudconfig = {}
    for variant, params in cf_instance_groups.items():
        cc_variant = deepcopy(cloud_config)
        # Specialize the dcos-cfn-signal service
        cc_variant = results.utils.add_units(
            cc_variant,
            yaml.safe_load(gen.template.parse_str(late_services).render(deepcopy(params))))
        # Add roles
        cc_variant = results.utils.add_roles(cc_variant, params['roles'] + ['aws'])
        # NOTE: If this gets printed in string style rather than '|' the AWS
        # parameters which need to be split out for the cloudformation to
        # interpret end up all escaped and undoing it would be hard.
        variant_cloudconfig[variant] = results.utils.render_cloudconfig(cc_variant)
    # Render the cloudformation
    cloudformation = render_cloudformation(
        results.templates['cloudformation.json'],
        master_cloud_config=variant_cloudconfig['master'],
        slave_cloud_config=variant_cloudconfig['slave'],
        slave_public_cloud_config=variant_cloudconfig['slave_public'])
    with logger.scope("Validating CloudFormation"):
        validate_cf(cloudformation)
    yield from _as_artifact_and_pkg(variant_prefix, filename, (cloudformation, results))
# HTML snippet for one "Launch stack" button: opens the AWS CloudFormation
# console pre-filled with the templated S3 URL for a given region/template.
button_template = "<a href='https://console.aws.amazon.com/cloudformation/home?region={region_id}#/stacks/new?templateURL={cloudformation_full_s3_url}/{template_name}.cloudformation.json'><img src='https://s3.amazonaws.com/cloudformation-examples/cloudformation-launch-stack.png' alt='Launch stack button'></a>" # noqa
# One HTML table row per region: name, id, and the two launch buttons.
region_line_template = "<tr><td>{region_name}</td><td>{region_id}</td><td>{single_master_button}</td><td>{multi_master_button}</td></tr>" # noqa
def gen_buttons(build_name, reproducible_artifact_path, tag, commit, variant_arguments):
    """Render the aws.html page of per-region launch-stack buttons."""
    # TODO(cmaloney): Switch to package_resources
    variant_list = sorted(pkgpanda.util.variant_prefix(v) for v in variant_arguments)
    regular_buttons = []
    for region in aws_region_names:
        def make_button(template_name, s3_url):
            # One launch button for this region pointing at template_name.
            return button_template.format(
                region_id=region['id'],
                reproducible_artifact_path=reproducible_artifact_path,
                template_name=template_name,
                cloudformation_full_s3_url=s3_url)

        row_parts = []
        for variant, arguments in variant_arguments.items():
            prefix = pkgpanda.util.variant_prefix(variant)
            full_s3_url = arguments['cloudformation_s3_url_full']
            row_parts.append(region_line_template.format(
                region_name=region['name'],
                region_id=region['id'],
                single_master_button=make_button(prefix + 'single-master', s3_url=full_s3_url),
                multi_master_button=make_button(prefix + 'multi-master', s3_url=full_s3_url)))
        regular_buttons.append(''.join(row_parts))
    return gen.template.parse_resources('aws/templates/aws.html').render(
        {
            'build_name': build_name,
            'reproducible_artifact_path': reproducible_artifact_path,
            'tag': tag,
            'commit': commit,
            'regular_buttons': regular_buttons,
            'variant_list': variant_list
        })
def do_create(tag, build_name, reproducible_artifact_path, commit, variant_arguments, all_completes):
    """Yield every AWS build artifact: simple and advanced CloudFormation
    templates for each variant, the HTML button page, and the shared
    infrastructure template.

    NOTE(review): all_completes is accepted but never used in this body --
    presumably kept for signature compatibility with other providers; confirm.
    """
    # Generate the single-master and multi-master templates.
    for bootstrap_variant, variant_base_args in variant_arguments.items():
        variant_prefix = pkgpanda.util.variant_prefix(bootstrap_variant)

        def make(num_masters, filename):
            # Simple (single-stack) template pinned to a fixed master count.
            num_masters_source = Source()
            num_masters_source.add_must('num_masters', str(num_masters))
            yield from gen_simple_template(
                variant_prefix,
                filename,
                variant_base_args,
                num_masters_source)
        # Single master templates
        yield from make(1, 'single-master.cloudformation.json')
        # Multi master templates
        yield from make(3, 'multi-master.cloudformation.json')
        # Advanced templates
        for os_type in ['coreos', 'el7']:
            yield from gen_advanced_template(
                variant_base_args,
                variant_prefix,
                reproducible_artifact_path,
                os_type)
    # Button page linking to the basic templates.
    button_page = gen_buttons(build_name, reproducible_artifact_path, tag, commit, variant_arguments)
    yield {
        'channel_path': 'aws.html',
        'local_content': button_page,
        'content_type': 'text/html; charset=utf-8'}
    # This renders the infra template only, which has no difference between CE and EE
    yield from gen_supporting_template()
| |
# Copyright (c) 2012 OpenStack Foundation.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import mock
import webob.exc
from neutron.api.v2 import attributes as attr
from neutron.common import constants as const
from neutron.common.test_lib import test_config
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import securitygroups_db
from neutron.extensions import securitygroup as ext_sg
from neutron.tests.unit import test_db_plugin
# Dotted path of the stub plugin defined below; handed to the base test
# case's setUp so the API router loads it as the core plugin.
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_security_group.'
                   'SecurityGroupTestPlugin')
# Root of the unit-test tree and its etc/ directory of config fixtures.
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
    """Join path components *p* onto the test ``etc`` fixture directory."""
    segments = (ETCDIR,) + tuple(p)
    return os.path.join(*segments)
class SecurityGroupTestExtensionManager(object):
    """Minimal extension manager that exposes only the security-group
    extension resources.
    """

    def get_resources(self):
        """Return the security-group REST resources.

        The global attribute map is updated here because the test setup
        never initializes the main API router, which is what normally
        extends it.
        """
        attr.RESOURCE_ATTRIBUTE_MAP.update(ext_sg.RESOURCE_ATTRIBUTE_MAP)
        return ext_sg.Securitygroup.get_resources()

    def get_actions(self):
        """This extension provides no actions."""
        return []

    def get_request_extensions(self):
        """This extension provides no request extensions."""
        return []
class SecurityGroupsTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    """Base test case with helpers for creating security groups and rules
    through the extension API.
    """

    def _create_security_group(self, fmt, name, description, **kwargs):
        """POST a security group and return the raw webob response.

        Recognized kwargs: tenant_id (defaults to 'test_tenant') and
        set_context (run the request under that tenant's auth context).
        """
        data = {'security_group': {'name': name,
                                   'tenant_id': kwargs.get('tenant_id',
                                                           'test_tenant'),
                                   'description': description}}
        security_group_req = self.new_create_request('security-groups', data,
                                                     fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            security_group_req.environ['neutron.context'] = (
                context.Context('', kwargs['tenant_id']))
        return security_group_req.get_response(self.ext_api)

    def _build_security_group_rule(self, security_group_id, direction, proto,
                                   port_range_min=None, port_range_max=None,
                                   remote_ip_prefix=None, remote_group_id=None,
                                   tenant_id='test_tenant',
                                   ethertype=const.IPv4):
        """Build (but do not send) a security-group-rule request body.

        Bug fix: the original dict literal listed the 'ethertype' key twice;
        the redundant duplicate has been removed (the value was identical).
        """
        data = {'security_group_rule': {'security_group_id': security_group_id,
                                        'direction': direction,
                                        'protocol': proto,
                                        'ethertype': ethertype,
                                        'tenant_id': tenant_id}}
        # NOTE(review): falsy values (e.g. port 0 / ICMP type 0) are left out
        # of the body, preserving the original truthiness checks.
        if port_range_min:
            data['security_group_rule']['port_range_min'] = port_range_min
        if port_range_max:
            data['security_group_rule']['port_range_max'] = port_range_max
        if remote_ip_prefix:
            data['security_group_rule']['remote_ip_prefix'] = remote_ip_prefix
        if remote_group_id:
            data['security_group_rule']['remote_group_id'] = remote_group_id
        return data

    def _create_security_group_rule(self, fmt, rules, **kwargs):
        """POST a security-group-rule body and return the raw response."""
        security_group_rule_req = self.new_create_request(
            'security-group-rules', rules, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            security_group_rule_req.environ['neutron.context'] = (
                context.Context('', kwargs['tenant_id']))
        return security_group_rule_req.get_response(self.ext_api)

    def _make_security_group(self, fmt, name, description, **kwargs):
        """Create a security group, raising on 4xx/5xx, else deserialize."""
        res = self._create_security_group(fmt, name, description, **kwargs)
        if res.status_int >= webob.exc.HTTPBadRequest.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    def _make_security_group_rule(self, fmt, rules, **kwargs):
        """Create a security group rule, raising on 4xx/5xx, else deserialize.

        Bug fix: the original hard-coded self.fmt (ignoring the fmt argument)
        and silently dropped **kwargs, so options like set_context/tenant_id
        never reached the request; both are now forwarded.
        """
        res = self._create_security_group_rule(fmt, rules, **kwargs)
        if res.status_int >= webob.exc.HTTPBadRequest.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)

    @contextlib.contextmanager
    def security_group(self, name='webservers', description='webservers',
                       fmt=None, no_delete=False):
        """Yield a security group; delete it on exit unless no_delete."""
        if not fmt:
            fmt = self.fmt
        security_group = self._make_security_group(fmt, name, description)
        try:
            yield security_group
        finally:
            if not no_delete:
                self._delete('security-groups',
                             security_group['security_group']['id'])

    @contextlib.contextmanager
    def security_group_rule(self, security_group_id='4cd70774-cc67-4a87-9b39-7'
                                                    'd1db38eb087',
                            direction='ingress', protocol=const.PROTO_NAME_TCP,
                            port_range_min='22', port_range_max='22',
                            remote_ip_prefix=None, remote_group_id=None,
                            fmt=None, no_delete=False, ethertype=const.IPv4):
        """Yield a security group rule; delete it on exit unless no_delete."""
        if not fmt:
            fmt = self.fmt
        rule = self._build_security_group_rule(security_group_id,
                                               direction,
                                               protocol, port_range_min,
                                               port_range_max,
                                               remote_ip_prefix,
                                               remote_group_id,
                                               ethertype=ethertype)
        # Bug fix: pass the resolved fmt instead of hard-coding self.fmt.
        security_group_rule = self._make_security_group_rule(fmt, rule)
        try:
            yield security_group_rule
        finally:
            if not no_delete:
                self._delete('security-group-rules',
                             security_group_rule['security_group_rule']['id'])

    def _delete_default_security_group_egress_rules(self, security_group_id):
        """Deletes default egress rules given a security group ID."""
        res = self._list(
            'security-group-rules',
            query_params='security_group_id=%s' % security_group_id)
        for r in res['security_group_rules']:
            # A default egress rule has no protocol, ports or remote prefix.
            if (r['direction'] == 'egress' and not r['port_range_max'] and
                    not r['port_range_min'] and not r['protocol']
                    and not r['remote_ip_prefix']):
                self._delete('security-group-rules', r['id'])

    def _assert_sg_rule_has_kvs(self, security_group_rule, expected_kvs):
        """Asserts that the sg rule has expected key/value pairs passed
        in as expected_kvs dictionary
        """
        for k, v in expected_kvs.iteritems():
            self.assertEqual(security_group_rule[k], v)
class SecurityGroupsTestCaseXML(SecurityGroupsTestCase):
    # Re-run the same helpers with XML (rather than the default JSON) format.
    fmt = 'xml'
class SecurityGroupTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                              securitygroups_db.SecurityGroupDbMixin):
    """Test plugin that implements necessary calls on create/delete port for
    associating ports with security groups.
    """

    __native_pagination_support = True
    __native_sorting_support = True

    supported_extension_aliases = ["security-group"]

    def create_port(self, context, port):
        """Create a port, attaching the tenant's default security group
        when the request names no groups explicitly.
        """
        tenant_id = self._get_tenant_id_for_create(context, port['port'])
        default_sg = self._ensure_default_security_group(context, tenant_id)
        if not attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)):
            port['port'][ext_sg.SECURITYGROUPS] = [default_sg]
        session = context.session
        with session.begin(subtransactions=True):
            sgids = self._get_security_groups_on_port(context, port)
            port = super(SecurityGroupTestPlugin, self).create_port(context,
                                                                    port)
            self._process_port_create_security_group(context, port,
                                                     sgids)
        return port

    def update_port(self, context, id, port):
        """Update a port, rebuilding its security group bindings whenever
        the request carries a security group list.
        """
        session = context.session
        with session.begin(subtransactions=True):
            if ext_sg.SECURITYGROUPS in port['port']:
                port['port'][ext_sg.SECURITYGROUPS] = (
                    self._get_security_groups_on_port(context, port))
                # delete the port binding and read it with the new rules
                self._delete_port_security_group_bindings(context, id)
                port['port']['id'] = id
                self._process_port_create_security_group(
                    context, port['port'],
                    port['port'].get(ext_sg.SECURITYGROUPS))
            port = super(SecurityGroupTestPlugin, self).update_port(
                context, id, port)
        return port

    def create_network(self, context, network):
        """Create a network, ensuring the tenant's default security group
        exists first.
        """
        tenant_id = self._get_tenant_id_for_create(context, network['network'])
        self._ensure_default_security_group(context, tenant_id)
        return super(SecurityGroupTestPlugin, self).create_network(context,
                                                                   network)

    def get_ports(self, context, filters=None, fields=None,
                  sorts=None, limit=None, marker=None,
                  page_reverse=False):
        """List ports via the base plugin.

        Bug fix: ``sorts`` previously defaulted to a mutable ``[]`` shared
        across all calls; ``None`` is now the sentinel and a fresh list is
        substituted per call, so super() still receives ``[]`` by default.
        NOTE(review): ``fields`` is accepted but not forwarded to super() --
        presumably irrelevant for these tests; confirm before reuse.
        """
        sorts = [] if sorts is None else sorts
        neutron_lports = super(SecurityGroupTestPlugin, self).get_ports(
            context, filters, sorts=sorts, limit=limit, marker=marker,
            page_reverse=page_reverse)
        return neutron_lports
class SecurityGroupDBTestCase(SecurityGroupsTestCase):
    # Wires the security-group extension manager and the stub DB plugin
    # into the base test case's setup.
    def setUp(self, plugin=None):
        # Fall back to the stub plugin declared in this module.
        plugin = plugin or DB_PLUGIN_KLASS
        ext_mgr = SecurityGroupTestExtensionManager()
        # test_config is module-global state read by the base setUp.
        test_config['extension_manager'] = ext_mgr
        super(SecurityGroupDBTestCase, self).setUp(plugin)
class TestSecurityGroups(SecurityGroupDBTestCase):
def test_create_security_group(self):
name = 'webservers'
description = 'my webservers'
keys = [('name', name,), ('description', description)]
with self.security_group(name, description) as security_group:
for k, v, in keys:
self.assertEqual(security_group['security_group'][k], v)
# Verify that default egress rules have been created
sg_rules = security_group['security_group']['security_group_rules']
self.assertEqual(len(sg_rules), 2)
v4_rules = [r for r in sg_rules if r['ethertype'] == const.IPv4]
self.assertEqual(len(v4_rules), 1)
v4_rule = v4_rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv4,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_rule, expected)
v6_rules = [r for r in sg_rules if r['ethertype'] == const.IPv6]
self.assertEqual(len(v6_rules), 1)
v6_rule = v6_rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv6,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_rule, expected)
def test_update_security_group(self):
with self.security_group() as sg:
data = {'security_group': {'name': 'new_name',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_group']['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['security_group']['name'],
data['security_group']['name'])
self.assertEqual(res['security_group']['description'],
data['security_group']['description'])
def test_update_security_group_name_to_default_fail(self):
with self.security_group() as sg:
data = {'security_group': {'name': 'default',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_group']['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_update_default_security_group_name_fail(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
data = {'security_group': {'name': 'new_name',
'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_groups'][0]['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.ext_api)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_update_default_security_group_with_description(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
data = {'security_group': {'description': 'new_desc'}}
req = self.new_update_request('security-groups',
data,
sg['security_groups'][0]['id'])
res = self.deserialize(self.fmt, req.get_response(self.ext_api))
self.assertEqual(res['security_group']['description'],
data['security_group']['description'])
def test_default_security_group(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
def test_create_default_security_group_fail(self):
name = 'default'
description = 'my webservers'
res = self._create_security_group(self.fmt, name, description)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_list_security_groups(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as security_groups:
self._test_list_resources('security-group',
security_groups,
query_params='description=sg')
def test_list_security_groups_with_sort(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as (sg1, sg2, sg3):
self._test_list_with_sort('security-group',
(sg3, sg2, sg1),
[('name', 'desc')],
query_params='description=sg')
def test_list_security_groups_with_pagination(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as (sg1, sg2, sg3):
self._test_list_with_pagination('security-group',
(sg1, sg2, sg3),
('name', 'asc'), 2, 2,
query_params='description=sg')
def test_list_security_groups_with_pagination_reverse(self):
with contextlib.nested(self.security_group(name='sg1',
description='sg'),
self.security_group(name='sg2',
description='sg'),
self.security_group(name='sg3',
description='sg')
) as (sg1, sg2, sg3):
self._test_list_with_pagination_reverse(
'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2,
query_params='description=sg')
def test_create_security_group_rule_ethertype_invalid_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
ethertype = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', const.PROTO_NAME_TCP, '22',
'22', None, None, ethertype=ethertype)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_tcp_protocol_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = const.PROTO_NUM_TCP # TCP
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol, '22', '22')
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_protocol_as_number(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
protocol = 2
rule = self._build_security_group_rule(
security_group_id, 'ingress', protocol)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_case_insensitive(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = 'TCP'
port_range_min = 22
port_range_max = 22
ethertype = 'ipV4'
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
ethertype=ethertype) as rule:
# the lower case value will be return
self.assertEqual(rule['security_group_rule']['protocol'],
protocol.lower())
self.assertEqual(rule['security_group_rule']['ethertype'],
const.IPv4)
def test_get_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
remote_group_id = sg['security_group']['id']
res = self.new_show_request('security-groups', remote_group_id)
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix):
group = self.deserialize(
self.fmt, res.get_response(self.ext_api))
sg_rule = group['security_group']['security_group_rules']
self.assertEqual(group['security_group']['id'],
remote_group_id)
self.assertEqual(len(sg_rule), 3)
sg_rule = [r for r in sg_rule if r['direction'] == 'ingress']
for k, v, in keys:
self.assertEqual(sg_rule[0][k], v)
def test_delete_security_group(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description, no_delete=True) as sg:
remote_group_id = sg['security_group']['id']
self._delete('security-groups', remote_group_id,
webob.exc.HTTPNoContent.code)
def test_delete_default_security_group_admin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPNoContent.code)
def test_delete_default_security_group_nonadmin(self):
with self.network():
res = self.new_list_request('security-groups')
sg = self.deserialize(self.fmt, res.get_response(self.ext_api))
neutron_context = context.Context('', 'test-tenant')
self._delete('security-groups', sg['security_groups'][0]['id'],
webob.exc.HTTPConflict.code,
neutron_context=neutron_context)
def test_security_group_list_creates_default_security_group(self):
neutron_context = context.Context('', 'test-tenant')
sg = self._list('security-groups',
neutron_context=neutron_context).get('security_groups')
self.assertEqual(len(sg), 1)
def test_default_security_group_rules(self):
with self.network():
res = self.new_list_request('security-groups')
groups = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(groups['security_groups']), 1)
security_group_id = groups['security_groups'][0]['id']
res = self.new_list_request('security-group-rules')
rules = self.deserialize(self.fmt, res.get_response(self.ext_api))
self.assertEqual(len(rules['security_group_rules']), 4)
# Verify default rule for v4 egress
sg_rules = rules['security_group_rules']
rules = [
r for r in sg_rules
if r['direction'] == 'egress' and r['ethertype'] == const.IPv4
]
self.assertEqual(len(rules), 1)
v4_egress = rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv4,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_egress, expected)
# Verify default rule for v6 egress
rules = [
r for r in sg_rules
if r['direction'] == 'egress' and r['ethertype'] == const.IPv6
]
self.assertEqual(len(rules), 1)
v6_egress = rules[0]
expected = {'direction': 'egress',
'ethertype': const.IPv6,
'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_egress, expected)
# Verify default rule for v4 ingress
rules = [
r for r in sg_rules
if r['direction'] == 'ingress' and r['ethertype'] == const.IPv4
]
self.assertEqual(len(rules), 1)
v4_ingress = rules[0]
expected = {'direction': 'ingress',
'ethertype': const.IPv4,
'remote_group_id': security_group_id,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v4_ingress, expected)
# Verify default rule for v6 ingress
rules = [
r for r in sg_rules
if r['direction'] == 'ingress' and r['ethertype'] == const.IPv6
]
self.assertEqual(len(rules), 1)
v6_ingress = rules[0]
expected = {'direction': 'ingress',
'ethertype': const.IPv6,
'remote_group_id': security_group_id,
'remote_ip_prefix': None,
'protocol': None,
'port_range_max': None,
'port_range_min': None}
self._assert_sg_rule_has_kvs(v6_ingress, expected)
def test_create_security_group_rule_remote_ip_prefix(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
with self.security_group(name, description) as sg2:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_group_id = sg2['security_group']['id']
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
keys = [('remote_group_id', remote_group_id),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_group_id=remote_group_id
) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_icmp_with_type_and_code(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_ICMP
# port_range_min (ICMP type) is greater than port_range_max
# (ICMP code) in order to confirm min <= max port check is
# not called for ICMP.
port_range_min = 8
port_range_max = 5
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_rule_icmp_with_type_only(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_ICMP
# ICMP type
port_range_min = 8
# ICMP code
port_range_max = None
keys = [('remote_ip_prefix', remote_ip_prefix),
('security_group_id', security_group_id),
('direction', direction),
('protocol', protocol),
('port_range_min', port_range_min),
('port_range_max', port_range_max)]
with self.security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix) as rule:
for k, v, in keys:
self.assertEqual(rule['security_group_rule'][k], v)
def test_create_security_group_source_group_ip_and_ip_prefix(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
remote_group_id = "9cd70774-cc67-4a87-9b39-7d1db38eb087"
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix,
remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code)
def test_create_security_group_rule_bad_security_group_id(self):
security_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
remote_ip_prefix = "10.0.0.0/24"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_ip_prefix)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_tenant(self):
with self.security_group() as sg:
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': "bad_tenant"}}
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_tenant_remote_group_id(self):
with self.security_group() as sg:
res = self._create_security_group(self.fmt, 'webservers',
'webservers',
tenant_id='bad_tenant')
sg2 = self.deserialize(self.fmt, res)
rule = {'security_group_rule':
{'security_group_id': sg2['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant',
'remote_group_id': sg['security_group']['id']}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_tenant_security_group_rule(self):
with self.security_group() as sg:
res = self._create_security_group(self.fmt, 'webservers',
'webservers',
tenant_id='bad_tenant')
self.deserialize(self.fmt, res)
rule = {'security_group_rule':
{'security_group_id': sg['security_group']['id'],
'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'port_range_min': '22',
'port_range_max': '22',
'tenant_id': 'bad_tenant'}}
res = self._create_security_group_rule(self.fmt, rule,
tenant_id='bad_tenant',
set_context=True)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_bad_remote_group_id(self):
name = 'webservers'
description = 'my webservers'
with self.security_group(name, description) as sg:
security_group_id = sg['security_group']['id']
remote_group_id = "4cd70774-cc67-4a87-9b39-7d1db38eb087"
direction = "ingress"
protocol = const.PROTO_NAME_TCP
port_range_min = 22
port_range_max = 22
rule = self._build_security_group_rule(security_group_id, direction,
protocol, port_range_min,
port_range_max,
remote_group_id=remote_group_id)
res = self._create_security_group_rule(self.fmt, rule)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_create_security_group_rule_duplicate_rules(self):
    """Creating an identical rule twice must raise a 409 conflict."""
    with self.security_group('webservers', 'my webservers') as sg:
        sg_id = sg['security_group']['id']
        with self.security_group_rule(sg_id):
            rule = self._build_security_group_rule(
                sg_id, 'ingress', const.PROTO_NAME_TCP, '22', '22')
            self._create_security_group_rule(self.fmt, rule)
            resp = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPConflict.code, resp.status_int)
def test_create_security_group_rule_min_port_greater_max(self):
    """port_range_min > port_range_max must be rejected with a 400."""
    with self.security_group('webservers', 'my webservers') as sg:
        sg_id = sg['security_group']['id']
        with self.security_group_rule(sg_id):
            protocols = (const.PROTO_NAME_TCP, const.PROTO_NAME_UDP,
                         const.PROTO_NUM_TCP, const.PROTO_NUM_UDP)
            for proto in protocols:
                rule = self._build_security_group_rule(
                    sg_id, 'ingress', proto, '50', '22')
                self._create_security_group_rule(self.fmt, rule)
                resp = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, resp)
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 resp.status_int)
def test_create_security_group_rule_ports_but_no_protocol(self):
    """Port ranges without a protocol must be rejected with a 400."""
    with self.security_group('webservers', 'my webservers') as sg:
        sg_id = sg['security_group']['id']
        with self.security_group_rule(sg_id):
            rule = self._build_security_group_rule(
                sg_id, 'ingress', None, '22', '22')
            self._create_security_group_rule(self.fmt, rule)
            resp = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int)
def test_create_security_group_rule_port_range_min_only(self):
    """Supplying only port_range_min must be rejected with a 400."""
    with self.security_group('webservers', 'my webservers') as sg:
        sg_id = sg['security_group']['id']
        with self.security_group_rule(sg_id):
            rule = self._build_security_group_rule(
                sg_id, 'ingress', const.PROTO_NAME_TCP, '22', None)
            self._create_security_group_rule(self.fmt, rule)
            resp = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int)
def test_create_security_group_rule_port_range_max_only(self):
    """Supplying only port_range_max must be rejected with a 400."""
    with self.security_group('webservers', 'my webservers') as sg:
        sg_id = sg['security_group']['id']
        with self.security_group_rule(sg_id):
            rule = self._build_security_group_rule(
                sg_id, 'ingress', const.PROTO_NAME_TCP, None, '22')
            self._create_security_group_rule(self.fmt, rule)
            resp = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int)
def test_create_security_group_rule_icmp_type_too_big(self):
    """An ICMP type above 255 must be rejected with a 400."""
    with self.security_group('webservers', 'my webservers') as sg:
        sg_id = sg['security_group']['id']
        with self.security_group_rule(sg_id):
            rule = self._build_security_group_rule(
                sg_id, 'ingress', const.PROTO_NAME_ICMP, '256', None)
            self._create_security_group_rule(self.fmt, rule)
            resp = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int)
def test_create_security_group_rule_icmp_code_too_big(self):
    """An ICMP code above 255 must be rejected with a 400."""
    with self.security_group('webservers', 'my webservers') as sg:
        sg_id = sg['security_group']['id']
        with self.security_group_rule(sg_id):
            rule = self._build_security_group_rule(
                sg_id, 'ingress', const.PROTO_NAME_ICMP, '8', '256')
            self._create_security_group_rule(self.fmt, rule)
            resp = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int)
def test_list_ports_security_group(self):
    """A listed port carries exactly one (default) security group."""
    with self.network() as net:
        with self.subnet(net):
            self._create_port(self.fmt, net['network']['id'])
            resp = self.new_list_request('ports').get_response(self.api)
            listed = self.deserialize(self.fmt, resp)
            first = listed['ports'][0]
            self.assertEqual(1, len(first[ext_sg.SECURITYGROUPS]))
            self._delete('ports', first['id'])
def test_list_security_group_rules(self):
    """All egress rules we create come back from a filtered list."""
    with self.security_group(name='sg') as sg:
        sg_id = sg['security_group']['id']
        mgrs = [self.security_group_rule(sg_id, direction='egress',
                                         port_range_min=port,
                                         port_range_max=port)
                for port in (22, 23, 24)]
        with contextlib.nested(*mgrs) as (r1, r2, r3):
            # Drop the default egress rules; they would otherwise break
            # the exact-match assertion below.
            self._delete_default_security_group_egress_rules(sg_id)
            query = 'direction=egress&security_group_id=' + sg_id
            self._test_list_resources('security-group-rule',
                                      [r1, r2, r3],
                                      query_params=query)
def test_list_security_group_rules_with_sort(self):
    """Rules can be listed sorted by port_range_max descending."""
    with self.security_group(name='sg') as sg:
        sg_id = sg['security_group']['id']
        mgrs = [self.security_group_rule(sg_id, direction='egress',
                                         port_range_min=port,
                                         port_range_max=port)
                for port in (22, 23, 24)]
        with contextlib.nested(*mgrs) as (r1, r2, r3):
            # Drop the default egress rules; they would otherwise break
            # the exact-match assertion below.
            self._delete_default_security_group_egress_rules(sg_id)
            query = 'direction=egress&security_group_id=' + sg_id
            self._test_list_with_sort('security-group-rule',
                                      (r3, r2, r1),
                                      [('port_range_max', 'desc')],
                                      query_params=query)
def test_list_security_group_rules_with_pagination(self):
    """Rule listing supports pagination with sorted pages."""
    with self.security_group(name='sg') as sg:
        sg_id = sg['security_group']['id']
        mgrs = [self.security_group_rule(sg_id, direction='egress',
                                         port_range_min=port,
                                         port_range_max=port)
                for port in (22, 23, 24)]
        with contextlib.nested(*mgrs) as (r1, r2, r3):
            # Drop the default egress rules; they would otherwise break
            # the exact-match assertion below.
            self._delete_default_security_group_egress_rules(sg_id)
            query = 'direction=egress&security_group_id=' + sg_id
            self._test_list_with_pagination(
                'security-group-rule', (r3, r2, r1),
                ('port_range_max', 'desc'), 2, 2,
                query_params=query)
def test_list_security_group_rules_with_pagination_reverse(self):
    """Rule listing supports reverse pagination."""
    with self.security_group(name='sg') as sg:
        sg_id = sg['security_group']['id']
        mgrs = [self.security_group_rule(sg_id, direction='egress',
                                         port_range_min=port,
                                         port_range_max=port)
                for port in (22, 23, 24)]
        with contextlib.nested(*mgrs) as (r1, r2, r3):
            self._test_list_with_pagination_reverse(
                'security-group-rule', (r3, r2, r1),
                ('port_range_max', 'desc'), 2, 2,
                query_params='direction=egress')
def test_update_port_with_security_group(self):
    """A port update can set a custom group; omitting it keeps it."""
    with self.network() as net:
        with self.subnet(net):
            with self.security_group() as sg:
                resp = self._create_port(self.fmt, net['network']['id'])
                port = self.deserialize(self.fmt, resp)
                sg_id = sg['security_group']['id']
                body = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                 'name': port['port']['name'],
                                 ext_sg.SECURITYGROUPS: [sg_id]}}
                req = self.new_update_request('ports', body,
                                              port['port']['id'])
                updated = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                self.assertEqual(
                    sg_id, updated['port'][ext_sg.SECURITYGROUPS][0])
                # An update that omits security groups must not strip the
                # one assigned above.
                body = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                 'name': port['port']['name']}}
                req = self.new_update_request('ports', body,
                                              port['port']['id'])
                updated = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                self.assertEqual(
                    sg_id, updated['port'][ext_sg.SECURITYGROUPS][0])
                self._delete('ports', port['port']['id'])
def test_update_port_with_multiple_security_groups(self):
    """A port may be created with more than one security group."""
    with self.network() as net:
        with self.subnet(net):
            with self.security_group() as sg1:
                with self.security_group() as sg2:
                    resp = self._create_port(
                        self.fmt, net['network']['id'],
                        security_groups=[sg1['security_group']['id'],
                                         sg2['security_group']['id']])
                    port = self.deserialize(self.fmt, resp)
                    self.assertEqual(
                        2, len(port['port'][ext_sg.SECURITYGROUPS]))
                    self._delete('ports', port['port']['id'])
def test_update_port_remove_security_group_empty_list(self):
    """Updating with security_groups=[] strips the port's groups."""
    with self.network() as net:
        with self.subnet(net):
            with self.security_group() as sg:
                resp = self._create_port(
                    self.fmt, net['network']['id'],
                    security_groups=[sg['security_group']['id']])
                port = self.deserialize(self.fmt, resp)
                body = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                 'name': port['port']['name'],
                                 'security_groups': []}}
                req = self.new_update_request('ports', body,
                                              port['port']['id'])
                updated = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                self.assertEqual(
                    [], updated['port'].get(ext_sg.SECURITYGROUPS))
                self._delete('ports', port['port']['id'])
def test_update_port_remove_security_group_none(self):
    """Updating with security_groups=None strips the port's groups."""
    with self.network() as net:
        with self.subnet(net):
            with self.security_group() as sg:
                resp = self._create_port(
                    self.fmt, net['network']['id'],
                    security_groups=[sg['security_group']['id']])
                port = self.deserialize(self.fmt, resp)
                body = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                 'name': port['port']['name'],
                                 'security_groups': None}}
                req = self.new_update_request('ports', body,
                                              port['port']['id'])
                updated = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                self.assertEqual(
                    [], updated['port'].get(ext_sg.SECURITYGROUPS))
                self._delete('ports', port['port']['id'])
def test_create_port_with_bad_security_group(self):
    """A bogus security group id on port create must 400."""
    with self.network() as net:
        with self.subnet(net):
            resp = self._create_port(self.fmt, net['network']['id'],
                                     security_groups=['bad_id'])
            self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPBadRequest.code,
                             resp.status_int)
def test_create_delete_security_group_port_in_use(self):
    """Deleting a security group bound to a port must 409 until the
    port is removed.
    """
    with self.network() as n:
        with self.subnet(n):
            with self.security_group() as sg:
                res = self._create_port(self.fmt, n['network']['id'],
                                        security_groups=(
                                            [sg['security_group']['id']]))
                port = self.deserialize(self.fmt, res)
                self.assertEqual(port['port'][ext_sg.SECURITYGROUPS][0],
                                 sg['security_group']['id'])
                # Deleting a security group that is still attached to a
                # port must be refused with 409 Conflict.  (The previous
                # code bound _delete's result to an unused local.)
                self._delete('security-groups',
                             sg['security_group']['id'],
                             webob.exc.HTTPConflict.code)
                # Delete the blocking port so the fixtures can clean up.
                self._delete('ports', port['port']['id'])
def test_create_security_group_rule_bulk_native(self):
    """Native bulk rule creation returns 201 for a two-rule payload."""
    if self._skip_native_bulk:
        self.skipTest("Plugin does not support native bulk "
                      "security_group_rule create")
    with self.security_group() as sg:
        sg_id = sg['security_group']['id']
        first = self._build_security_group_rule(
            sg_id, 'ingress', const.PROTO_NAME_TCP, '22', '22',
            '10.0.0.1/24')
        second = self._build_security_group_rule(
            sg_id, 'ingress', const.PROTO_NAME_TCP, '23', '23',
            '10.0.0.1/24')
        payload = {'security_group_rules':
                   [first['security_group_rule'],
                    second['security_group_rule']]}
        resp = self._create_security_group_rule(self.fmt, payload)
        self.deserialize(self.fmt, resp)
        self.assertEqual(webob.exc.HTTPCreated.code, resp.status_int)
def test_create_security_group_rule_bulk_emulated(self):
    """Emulated bulk rule creation (no native support) returns 201."""
    real_has_attr = hasattr

    def fakehasattr(item, attr):
        # Force the API into the emulated-bulk code path by hiding the
        # plugin's native-bulk capability flag.
        if attr.endswith('__native_bulk_support'):
            return False
        return real_has_attr(item, attr)

    with mock.patch('__builtin__.hasattr', new=fakehasattr):
        with self.security_group() as sg:
            sg_id = sg['security_group']['id']
            first = self._build_security_group_rule(
                sg_id, 'ingress', const.PROTO_NAME_TCP, '22', '22',
                '10.0.0.1/24')
            second = self._build_security_group_rule(
                sg_id, 'ingress', const.PROTO_NAME_TCP, '23', '23',
                '10.0.0.1/24')
            payload = {'security_group_rules':
                       [first['security_group_rule'],
                        second['security_group_rule']]}
            resp = self._create_security_group_rule(self.fmt, payload)
            self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPCreated.code, resp.status_int)
def test_create_security_group_rule_allow_all_ipv4(self):
    """An ingress rule with no protocol/ports (allow-all IPv4) is
    accepted with 201.
    """
    with self.security_group() as sg:
        rule = {'security_group_id': sg['security_group']['id'],
                'direction': 'ingress',
                'ethertype': 'IPv4',
                'tenant_id': 'test_tenant'}
        res = self._create_security_group_rule(
            self.fmt, {'security_group_rule': rule})
        # The previous code rebound `rule` to the decoded response and
        # never used it; decode only for format validation.
        self.deserialize(self.fmt, res)
        self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_security_group_rule_allow_all_ipv4_v6_bulk(self):
    """Bulk allow-all rules for both ethertypes are accepted (201)."""
    if self._skip_native_bulk:
        self.skipTest("Plugin does not support native bulk "
                      "security_group_rule create")
    with self.security_group() as sg:
        sg_id = sg['security_group']['id']
        rule_v4 = {'security_group_id': sg_id,
                   'direction': 'ingress',
                   'ethertype': 'IPv4',
                   'tenant_id': 'test_tenant'}
        rule_v6 = {'security_group_id': sg_id,
                   'direction': 'ingress',
                   'ethertype': 'IPv6',
                   'tenant_id': 'test_tenant'}
        resp = self._create_security_group_rule(
            self.fmt, {'security_group_rules': [rule_v4, rule_v6]})
        self.deserialize(self.fmt, resp)
        self.assertEqual(webob.exc.HTTPCreated.code, resp.status_int)
def test_create_security_group_rule_duplicate_rule_in_post(self):
    """Two identical rules within one bulk POST must 409."""
    if self._skip_native_bulk:
        self.skipTest("Plugin does not support native bulk "
                      "security_group_rule create")
    with self.security_group() as sg:
        built = self._build_security_group_rule(
            sg['security_group']['id'], 'ingress', const.PROTO_NAME_TCP,
            '22', '22', '10.0.0.1/24')
        payload = {'security_group_rules':
                   [built['security_group_rule'],
                    built['security_group_rule']]}
        resp = self._create_security_group_rule(self.fmt, payload)
        decoded = self.deserialize(self.fmt, resp)
        self.assertEqual(webob.exc.HTTPConflict.code, resp.status_int)
def test_create_security_group_rule_duplicate_rule_in_post_emulated(self):
    """Duplicate rules in one POST must 409 on the emulated path too."""
    real_has_attr = hasattr

    def fakehasattr(item, attr):
        # Force the API into the emulated-bulk code path.
        if attr.endswith('__native_bulk_support'):
            return False
        return real_has_attr(item, attr)

    with mock.patch('__builtin__.hasattr', new=fakehasattr):
        with self.security_group() as sg:
            built = self._build_security_group_rule(
                sg['security_group']['id'], 'ingress',
                const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
            payload = {'security_group_rules':
                       [built['security_group_rule'],
                        built['security_group_rule']]}
            resp = self._create_security_group_rule(self.fmt, payload)
            decoded = self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPConflict.code, resp.status_int)
def test_create_security_group_rule_duplicate_rule_db(self):
    """Re-posting a rule that already exists in the DB must 409."""
    if self._skip_native_bulk:
        self.skipTest("Plugin does not support native bulk "
                      "security_group_rule create")
    with self.security_group() as sg:
        rule = self._build_security_group_rule(sg['security_group']['id'],
                                               'ingress',
                                               const.PROTO_NAME_TCP, '22',
                                               '22', '10.0.0.1/24')
        # NOTE(review): `rule` is already wrapped as
        # {'security_group_rule': {...}}, so this payload nests it one
        # level deeper than the other bulk tests, which send
        # rule['security_group_rule'].  The test still expects 409 —
        # confirm the extra nesting is intentional.
        rules = {'security_group_rules': [rule]}
        self._create_security_group_rule(self.fmt, rules)
        res = self._create_security_group_rule(self.fmt, rules)
        rule = self.deserialize(self.fmt, res)
        self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_duplicate_rule_db_emulated(self):
    """Re-posting an existing rule must 409 on the emulated-bulk path."""
    real_has_attr = hasattr

    # Force the API to choose the emulation code path by hiding the
    # plugin's native-bulk capability attribute.
    def fakehasattr(item, attr):
        if attr.endswith('__native_bulk_support'):
            return False
        return real_has_attr(item, attr)

    with mock.patch('__builtin__.hasattr',
                    new=fakehasattr):
        with self.security_group() as sg:
            rule = self._build_security_group_rule(
                sg['security_group']['id'], 'ingress',
                const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
            # NOTE(review): the first request sends `rules` (a bulk
            # payload wrapping the already-wrapped `rule`) while the
            # second sends the bare `rule` — inconsistent with the
            # non-emulated sibling test above.  Both still end in 409;
            # confirm the asymmetry is intentional.
            rules = {'security_group_rules': [rule]}
            self._create_security_group_rule(self.fmt, rules)
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_security_group_rule_different_security_group_ids(self):
    """One bulk POST may not target two different groups (400)."""
    if self._skip_native_bulk:
        self.skipTest("Plugin does not support native bulk "
                      "security_group_rule create")
    with self.security_group() as sg1:
        with self.security_group() as sg2:
            first = self._build_security_group_rule(
                sg1['security_group']['id'], 'ingress',
                const.PROTO_NAME_TCP, '22', '22', '10.0.0.1/24')
            second = self._build_security_group_rule(
                sg2['security_group']['id'], 'ingress',
                const.PROTO_NAME_TCP, '23', '23', '10.0.0.1/24')
            payload = {'security_group_rules':
                       [first['security_group_rule'],
                        second['security_group_rule']]}
            resp = self._create_security_group_rule(self.fmt, payload)
            self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPBadRequest.code,
                             resp.status_int)
def test_create_security_group_rule_with_invalid_ethertype(self):
    """An unknown ethertype ('IPv5') must be rejected with a 400."""
    rule = self._build_security_group_rule(
        "4cd70774-cc67-4a87-9b39-7d1db38eb087", "ingress",
        const.PROTO_NAME_TCP, 22, 22, "10.0.0.0/24",
        "9cd70774-cc67-4a87-9b39-7d1db38eb087", ethertype='IPv5')
    resp = self._create_security_group_rule(self.fmt, rule)
    self.deserialize(self.fmt, resp)
    self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int)
def test_create_security_group_rule_with_invalid_protocol(self):
    """A malformed protocol string ('tcp/ip') must 400."""
    rule = self._build_security_group_rule(
        "4cd70774-cc67-4a87-9b39-7d1db38eb087", "ingress", 'tcp/ip',
        22, 22, "10.0.0.0/24", "9cd70774-cc67-4a87-9b39-7d1db38eb087")
    resp = self._create_security_group_rule(self.fmt, rule)
    self.deserialize(self.fmt, resp)
    self.assertEqual(webob.exc.HTTPBadRequest.code, resp.status_int)
def test_create_port_with_non_uuid(self):
    """A non-UUID security group reference on port create must 400."""
    with self.network() as net:
        with self.subnet(net):
            resp = self._create_port(self.fmt, net['network']['id'],
                                     security_groups=['not_valid'])
            self.deserialize(self.fmt, resp)
            self.assertEqual(webob.exc.HTTPBadRequest.code,
                             resp.status_int)
class TestSecurityGroupsXML(TestSecurityGroups):
    """Re-runs the entire TestSecurityGroups suite over the XML format."""
    # Overrides the base class's wire format ('json' by convention).
    fmt = 'xml'
| |
import unittest
import Integrator, Pslg, ElementAwarePslg, math
class IntegratorTest(unittest.TestCase):
    """Tests for Integrator's 2D (triangle) and 1D (segment) quadrature.

    Expected values are analytic integrals of polynomials up to second
    degree over the test triangles/segment, which the quadrature rules
    must reproduce to ~1e-9.  The heavy copy-paste of the original
    version is factored into private fixture/check helpers; every public
    test method name is unchanged so test ids stay stable.  The
    deprecated ``assertAlmostEquals`` alias (removed in Python 3.12) is
    replaced by ``assertAlmostEqual``.
    """

    # ------------------------------------------------------------------
    # Fixture helpers
    # ------------------------------------------------------------------

    def _element(self, a, b, c):
        """Build an Element from three (x, y) vertex tuples."""
        return ElementAwarePslg.Element(Pslg.GridPoint(a[0], a[1]),
                                        Pslg.GridPoint(b[0], b[1]),
                                        Pslg.GridPoint(c[0], c[1]), 0, 0)

    def _unit_element(self):
        """Reference triangle (0,0)-(1,0)-(0,1), area 1/2."""
        return self._element((0, 0), (1, 0), (0, 1))

    def _skewed_element(self):
        """Triangle (2,2)-(5,4)-(5,6) used by the testIntegration* cases."""
        return self._element((2, 2), (5, 4), (5, 6))

    def _segment(self):
        """Segment from (1,2) to (3,-2); length 2*sqrt(5)."""
        return Pslg.Segment(Pslg.GridPoint(1, 2), Pslg.GridPoint(3, -2))

    def _check2d(self, element, f, expected):
        """integrate2D(f, element) must equal *expected* to 9 places."""
        self.assertAlmostEqual(Integrator.integrate2D(f, element),
                               expected, 9)

    def _check1d(self, f, expected):
        """integrate1D(f, reference segment) must equal *expected*."""
        self.assertAlmostEqual(Integrator.integrate1D(f, self._segment()),
                               expected, 9)

    def _check_origin(self, element, s, t, x, y):
        """transformOrigin must map reference coords (s, t) to (x, y)."""
        values = Integrator.transformOrigin(element, s, t)
        self.assertAlmostEqual(values[0], x)
        self.assertAlmostEqual(values[1], y)

    # ------------------------------------------------------------------
    # 2D quadrature over the unit triangle
    # ------------------------------------------------------------------

    def testConstantFunction1(self):
        self._check2d(self._unit_element(), lambda x, y: 1, 0.5)

    def testConstantFunction2(self):
        self._check2d(self._unit_element(), lambda x, y: 4, 2.0)

    def testFirstDegreePolynomial1(self):
        self._check2d(self._unit_element(), lambda x, y: x, 1.0 / 6.0)

    def testFirstDegreePolynomial2(self):
        self._check2d(self._unit_element(), lambda x, y: 4.0 * x,
                      4.0 / 6.0)

    def testFirstDegreePolynomial3(self):
        self._check2d(self._unit_element(), lambda x, y: y, 1.0 / 6.0)

    def testFirstDegreePolynomial4(self):
        self._check2d(self._unit_element(), lambda x, y: 4.0 * y,
                      4.0 / 6.0)

    def testFirstDegreePolynomial5(self):
        self._check2d(self._unit_element(),
                      lambda x, y: 2.0 * x + 3.0 * y + 4.0,
                      2.0 / 6.0 + 3.0 / 6.0 + 2.0)

    def testSecondDegreePolynomial1(self):
        self._check2d(self._unit_element(), lambda x, y: x * x,
                      1.0 / 12.0)

    def testSecondDegreePolynomial2(self):
        self._check2d(self._unit_element(), lambda x, y: 3.0 * x * x,
                      3.0 / 12.0)

    def testSecondDegreePolynomial3(self):
        self._check2d(self._unit_element(), lambda x, y: x * y,
                      1.0 / 24.0)

    def testSecondDegreePolynomial4(self):
        self._check2d(self._unit_element(), lambda x, y: 3.0 * x * y,
                      3.0 / 24.0)

    def testSecondDegreePolynomial5(self):
        self._check2d(self._unit_element(), lambda x, y: y * y,
                      1.0 / 12.0)

    def testSecondDegreePolynomial6(self):
        self._check2d(self._unit_element(), lambda x, y: 3.0 * y * y,
                      3.0 / 12.0)

    def testSecondDegreePolynomial7(self):
        self._check2d(
            self._unit_element(),
            lambda x, y: (2.0 * x * x + 3.0 * x * y + 4.0 * y * y +
                          5.0 * x + 6.0 * y + 7.0),
            5.9583333333333339)

    # ------------------------------------------------------------------
    # Affine mapping from the reference triangle
    # ------------------------------------------------------------------

    def testTransformOrigin1(self):
        # On the reference element the mapping is the identity.
        e = self._unit_element()
        for s, t in [(0, 0), (1, 0), (0, 1), (0.5, 0), (0, 0.5),
                     (0.5, 0.5), (0.25, 0.5)]:
            self._check_origin(e, s, t, s, t)

    def testTransformOrigin2(self):
        e = self._element((3, 4), (5, 4), (3, 4.5))
        cases = [((0, 0), (3, 4)), ((1, 0), (5, 4)), ((0, 1), (3, 4.5)),
                 ((0.5, 0), (4, 4)), ((0, 0.5), (3, 4.25)),
                 ((0.5, 0.5), (4, 4.25)), ((0.25, 0.5), (3.5, 4.25))]
        for (s, t), (x, y) in cases:
            self._check_origin(e, s, t, x, y)

    def testTransformOrigin3(self):
        e = self._element((4, 4), (7, 5), (5, 7))
        cases = [((0, 0), (4, 4)), ((1, 0), (7, 5)), ((0, 1), (5, 7)),
                 ((0.5, 0), (5.5, 4.5)), ((0, 0.5), (4.5, 5.5)),
                 ((0.5, 0.5), (6, 6)),
                 ((0.25, 0.5), (4 + 5.0 / 4.0, 4 + 7.0 / 4.0))]
        for (s, t), (x, y) in cases:
            self._check_origin(e, s, t, x, y)

    # ------------------------------------------------------------------
    # 2D quadrature over a skewed triangle (area 3)
    # ------------------------------------------------------------------

    def testIntegration1(self):
        self._check2d(self._skewed_element(), lambda x, y: 2.0 * x * x,
                      99)

    def testIntegration2(self):
        self._check2d(self._skewed_element(), lambda x, y: 3.0 * x * y,
                      148.5)

    def testIntegration3(self):
        self._check2d(self._skewed_element(), lambda x, y: 4.0 * y * y,
                      200)

    def testIntegration4(self):
        self._check2d(self._skewed_element(), lambda x, y: 5.0 * x, 60)

    def testIntegration5(self):
        self._check2d(self._skewed_element(), lambda x, y: 6.0 * y, 72)

    def testIntegration6(self):
        self._check2d(self._skewed_element(), lambda x, y: 7.0, 21)

    def testIntegration7(self):
        self._check2d(
            self._skewed_element(),
            lambda x, y: (2.0 * x * x + 3.0 * x * y + 4.0 * y * y +
                          5.0 * x + 6.0 * y + 7.0),
            600.5)

    # ------------------------------------------------------------------
    # 1D segment parametrization and quadrature
    # ------------------------------------------------------------------

    def testTransformSegemntParameter1(self):
        # NOTE: the method name keeps its historical misspelling so the
        # externally visible test id does not change.
        s = self._segment()
        cases = [(-1, (1, 2)), (-0.5, (1.5, 1)), (0, (2, 0)),
                 (0.5, (2.5, -1)), (1, (3, -2))]
        for t, (x, y) in cases:
            result = Integrator.transformSegementParameter(s, t)
            self.assertAlmostEqual(result[0], x, 9)
            self.assertAlmostEqual(result[1], y, 9)

    def testGetTransformationFactor1(self):
        # sqrt(5) is half the segment length, matching the [-1, 1]
        # parameter range used by transformSegementParameter above.
        self.assertAlmostEqual(
            Integrator.getTransformationFactor(self._segment()),
            math.sqrt(5), 9)

    def testIntegration1D1(self):
        self._check1d(lambda x, y: 2 * x * x, 52 * math.sqrt(5) / 3.0)

    def testIntegration1D2(self):
        self._check1d(lambda x, y: 3 * x * y, -12 * math.sqrt(5) / 3.0)

    def testIntegration1D3(self):
        self._check1d(lambda x, y: 4 * y * y, 32 * math.sqrt(5) / 3.0)

    def testIntegration1D4(self):
        self._check1d(lambda x, y: 5 * x, 20 * math.sqrt(5))

    def testIntegration1D5(self):
        self._check1d(lambda x, y: 6 * y, 0)

    def testIntegration1D6(self):
        self._check1d(lambda x, y: 7.0, 14 * math.sqrt(5))

    def testIntegration1D7(self):
        self._check1d(
            lambda x, y: (2.0 * x * x + 3.0 * x * y + 4.0 * y * y +
                          5.0 * x + 6.0 * y + 7.0),
            58 * math.sqrt(5))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
"""
Provides step definitions to:
* run commands, like behave
* create textual files within a working directory
TODO:
matcher that ignores empty lines and whitespace and has contains comparison
"""
from __future__ import absolute_import, print_function
from behave import given, when, then, step, matchers
from behave4cmd0 import command_shell, command_util, pathutil, textutil
from behave4cmd0.pathutil import posixpath_normpath
import contextlib
import difflib
import os
import shutil
from hamcrest import assert_that, equal_to, is_not, contains_string
# -----------------------------------------------------------------------------
# INIT:
# -----------------------------------------------------------------------------
# Teach behave's step matcher to convert {param:int} captures to int.
matchers.register_type(int=int)
# When True, the diff-printing helpers also dump the full expected and
# actual texts, not just the diff.
DEBUG = False
# -----------------------------------------------------------------------------
# UTILITIES:
# -----------------------------------------------------------------------------
@contextlib.contextmanager
def on_assert_failed_print_details(actual, expected):
    """Context manager that prints a text diff when an assertion fails.

    On AssertionError, prints an ndiff of *expected* vs *actual* (and,
    when the module-level DEBUG flag is set, both full texts), then
    re-raises the error.  Other exceptions pass through untouched.

    .. sourcecode:: python

        with on_assert_failed_print_details(actual_text, expected_text):
            assert actual == expected
    """
    try:
        yield
    except AssertionError:
        diff = difflib.ndiff(expected.splitlines(), actual.splitlines())
        diff_text = u"\n".join(diff)
        print(u"DIFF (+ ACTUAL, - EXPECTED):\n{0}\n".format(diff_text))
        if DEBUG:
            print(u"expected:\n{0}\n".format(expected))
            print(u"actual:\n{0}\n".format(actual))
        raise
@contextlib.contextmanager
def on_error_print_details(actual, expected):
    """
    Context manager that prints a diff of *expected* vs. *actual* whenever
    any exception escapes the managed block, then re-raises it.

    .. sourcecode:: python

        with on_error_print_details(actual_text, expected_text):
            ...  # Do something
    """
    try:
        yield
    except Exception:
        delta = difflib.ndiff(expected.splitlines(), actual.splitlines())
        print(u"DIFF (+ ACTUAL, - EXPECTED):\n{0}\n".format(u"\n".join(delta)))
        if DEBUG:
            print(u"expected:\n{0}\n".format(expected))
            print(u"actual:\n{0}".format(actual))
        raise
# -----------------------------------------------------------------------------
# STEPS: WORKING DIR
# -----------------------------------------------------------------------------
@given(u'a new working directory')
def step_a_new_working_directory(context):
    """
    Creates a new, empty working directory
    """
    command_util.ensure_context_attribute_exists(context, "workdir", None)
    command_util.ensure_workdir_exists(context)
    # -- NOTE(review): The directory is removed right after being ensured;
    # presumably later steps lazily re-create it -- confirm with command_util.
    shutil.rmtree(context.workdir, ignore_errors=True)


@given(u'I use the current directory as working directory')
def step_use_curdir_as_working_directory(context):
    """
    Uses the current directory as working directory
    """
    context.workdir = os.path.abspath(".")
    command_util.ensure_workdir_exists(context)
# -----------------------------------------------------------------------------
# STEPS: Create files with contents
# -----------------------------------------------------------------------------
@given(u'a file named "{filename}" and encoding="{encoding}" with')
def step_a_file_named_filename_and_encoding_with(context, filename, encoding):
    """Creates a textual file with the content provided as docstring."""
    # -- BUGFIX: The step pattern contained a literal "(unknown)" instead of
    # the "{filename}" placeholder, so behave could never bind the
    # "filename" argument of this step implementation.
    __encoding_is_valid = True
    assert context.text is not None, "ENSURE: multiline text is provided."
    assert not os.path.isabs(filename)
    assert __encoding_is_valid
    command_util.ensure_workdir_exists(context)
    filename2 = os.path.join(context.workdir, filename)
    pathutil.create_textfile_with_contents(filename2, context.text, encoding)


@given(u'a file named "{filename}" with')
def step_a_file_named_filename_with(context, filename):
    """Creates a textual UTF-8 file with the content provided as docstring."""
    step_a_file_named_filename_and_encoding_with(context, filename, "UTF-8")
    # -- SPECIAL CASE: For usage with behave steps.
    if filename.endswith(".feature"):
        command_util.ensure_context_attribute_exists(context, "features", [])
        context.features.append(filename)


@given(u'an empty file named "{filename}"')
def step_an_empty_file_named_filename(context, filename):
    """
    Creates an empty file.
    """
    assert not os.path.isabs(filename)
    command_util.ensure_workdir_exists(context)
    filename2 = os.path.join(context.workdir, filename)
    pathutil.create_textfile_with_contents(filename2, "")
# -----------------------------------------------------------------------------
# STEPS: Run commands
# -----------------------------------------------------------------------------
@when(u'I run "{command}"')
def step_i_run_command(context, command):
    """
    Run a command as subprocess, collect its output and returncode.
    """
    command_util.ensure_workdir_exists(context)
    # -- Result object is stored for later "then ..." assertion steps.
    context.command_result = command_shell.run(command, cwd=context.workdir)
    command_util.workdir_save_coverage_files(context.workdir)
    # -- DISABLED DEBUG DIAGNOSTICS: Replace "False" by "True" to re-enable.
    if False and DEBUG:
        print(u"run_command: {0}".format(command))
        print(u"run_command.output {0}".format(context.command_result.output))
@when(u'I successfully run "{command}"')
def step_i_successfully_run_command(context, command):
    """Run the command and verify that it succeeded (returncode == 0)."""
    step_i_run_command(context, command)
    step_it_should_pass(context)


@then(u'it should fail with result "{result:int}"')
def step_it_should_fail_with_result(context, result):
    """Verify the exact, non-zero returncode of the last command."""
    assert_that(context.command_result.returncode, equal_to(result))
    # -- SANITY CHECK: A "failure" result must itself be non-zero.
    assert_that(result, is_not(equal_to(0)))


@then(u'the command should fail with returncode="{result:int}"')
def step_it_should_fail_with_returncode(context, result):
    """Verify the exact, non-zero returncode of the last command."""
    assert_that(context.command_result.returncode, equal_to(result))
    assert_that(result, is_not(equal_to(0)))


@then(u'the command returncode is "{result:int}"')
def step_the_command_returncode_is(context, result):
    """Verify the exact returncode of the last command."""
    assert_that(context.command_result.returncode, equal_to(result))


@then(u'the command returncode is non-zero')
def step_the_command_returncode_is_nonzero(context):
    """Verify that the last command failed (returncode != 0)."""
    assert_that(context.command_result.returncode, is_not(equal_to(0)))


@then(u'it should pass')
def step_it_should_pass(context):
    """Verify the last command succeeded; shows its output on mismatch."""
    assert_that(context.command_result.returncode, equal_to(0),
                context.command_result.output)


@then(u'it should fail')
def step_it_should_fail(context):
    """Verify the last command failed; shows its output on mismatch."""
    assert_that(context.command_result.returncode, is_not(equal_to(0)),
                context.command_result.output)
@then(u'it should pass with')
def step_it_should_pass_with(context):
    '''
    Verifies that the last command succeeded and that its output contains
    the text provided as multiline docstring.

    EXAMPLE:
        ...
        when I run "behave ..."
        then it should pass with:
            """
            TEXT
            """
    '''
    assert context.text is not None, "ENSURE: multiline text is provided."
    step_command_output_should_contain(context)
    assert_that(context.command_result.returncode, equal_to(0),
                context.command_result.output)


@then(u'it should fail with')
def step_it_should_fail_with(context):
    '''
    Verifies that the last command failed and that its output contains
    the text provided as multiline docstring.

    EXAMPLE:
        ...
        when I run "behave ..."
        then it should fail with:
            """
            TEXT
            """
    '''
    assert context.text is not None, "ENSURE: multiline text is provided."
    step_command_output_should_contain(context)
    assert_that(context.command_result.returncode, is_not(equal_to(0)))
# -----------------------------------------------------------------------------
# STEPS FOR: Output Comparison
# -----------------------------------------------------------------------------
@then(u'the command output should contain "{text}"')
def step_command_output_should_contain_text(context, text):
    """
    Verifies that the output of the last command contains the given text.
    ``{__WORKDIR__}`` and ``{__CWD__}`` placeholders are substituted first.

    EXAMPLE:
        ...
        Then the command output should contain "TEXT"
    """
    if "{__WORKDIR__}" in text or "{__CWD__}" in text:
        expected_text = textutil.template_substitute(
            text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd()),
        )
    else:
        expected_text = text
    output = context.command_result.output
    with on_assert_failed_print_details(output, expected_text):
        textutil.assert_normtext_should_contain(output, expected_text)
@then(u'the command output should not contain "{text}"')
def step_command_output_should_not_contain_text(context, text):
    """
    Verifies that the output of the last command lacks the given text.
    ``{__WORKDIR__}`` and ``{__CWD__}`` placeholders are substituted first.

    EXAMPLE:
        ...
        then the command output should not contain "TEXT"
    """
    if "{__WORKDIR__}" in text or "{__CWD__}" in text:
        unexpected_text = textutil.template_substitute(
            text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd()),
        )
    else:
        unexpected_text = text
    output = context.command_result.output
    with on_assert_failed_print_details(output, unexpected_text):
        textutil.assert_normtext_should_not_contain(output, unexpected_text)
@then(u'the command output should contain exactly "{text}"')
def step_command_output_should_contain_exactly_text(context, text):
    """
    Verifies that the command output of the last command contains the
    expected text verbatim.

    .. code-block:: gherkin

        When I run "echo Hello"
        Then the command output should contain "Hello"
    """
    if "{__WORKDIR__}" in text or "{__CWD__}" in text:
        expected_text = textutil.template_substitute(
            text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd()),
        )
    else:
        expected_text = text
    textutil.assert_text_should_contain_exactly(
        context.command_result.output, expected_text)
@then(u'the command output should not contain exactly "{text}"')
def step_command_output_should_not_contain_exactly_text(context, text):
    """Verifies the last command output lacks the expected text verbatim."""
    if "{__WORKDIR__}" in text or "{__CWD__}" in text:
        unexpected_text = textutil.template_substitute(
            text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd()),
        )
    else:
        unexpected_text = text
    textutil.assert_text_should_not_contain_exactly(
        context.command_result.output, unexpected_text)
@then(u'the command output should contain')
def step_command_output_should_contain(context):
    '''
    Verifies that the output of the last command contains the text provided
    as multiline docstring.

    EXAMPLE:
        ...
        when I run "behave ..."
        then it should pass
        and  the command output should contain:
            """
            TEXT
            """
    '''
    assert context.text is not None, "REQUIRE: multi-line text"
    step_command_output_should_contain_text(context, context.text)


@then(u'the command output should not contain')
def step_command_output_should_not_contain(context):
    '''
    Verifies that the output of the last command does not contain the text
    provided as multiline docstring.

    EXAMPLE:
        ...
        when I run "behave ..."
        then it should pass
        and  the command output should not contain:
            """
            TEXT
            """
    '''
    assert context.text is not None, "REQUIRE: multi-line text"
    # -- NOTE: Surrounding whitespace of the docstring text is ignored here.
    step_command_output_should_not_contain_text(context, context.text.strip())


@then(u'the command output should contain exactly')
def step_command_output_should_contain_exactly_with_multiline_text(context):
    """Verbatim-contains check with the text provided as docstring."""
    assert context.text is not None, "REQUIRE: multi-line text"
    step_command_output_should_contain_exactly_text(context, context.text)


@then(u'the command output should not contain exactly')
def step_command_output_should_contain_not_exactly_with_multiline_text(context):
    """Verbatim not-contains check with the text provided as docstring."""
    assert context.text is not None, "REQUIRE: multi-line text"
    step_command_output_should_not_contain_exactly_text(context, context.text)
# -----------------------------------------------------------------------------
# STEPS FOR: Directories
# -----------------------------------------------------------------------------
@step(u'I remove the directory "{directory}"')
def step_remove_directory(context, directory):
    """Remove the directory; relative paths resolve below the workdir."""
    if os.path.isabs(directory):
        target = directory
    else:
        target = os.path.join(context.workdir, os.path.normpath(directory))
    if os.path.isdir(target):
        shutil.rmtree(target, ignore_errors=True)
    assert_that(not os.path.isdir(target))
@given(u'I ensure that the directory "{directory}" does not exist')
def step_given_the_directory_should_not_exist(context, directory):
    """Removes the directory when it exists (see step_remove_directory)."""
    step_remove_directory(context, directory)
@given(u'a directory named "{path}"')
def step_directory_named_dirname(context, path):
    """Create the directory (and missing parents) below the workdir."""
    assert context.workdir, "REQUIRE: context.workdir"
    target = os.path.join(context.workdir, os.path.normpath(path))
    if not os.path.exists(target):
        os.makedirs(target)
    assert os.path.isdir(target)
@then(u'the directory "{directory}" should exist')
def step_the_directory_should_exist(context, directory):
    """Verifies the directory exists; relative paths resolve in workdir."""
    path_ = directory
    if not os.path.isabs(directory):
        path_ = os.path.join(context.workdir, os.path.normpath(directory))
    assert_that(os.path.isdir(path_))


@then(u'the directory "{directory}" should not exist')
def step_the_directory_should_not_exist(context, directory):
    """Verifies the directory is absent; relative paths resolve in workdir."""
    path_ = directory
    if not os.path.isabs(directory):
        path_ = os.path.join(context.workdir, os.path.normpath(directory))
    assert_that(not os.path.isdir(path_))


@step(u'the directory "{directory}" exists')
def step_directory_exists(context, directory):
    """
    Verifies that a directory exists.

    .. code-block:: gherkin

        Given the directory "abc.txt" exists
        When the directory "abc.txt" exists
    """
    step_the_directory_should_exist(context, directory)


@step(u'the directory "{directory}" does not exist')
def step_directory_named_does_not_exist(context, directory):
    """
    Verifies that a directory does not exist.

    .. code-block:: gherkin

        Given the directory "abc/" does not exist
        When the directory "abc/" does not exist
    """
    step_the_directory_should_not_exist(context, directory)
# -----------------------------------------------------------------------------
# FILE STEPS:
# -----------------------------------------------------------------------------
@step(u'a file named "{filename}" exists')
def step_file_named_filename_exists(context, filename):
    """
    Verifies that a file with this filename exists.

    .. code-block:: gherkin

        Given a file named "abc.txt" exists
        When a file named "abc.txt" exists
    """
    # -- BUGFIX: The step pattern contained a literal "(unknown)" instead of
    # the "{filename}" placeholder, so behave could never bind the argument.
    step_file_named_filename_should_exist(context, filename)


@step(u'a file named "{filename}" does not exist')
def step_file_named_filename_does_not_exist(context, filename):
    """
    Verifies that a file with this filename does not exist.

    .. code-block:: gherkin

        Given a file named "abc.txt" does not exist
        When a file named "abc.txt" does not exist
    """
    step_file_named_filename_should_not_exist(context, filename)


@then(u'a file named "{filename}" should exist')
def step_file_named_filename_should_exist(context, filename):
    """Verifies the file exists (resolved relative to the workdir)."""
    command_util.ensure_workdir_exists(context)
    filename_ = pathutil.realpath_with_context(filename, context)
    assert_that(os.path.exists(filename_) and os.path.isfile(filename_))


@then(u'a file named "{filename}" should not exist')
def step_file_named_filename_should_not_exist(context, filename):
    """Verifies the file is absent (resolved relative to the workdir)."""
    command_util.ensure_workdir_exists(context)
    filename_ = pathutil.realpath_with_context(filename, context)
    assert_that(not os.path.exists(filename_))
# -----------------------------------------------------------------------------
# STEPS FOR FILE CONTENTS:
# -----------------------------------------------------------------------------
@then(u'the file "{filename}" should contain "{text}"')
def step_file_should_contain_text(context, filename, text):
    """
    Verifies that the named file contains the given text fragment.
    ``{__WORKDIR__}`` and ``{__CWD__}`` placeholders are substituted first.
    """
    # -- BUGFIX: The step pattern contained a literal "(unknown)" instead of
    # the "{filename}" placeholder, so behave could never bind the argument.
    expected_text = text
    if "{__WORKDIR__}" in text or "{__CWD__}" in text:
        expected_text = textutil.template_substitute(text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd())
        )
    file_contents = pathutil.read_file_contents(filename, context=context)
    file_contents = file_contents.rstrip()
    with on_assert_failed_print_details(file_contents, expected_text):
        textutil.assert_normtext_should_contain(file_contents, expected_text)


@then(u'the file "{filename}" should not contain "{text}"')
def step_file_should_not_contain_text(context, filename, text):
    """Verifies that the named file does not contain the given text."""
    file_contents = pathutil.read_file_contents(filename, context=context)
    file_contents = file_contents.rstrip()
    textutil.assert_normtext_should_not_contain(file_contents, text)


@then(u'the file "{filename}" should contain')
def step_file_should_contain_multiline_text(context, filename):
    """Verifies the file contains the text provided as multiline docstring."""
    assert context.text is not None, "REQUIRE: multiline text"
    step_file_should_contain_text(context, filename, context.text)


@then(u'the file "{filename}" should not contain')
def step_file_should_not_contain_multiline_text(context, filename):
    """Verifies the file lacks the text provided as multiline docstring."""
    assert context.text is not None, "REQUIRE: multiline text"
    step_file_should_not_contain_text(context, filename, context.text)
# -----------------------------------------------------------------------------
# ENVIRONMENT VARIABLES
# -----------------------------------------------------------------------------
@step(u'I set the environment variable "{env_name}" to "{env_value}"')
def step_I_set_the_environment_variable_to(context, env_name, env_value):
    """Set a process environment variable (tracked on context.environ)."""
    if not hasattr(context, "environ"):
        context.environ = {}
    context.environ[env_name] = env_value
    os.environ[env_name] = env_value


@step(u'I remove the environment variable "{env_name}"')
def step_I_remove_the_environment_variable(context, env_name):
    """Remove a process environment variable (tracked on context.environ)."""
    if not hasattr(context, "environ"):
        context.environ = {}
    # -- NOTE: Assign an empty value first so the deletions below cannot
    # raise a KeyError when the variable is currently undefined.
    context.environ[env_name] = ""
    os.environ[env_name] = ""
    del context.environ[env_name]
    del os.environ[env_name]
| |
"""
Automatic Colour Conversion Graph
=================================
Defines the automatic colour conversion graph objects:
- :func:`colour.describe_conversion_path`
- :func:`colour.convert`
"""
from __future__ import annotations
import inspect
import numpy as np
import textwrap
from collections import namedtuple
from copy import copy
from functools import partial
from pprint import pformat
import colour
from colour.colorimetry import (
CCS_ILLUMINANTS,
MultiSpectralDistributions,
SDS_ILLUMINANTS,
SpectralDistribution,
TVS_ILLUMINANTS_HUNTERLAB,
)
from colour.colorimetry import (
colorimetric_purity,
complementary_wavelength,
dominant_wavelength,
excitation_purity,
lightness,
luminance,
luminous_efficacy,
luminous_efficiency,
luminous_flux,
whiteness,
yellowness,
wavelength_to_XYZ,
)
from colour.hints import (
Any,
ArrayLike,
Callable,
Dict,
FloatingOrArrayLike,
FloatingOrNDArray,
Integer,
List,
Literal,
NDArray,
Number,
Optional,
Union,
cast,
)
from colour.recovery import XYZ_to_sd
from colour.models import RGB_COLOURSPACE_sRGB
from colour.models import (
CAM02LCD_to_JMh_CIECAM02,
CAM02SCD_to_JMh_CIECAM02,
CAM02UCS_to_JMh_CIECAM02,
CAM16LCD_to_JMh_CAM16,
CAM16SCD_to_JMh_CAM16,
CAM16UCS_to_JMh_CAM16,
CMYK_to_CMY,
CMY_to_CMYK,
CMY_to_RGB,
DIN99_to_XYZ,
HCL_to_RGB,
HSL_to_RGB,
HSV_to_RGB,
Hunter_Lab_to_XYZ,
Hunter_Rdab_to_XYZ,
ICaCb_to_XYZ,
ICtCp_to_XYZ,
IHLS_to_RGB,
IgPgTg_to_XYZ,
IPT_to_XYZ,
JMh_CAM16_to_CAM16LCD,
JMh_CAM16_to_CAM16SCD,
JMh_CAM16_to_CAM16UCS,
JMh_CIECAM02_to_CAM02LCD,
JMh_CIECAM02_to_CAM02SCD,
JMh_CIECAM02_to_CAM02UCS,
Jzazbz_to_XYZ,
LCHab_to_Lab,
LCHuv_to_Luv,
Lab_to_LCHab,
Lab_to_XYZ,
Luv_to_LCHuv,
Luv_to_XYZ,
Luv_to_uv,
Luv_uv_to_xy,
OSA_UCS_to_XYZ,
Oklab_to_XYZ,
Prismatic_to_RGB,
ProLab_to_XYZ,
RGB_Colourspace,
RGB_luminance,
RGB_to_CMY,
RGB_to_HCL,
RGB_to_HSL,
RGB_to_HSV,
RGB_to_IHLS,
RGB_to_Prismatic,
RGB_to_RGB,
RGB_to_XYZ,
RGB_to_YCbCr,
RGB_to_YCoCg,
RGB_to_YcCbcCrc,
UCS_to_XYZ,
UCS_to_uv,
UCS_uv_to_xy,
UVW_to_XYZ,
XYZ_to_DIN99,
XYZ_to_Hunter_Lab,
XYZ_to_Hunter_Rdab,
XYZ_to_ICaCb,
XYZ_to_ICtCp,
XYZ_to_IgPgTg,
XYZ_to_IPT,
XYZ_to_Jzazbz,
XYZ_to_Lab,
XYZ_to_Luv,
XYZ_to_OSA_UCS,
XYZ_to_Oklab,
XYZ_to_ProLab,
XYZ_to_RGB,
XYZ_to_UCS,
XYZ_to_UVW,
XYZ_to_hdr_CIELab,
XYZ_to_hdr_IPT,
XYZ_to_sRGB,
XYZ_to_xy,
XYZ_to_xyY,
YCbCr_to_RGB,
YCoCg_to_RGB,
YcCbcCrc_to_RGB,
cctf_decoding,
cctf_encoding,
hdr_CIELab_to_XYZ,
hdr_IPT_to_XYZ,
sRGB_to_XYZ,
uv_to_Luv,
uv_to_UCS,
xyY_to_XYZ,
xyY_to_xy,
xy_to_Luv_uv,
xy_to_UCS_uv,
xy_to_XYZ,
xy_to_xyY,
)
from colour.notation import (
HEX_to_RGB,
RGB_to_HEX,
munsell_value,
munsell_colour_to_xyY,
xyY_to_munsell_colour,
)
from colour.quality import colour_quality_scale, colour_rendering_index
from colour.appearance import (
CAM_Specification_CAM16,
CAM16_to_XYZ,
CAM_Specification_CIECAM02,
CIECAM02_to_XYZ,
Kim2009_to_XYZ,
XYZ_to_ATD95,
XYZ_to_CAM16,
XYZ_to_CIECAM02,
XYZ_to_Hunt,
XYZ_to_Kim2009,
XYZ_to_LLAB,
XYZ_to_Nayatani95,
XYZ_to_RLAB,
XYZ_to_ZCAM,
ZCAM_to_XYZ,
)
from colour.appearance.ciecam02 import CAM_KWARGS_CIECAM02_sRGB
from colour.temperature import CCT_to_uv, uv_to_CCT
from colour.utilities import (
as_float_array,
domain_range_scale,
filter_kwargs,
message_box,
optional,
required,
tsplit,
tstack,
usage_warning,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"

# -- Explicit public API of this module.
__all__ = [
    "Conversion_Specification",
    "sd_to_XYZ",
    "CIECAM02_to_JMh_CIECAM02",
    "JMh_CIECAM02_to_CIECAM02",
    "CAM16_to_JMh_CAM16",
    "JMh_CAM16_to_CAM16",
    "XYZ_to_luminance",
    "RGB_luminance_to_RGB",
    "CONVERSION_SPECIFICATIONS_DATA",
    "CONVERSION_GRAPH_NODE_LABELS",
    "CONVERSION_SPECIFICATIONS",
    "CONVERSION_GRAPH",
    "describe_conversion_path",
    "convert",
]
class Conversion_Specification(
    namedtuple(
        "Conversion_Specification", ("source", "target", "conversion_function")
    )
):
    """
    Conversion specification for *Colour* graph for automatic colour
    conversion describing two nodes and the edge in the graph.

    Parameters
    ----------
    source
        Source node in the graph.
    target
        Target node in the graph.
    conversion_function
        Callable converting from the ``source`` node to the ``target`` node.
    """

    def __new__(cls, source: str, target: str, conversion_function: Callable):
        """
        Return a new instance of the
        :class:`colour.graph.conversion.Conversion_Specification` class.
        """
        # Node names are case-insensitive: store them lower-cased.
        normalised_source = source.lower()
        normalised_target = target.lower()

        return super().__new__(
            cls, normalised_source, normalised_target, conversion_function
        )
def sd_to_XYZ(
    sd: Union[ArrayLike, SpectralDistribution, MultiSpectralDistributions],
    cmfs: Optional[MultiSpectralDistributions] = None,
    illuminant: Optional[SpectralDistribution] = None,
    k: Optional[Number] = None,
    method: Union[Literal["ASTM E308", "Integration"], str] = "ASTM E308",
    **kwargs: Any,
) -> NDArray:
    """
    Convert given spectral distribution to *CIE XYZ* tristimulus values using
    given colour matching functions, illuminant and method.

    This placeholder docstring is replaced with the modified
    :func:`colour.sd_to_XYZ` definition docstring.
    """
    # -- Unlike :func:`colour.sd_to_XYZ`, default the illuminant to the
    # graph default (D65, per "_ILLUMINANT_DEFAULT") when not given.
    illuminant = cast(
        SpectralDistribution,
        optional(illuminant, SDS_ILLUMINANTS[_ILLUMINANT_DEFAULT]),
    )

    return colour.sd_to_XYZ(sd, cmfs, illuminant, k, method, **kwargs)


# If-clause required for optimised python launch ("-OO" strips docstrings).
if colour.sd_to_XYZ.__doc__ is not None:
    # -- Rewrite the inherited docstring to match the D65 default above.
    sd_to_XYZ.__doc__ = colour.sd_to_XYZ.__doc__.replace(
        "CIE Illuminant E",
        "CIE Standard Illuminant D65",
    ).replace(
        "sd_to_XYZ(sd)",
        "sd_to_XYZ(sd) # doctest: +SKIP",
    )
def CIECAM02_to_JMh_CIECAM02(
    specification: CAM_Specification_CIECAM02,
) -> NDArray:
    """
    Convert from *CIECAM02* specification to *CIECAM02* :math:`JMh`
    correlates.

    Parameters
    ----------
    specification
        *CIECAM02* colour appearance model specification.

    Returns
    -------
    :class:`numpy.ndarray`
        *CIECAM02* :math:`JMh` correlates.

    Examples
    --------
    >>> specification = CAM_Specification_CIECAM02(J=41.731091132513917,
    ...                                            M=0.108842175669226,
    ...                                            h=219.048432658311780)
    >>> CIECAM02_to_JMh_CIECAM02(specification)  # doctest: +ELLIPSIS
    array([  4.1731091...e+01,   1.0884217...e-01,   2.1904843...e+02])
    """
    lightness_J = specification.J
    colourfulness_M = specification.M
    hue_h = specification.h

    return tstack([lightness_J, colourfulness_M, hue_h])
def JMh_CIECAM02_to_CIECAM02(JMh: ArrayLike) -> CAM_Specification_CIECAM02:
    """
    Convert from *CIECAM02* :math:`JMh` correlates to *CIECAM02*
    specification.

    Parameters
    ----------
    JMh
        *CIECAM02* :math:`JMh` correlates.

    Returns
    -------
    :class:`colour.CAM_Specification_CIECAM02`
        *CIECAM02* colour appearance model specification.

    Examples
    --------
    >>> import numpy as np
    >>> JMh = np.array([4.17310911e+01, 1.08842176e-01, 2.19048433e+02])
    >>> JMh_CIECAM02_to_CIECAM02(JMh)  # doctest: +ELLIPSIS
    CAM_Specification_CIECAM02(J=41.7310911..., C=None, h=219.0484329..., \
s=None, Q=None, M=0.1088421..., H=None, HC=None)
    """
    correlates = tsplit(JMh)

    return CAM_Specification_CIECAM02(
        J=correlates[0], M=correlates[1], h=correlates[2]
    )
def CAM16_to_JMh_CAM16(specification: CAM_Specification_CAM16) -> NDArray:
    """
    Convert from *CAM16* specification to *CAM16* :math:`JMh` correlates.

    Parameters
    ----------
    specification
        *CAM16* colour appearance model specification.

    Returns
    -------
    :class:`numpy.ndarray`
        *CAM16* :math:`JMh` correlates.

    Examples
    --------
    >>> specification = CAM_Specification_CAM16(J=41.731207905126638,
    ...                                         M=0.107436772335905,
    ...                                         h=217.067959767393010)
    >>> CAM16_to_JMh_CAM16(specification)  # doctest: +ELLIPSIS
    array([  4.1731207...e+01,   1.0743677...e-01,   2.1706796...e+02])
    """
    return tstack([specification.J, specification.M, specification.h])
def JMh_CAM16_to_CAM16(JMh: ArrayLike) -> CAM_Specification_CAM16:
    """
    Convert from *CAM16* :math:`JMh` correlates to *CAM16* specification.

    Parameters
    ----------
    JMh
        *CAM16* :math:`JMh` correlates.

    Returns
    -------
    :class:`colour.CAM_Specification_CAM16`
        *CAM16* colour appearance model specification.

    Examples
    --------
    >>> import numpy as np
    >>> JMh = np.array([4.17312079e+01, 1.07436772e-01, 2.17067960e+02])
    >>> JMh_CAM16_to_CAM16(JMh)  # doctest: +ELLIPSIS
    CAM_Specification_CAM16(J=41.7312079..., C=None, h=217.06796..., s=None, \
Q=None, M=0.1074367..., H=None, HC=None)
    """
    J, M, h = tsplit(JMh)

    return CAM_Specification_CAM16(J=J, M=M, h=h)
def XYZ_to_luminance(XYZ: ArrayLike) -> FloatingOrNDArray:
    """
    Convert from *CIE XYZ* tristimulus values to *luminance* :math:`Y`.

    Parameters
    ----------
    XYZ
        *CIE XYZ* tristimulus values.

    Returns
    -------
    :class:`numpy.floating` or :class:`numpy.ndarray`
        *Luminance* :math:`Y`.

    Examples
    --------
    >>> import numpy as np
    >>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
    >>> XYZ_to_luminance(XYZ)  # doctest: +ELLIPSIS
    0.1219722...
    """
    # Luminance is the middle (Y) component of the tristimulus values.
    return tsplit(XYZ)[1]
def RGB_luminance_to_RGB(Y: FloatingOrArrayLike) -> NDArray:
    """
    Convert from *luminance* :math:`Y` to *RGB* (achromatic triplet).

    Parameters
    ----------
    Y
        *Luminance* :math:`Y`.

    Returns
    -------
    :class:`numpy.ndarray`
        *RGB*.

    Examples
    --------
    >>> RGB_luminance_to_RGB(0.123014562384318)  # doctest: +ELLIPSIS
    array([ 0.1230145...,  0.1230145...,  0.1230145...])
    """
    luminance_Y = as_float_array(Y)

    return tstack([luminance_Y, luminance_Y, luminance_Y])
_ILLUMINANT_DEFAULT: str = "D65"
"""Default automatic colour conversion graph illuminant name."""
_CCS_ILLUMINANT_DEFAULT: NDArray = CCS_ILLUMINANTS[
"CIE 1931 2 Degree Standard Observer"
][_ILLUMINANT_DEFAULT]
"""
Default automatic colour conversion graph illuminant *CIE xy* chromaticity
coordinates.
"""
_TVS_ILLUMINANT_DEFAULT: NDArray = xy_to_XYZ(_CCS_ILLUMINANT_DEFAULT)
"""
Default automatic colour conversion graph illuminant *CIE XYZ* tristimulus
values.
"""
_RGB_COLOURSPACE_DEFAULT: RGB_Colourspace = RGB_COLOURSPACE_sRGB
"""Default automatic colour conversion graph *RGB* colourspace."""
_CAM_KWARGS_CIECAM02_sRGB: Dict = CAM_KWARGS_CIECAM02_sRGB.copy()
"""
Default parameter values for the *CIECAM02* colour appearance model usage in
the context of *sRGB*.
Warnings
--------
The *CIE XYZ* tristimulus values of reference white :math:`XYZ_w` is adjusted
for the domain-range scale **'1'**.
"""
_CAM_KWARGS_CIECAM02_sRGB["XYZ_w"] = _CAM_KWARGS_CIECAM02_sRGB["XYZ_w"] / 100
CONVERSION_SPECIFICATIONS_DATA: List = [
# Colorimetry
("Spectral Distribution", "CIE XYZ", sd_to_XYZ),
("CIE XYZ", "Spectral Distribution", XYZ_to_sd),
("Spectral Distribution", "Luminous Flux", luminous_flux),
("Spectral Distribution", "Luminous Efficiency", luminous_efficiency),
("Spectral Distribution", "Luminous Efficacy", luminous_efficacy),
("CIE XYZ", "Luminance", XYZ_to_luminance),
("Luminance", "Lightness", lightness),
("Lightness", "Luminance", luminance),
(
"CIE XYZ",
"Whiteness",
partial(whiteness, XYZ_0=_TVS_ILLUMINANT_DEFAULT),
),
("CIE XYZ", "Yellowness", yellowness),
(
"CIE xy",
"Colorimetric Purity",
partial(colorimetric_purity, xy_n=_CCS_ILLUMINANT_DEFAULT),
),
(
"CIE xy",
"Complementary Wavelength",
partial(complementary_wavelength, xy_n=_CCS_ILLUMINANT_DEFAULT),
),
(
"CIE xy",
"Dominant Wavelength",
partial(dominant_wavelength, xy_n=_CCS_ILLUMINANT_DEFAULT),
),
(
"CIE xy",
"Excitation Purity",
partial(excitation_purity, xy_n=_CCS_ILLUMINANT_DEFAULT),
),
("Wavelength", "CIE XYZ", wavelength_to_XYZ),
# Colour Models
("CIE XYZ", "CIE xyY", XYZ_to_xyY),
("CIE xyY", "CIE XYZ", xyY_to_XYZ),
("CIE xyY", "CIE xy", xyY_to_xy),
("CIE xy", "CIE xyY", xy_to_xyY),
("CIE XYZ", "CIE xy", XYZ_to_xy),
("CIE xy", "CIE XYZ", xy_to_XYZ),
("CIE XYZ", "CIE Lab", XYZ_to_Lab),
("CIE Lab", "CIE XYZ", Lab_to_XYZ),
("CIE Lab", "CIE LCHab", Lab_to_LCHab),
("CIE LCHab", "CIE Lab", LCHab_to_Lab),
("CIE XYZ", "CIE Luv", XYZ_to_Luv),
("CIE Luv", "CIE XYZ", Luv_to_XYZ),
("CIE Luv", "CIE Luv uv", Luv_to_uv),
("CIE Luv uv", "CIE Luv", uv_to_Luv),
("CIE Luv uv", "CIE xy", Luv_uv_to_xy),
("CIE xy", "CIE Luv uv", xy_to_Luv_uv),
("CIE Luv", "CIE LCHuv", Luv_to_LCHuv),
("CIE LCHuv", "CIE Luv", LCHuv_to_Luv),
("CIE XYZ", "CIE UCS", XYZ_to_UCS),
("CIE UCS", "CIE XYZ", UCS_to_XYZ),
("CIE UCS", "CIE UCS uv", UCS_to_uv),
("CIE UCS uv", "CIE UCS", uv_to_UCS),
("CIE UCS uv", "CIE xy", UCS_uv_to_xy),
("CIE xy", "CIE UCS uv", xy_to_UCS_uv),
("CIE XYZ", "CIE UVW", XYZ_to_UVW),
("CIE UVW", "CIE XYZ", UVW_to_XYZ),
("CIE XYZ", "DIN99", XYZ_to_DIN99),
("DIN99", "CIE XYZ", DIN99_to_XYZ),
("CIE XYZ", "hdr-CIELAB", XYZ_to_hdr_CIELab),
("hdr-CIELAB", "CIE XYZ", hdr_CIELab_to_XYZ),
(
"CIE XYZ",
"Hunter Lab",
partial(
XYZ_to_Hunter_Lab,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB[
"CIE 1931 2 Degree Standard Observer"
]["D65"].XYZ_n
/ 100,
),
),
(
"Hunter Lab",
"CIE XYZ",
partial(
Hunter_Lab_to_XYZ,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB[
"CIE 1931 2 Degree Standard Observer"
]["D65"].XYZ_n
/ 100,
),
),
(
"CIE XYZ",
"Hunter Rdab",
partial(
XYZ_to_Hunter_Rdab,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB[
"CIE 1931 2 Degree Standard Observer"
]["D65"].XYZ_n
/ 100,
),
),
(
"Hunter Rdab",
"CIE XYZ",
partial(
Hunter_Rdab_to_XYZ,
XYZ_n=TVS_ILLUMINANTS_HUNTERLAB[
"CIE 1931 2 Degree Standard Observer"
]["D65"].XYZ_n
/ 100,
),
),
("CIE XYZ", "ICaCb", XYZ_to_ICaCb),
("ICaCb", "CIE XYZ", ICaCb_to_XYZ),
("CIE XYZ", "ICtCp", XYZ_to_ICtCp),
("ICtCp", "CIE XYZ", ICtCp_to_XYZ),
("CIE XYZ", "IgPgTg", XYZ_to_IgPgTg),
("IgPgTg", "CIE XYZ", IgPgTg_to_XYZ),
("CIE XYZ", "IPT", XYZ_to_IPT),
("IPT", "CIE XYZ", IPT_to_XYZ),
("CIE XYZ", "Jzazbz", XYZ_to_Jzazbz),
("Jzazbz", "CIE XYZ", Jzazbz_to_XYZ),
("CIE XYZ", "hdr-IPT", XYZ_to_hdr_IPT),
("hdr-IPT", "CIE XYZ", hdr_IPT_to_XYZ),
("CIE XYZ", "OSA UCS", XYZ_to_OSA_UCS),
("OSA UCS", "CIE XYZ", OSA_UCS_to_XYZ),
("CIE XYZ", "Oklab", XYZ_to_Oklab),
("Oklab", "CIE XYZ", Oklab_to_XYZ),
("CIE XYZ", "ProLab", XYZ_to_ProLab),
("ProLab", "CIE XYZ", ProLab_to_XYZ),
# RGB Colour Models
(
"CIE XYZ",
"RGB",
partial(
XYZ_to_RGB,
illuminant_XYZ=_RGB_COLOURSPACE_DEFAULT.whitepoint,
illuminant_RGB=_RGB_COLOURSPACE_DEFAULT.whitepoint,
matrix_XYZ_to_RGB=_RGB_COLOURSPACE_DEFAULT.matrix_XYZ_to_RGB,
),
),
(
"RGB",
"CIE XYZ",
partial(
RGB_to_XYZ,
illuminant_RGB=_RGB_COLOURSPACE_DEFAULT.whitepoint,
illuminant_XYZ=_RGB_COLOURSPACE_DEFAULT.whitepoint,
matrix_RGB_to_XYZ=_RGB_COLOURSPACE_DEFAULT.matrix_RGB_to_XYZ,
),
),
(
"RGB",
"Scene-Referred RGB",
partial(
RGB_to_RGB,
input_colourspace=_RGB_COLOURSPACE_DEFAULT,
output_colourspace=_RGB_COLOURSPACE_DEFAULT,
),
),
(
"Scene-Referred RGB",
"RGB",
partial(
RGB_to_RGB,
input_colourspace=_RGB_COLOURSPACE_DEFAULT,
output_colourspace=_RGB_COLOURSPACE_DEFAULT,
),
),
("RGB", "HSV", RGB_to_HSV),
("HSV", "RGB", HSV_to_RGB),
("RGB", "HSL", RGB_to_HSL),
("HSL", "RGB", HSL_to_RGB),
("RGB", "HCL", RGB_to_HCL),
("HCL", "RGB", HCL_to_RGB),
("RGB", "IHLS", RGB_to_IHLS),
("IHLS", "RGB", IHLS_to_RGB),
("CMY", "RGB", CMY_to_RGB),
("RGB", "CMY", RGB_to_CMY),
("CMY", "CMYK", CMY_to_CMYK),
("CMYK", "CMY", CMYK_to_CMY),
(
"RGB",
"RGB Luminance",
partial(
RGB_luminance,
primaries=_RGB_COLOURSPACE_DEFAULT.primaries,
whitepoint=_RGB_COLOURSPACE_DEFAULT.whitepoint,
),
),
("RGB Luminance", "RGB", RGB_luminance_to_RGB),
("RGB", "Prismatic", RGB_to_Prismatic),
("Prismatic", "RGB", Prismatic_to_RGB),
("Output-Referred RGB", "YCbCr", RGB_to_YCbCr),
("YCbCr", "Output-Referred RGB", YCbCr_to_RGB),
("RGB", "YcCbcCrc", RGB_to_YcCbcCrc),
("YcCbcCrc", "RGB", YcCbcCrc_to_RGB),
("Output-Referred RGB", "YCoCg", RGB_to_YCoCg),
("YCoCg", "Output-Referred RGB", YCoCg_to_RGB),
("RGB", "Output-Referred RGB", cctf_encoding),
("Output-Referred RGB", "RGB", cctf_decoding),
("Scene-Referred RGB", "Output-Referred RGB", cctf_encoding),
("Output-Referred RGB", "Scene-Referred RGB", cctf_decoding),
("CIE XYZ", "sRGB", XYZ_to_sRGB),
("sRGB", "CIE XYZ", sRGB_to_XYZ),
# Colour Notation Systems
("Output-Referred RGB", "Hexadecimal", RGB_to_HEX),
("Hexadecimal", "Output-Referred RGB", HEX_to_RGB),
("CIE xyY", "Munsell Colour", xyY_to_munsell_colour),
("Munsell Colour", "CIE xyY", munsell_colour_to_xyY),
("Luminance", "Munsell Value", munsell_value),
("Munsell Value", "Luminance", partial(luminance, method="ASTM D1535")),
# Colour Quality
("Spectral Distribution", "CRI", colour_rendering_index),
("Spectral Distribution", "CQS", colour_quality_scale),
# Colour Temperature
("CCT", "CIE UCS uv", CCT_to_uv),
("CIE UCS uv", "CCT", uv_to_CCT),
# Advanced Colorimetry
(
"CIE XYZ",
"ATD95",
partial(
XYZ_to_ATD95,
XYZ_0=_TVS_ILLUMINANT_DEFAULT,
Y_0=80 * 0.2,
k_1=0,
k_2=(15 + 50) / 2,
),
),
(
"CIE XYZ",
"CIECAM02",
partial(XYZ_to_CIECAM02, **_CAM_KWARGS_CIECAM02_sRGB),
),
(
"CIECAM02",
"CIE XYZ",
partial(CIECAM02_to_XYZ, **_CAM_KWARGS_CIECAM02_sRGB),
),
("CIECAM02", "CIECAM02 JMh", CIECAM02_to_JMh_CIECAM02),
("CIECAM02 JMh", "CIECAM02", JMh_CIECAM02_to_CIECAM02),
("CIE XYZ", "CAM16", partial(XYZ_to_CAM16, **_CAM_KWARGS_CIECAM02_sRGB)),
("CAM16", "CIE XYZ", partial(CAM16_to_XYZ, **_CAM_KWARGS_CIECAM02_sRGB)),
("CAM16", "CAM16 JMh", CAM16_to_JMh_CAM16),
("CAM16 JMh", "CAM16", JMh_CAM16_to_CAM16),
(
"CIE XYZ",
"Kim 2009",
partial(XYZ_to_Kim2009, XYZ_w=_TVS_ILLUMINANT_DEFAULT, L_A=80 * 0.2),
),
(
"Kim 2009",
"CIE XYZ",
partial(Kim2009_to_XYZ, XYZ_w=_TVS_ILLUMINANT_DEFAULT, L_A=80 * 0.2),
),
(
"CIE XYZ",
"Hunt",
partial(
XYZ_to_Hunt,
XYZ_w=_TVS_ILLUMINANT_DEFAULT,
XYZ_b=_TVS_ILLUMINANT_DEFAULT,
L_A=80 * 0.2,
CCT_w=6504,
),
),
(
"CIE XYZ",
"LLAB",
partial(
XYZ_to_LLAB, XYZ_0=_TVS_ILLUMINANT_DEFAULT, Y_b=80 * 0.2, L=80
),
),
(
"CIE XYZ",
"Nayatani95",
partial(
XYZ_to_Nayatani95,
XYZ_n=_TVS_ILLUMINANT_DEFAULT,
Y_o=0.2,
E_o=1000,
E_or=1000,
),
),
(
"CIE XYZ",
"RLAB",
partial(XYZ_to_RLAB, XYZ_n=_TVS_ILLUMINANT_DEFAULT, Y_n=20),
),
(
"CIE XYZ",
"ZCAM",
partial(
XYZ_to_ZCAM,
XYZ_w=_TVS_ILLUMINANT_DEFAULT,
L_A=64 / np.pi * 0.2,
Y_b=20,
),
),
(
"ZCAM",
"CIE XYZ",
partial(
ZCAM_to_XYZ,
XYZ_w=_TVS_ILLUMINANT_DEFAULT,
L_A=64 / np.pi * 0.2,
Y_b=20,
),
),
("CIECAM02 JMh", "CAM02LCD", JMh_CIECAM02_to_CAM02LCD),
("CAM02LCD", "CIECAM02 JMh", CAM02LCD_to_JMh_CIECAM02),
("CIECAM02 JMh", "CAM02SCD", JMh_CIECAM02_to_CAM02SCD),
("CAM02SCD", "CIECAM02 JMh", CAM02SCD_to_JMh_CIECAM02),
("CIECAM02 JMh", "CAM02UCS", JMh_CIECAM02_to_CAM02UCS),
("CAM02UCS", "CIECAM02 JMh", CAM02UCS_to_JMh_CIECAM02),
("CAM16 JMh", "CAM16LCD", JMh_CAM16_to_CAM16LCD),
("CAM16LCD", "CAM16 JMh", CAM16LCD_to_JMh_CAM16),
("CAM16 JMh", "CAM16SCD", JMh_CAM16_to_CAM16SCD),
("CAM16SCD", "CAM16 JMh", CAM16SCD_to_JMh_CAM16),
("CAM16 JMh", "CAM16UCS", JMh_CAM16_to_CAM16UCS),
("CAM16UCS", "CAM16 JMh", CAM16UCS_to_JMh_CAM16),
]
"""
Automatic colour conversion graph specifications data describing two nodes and
the edge in the graph.
"""
CONVERSION_SPECIFICATIONS: List = [
    Conversion_Specification(*specification)
    for specification in CONVERSION_SPECIFICATIONS_DATA
]
"""
Automatic colour conversion graph specifications describing two nodes and
the edge in the graph.
"""

# -- Map lower-cased node names to their display labels, sources first,
# then targets so that every node of the graph is covered.
CONVERSION_GRAPH_NODE_LABELS: Dict = {
    specification[0].lower(): specification[0]
    for specification in CONVERSION_SPECIFICATIONS_DATA
}
"""Automatic colour conversion graph node labels."""

CONVERSION_GRAPH_NODE_LABELS.update(
    {
        specification[1].lower(): specification[1]
        for specification in CONVERSION_SPECIFICATIONS_DATA
    }
)
@required("NetworkX")
def _build_graph() -> networkx.DiGraph:  # type: ignore[name-defined] # noqa
    """
    Build the automatic colour conversion graph.

    Each conversion specification contributes one directed edge whose
    ``conversion_function`` attribute holds the conversion callable.

    Returns
    -------
    :class:`networkx.DiGraph`
        Automatic colour conversion graph.
    """
    import networkx as nx

    graph = nx.DiGraph()
    # "add_edges_from" accepts (source, target, attributes) triplets which
    # is equivalent to adding each edge individually with "add_edge".
    graph.add_edges_from(
        (
            specification.source,
            specification.target,
            {"conversion_function": specification.conversion_function},
        )
        for specification in CONVERSION_SPECIFICATIONS
    )
    return graph
# Lazily-built singleton: populated on first call to
# :func:`_conversion_path` via :func:`_build_graph`.
CONVERSION_GRAPH: (  # type: ignore[name-defined]
    Optional[networkx.DiGraph]  # noqa
) = None
"""Automatic colour conversion graph."""
@required("NetworkX")
def _conversion_path(source: str, target: str) -> List[Callable]:
    """
    Return the conversion path from the source node to the target node in the
    automatic colour conversion graph.

    Parameters
    ----------
    source
        Source node.
    target
        Target node.

    Returns
    -------
    :class:`list`
        Conversion path from the source node to the target node, i.e. a list of
        conversion function callables.

    Examples
    --------
    >>> _conversion_path('cie lab', 'cct')
    ... # doctest: +ELLIPSIS
    [<function Lab_to_XYZ at 0x...>, <function XYZ_to_xy at 0x...>, \
<function xy_to_UCS_uv at 0x...>, <function uv_to_CCT at 0x...>]
    """
    import networkx as nx
    global CONVERSION_GRAPH
    # Build the graph on first use and mirror it on the public module
    # attribute so that "colour.graph.CONVERSION_GRAPH" stays in sync.
    if CONVERSION_GRAPH is None:
        # Updating the :attr:`CONVERSION_GRAPH` attributes.
        colour.graph.CONVERSION_GRAPH = CONVERSION_GRAPH = _build_graph()
    path = nx.shortest_path(CONVERSION_GRAPH, source, target)
    # Collect the conversion callable stored on each traversed edge, in
    # path order (consecutive node pairs).
    return [
        CONVERSION_GRAPH.get_edge_data(a, b)["conversion_function"]
        for a, b in zip(path[:-1], path[1:])
    ]
def _lower_order_function(callable_: Callable) -> Callable:
"""
Return the lower order function associated with given callable, i.e.
the function wrapped by a partial object.
Parameters
----------
callable_
Callable to return the lower order function.
Returns
-------
Callable
Lower order function or given callable if no lower order function
exists.
"""
return callable_.func if isinstance(callable_, partial) else callable_
def describe_conversion_path(
    source: str,
    target: str,
    mode: Union[Literal["Short", "Long", "Extended"], str] = "Short",
    width: Integer = 79,
    padding: Integer = 3,
    print_callable: Callable = print,
    **kwargs: Any,
):
    """
    Describe the conversion path from source colour representation to target
    colour representation using the automatic colour conversion graph.

    Parameters
    ----------
    source
        Source colour representation, i.e. the source node in the automatic
        colour conversion graph.
    target
        Target colour representation, i.e. the target node in the automatic
        colour conversion graph.
    mode
        Verbose mode: *Short* describes the conversion path, *Long* provides
        details about the arguments, definitions signatures and output values,
        *Extended* appends the definitions' documentation.
    width
        Message box width.
    padding
        Padding on each side of the message.
    print_callable
        Callable used to print the message box.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.convert`},
        See the documentation of the previously listed definition.

    Examples
    --------
    >>> describe_conversion_path('Spectral Distribution', 'sRGB', width=75)
    ===========================================================================
    *                                                                         *
    *   [ Conversion Path ]                                                   *
    *                                                                         *
    *   "sd_to_XYZ" --> "XYZ_to_sRGB"                                         *
    *                                                                         *
    ===========================================================================
    """
    # "inspect.signature" is preferred; "inspect.getargspec" is a fallback
    # for (very old) Python versions that lack it.
    try:  # pragma: no cover
        signature_inspection = inspect.signature
    except AttributeError:  # pragma: no cover
        signature_inspection = inspect.getargspec  # type: ignore[assignment]
    source, target = source.lower(), target.lower()
    # "validate_method" returns the lower-cased validated mode, thus the
    # comparisons below use "extended" / "long".
    mode = validate_method(
        mode,
        ["Short", "Long", "Extended"],
        '"{0}" mode is invalid, it must be one of {1}!',
    )
    # Widen the message box in extended mode to fit docstrings.
    width = (79 + 2 + 2 * 3 - 4) if mode == "extended" else width
    conversion_path = _conversion_path(source, target)
    # Print the overall conversion path first, e.g. "a" --> "b" --> "c".
    message_box(
        "[ Conversion Path ]\n\n{}".format(
            " --> ".join(
                [
                    f'"{_lower_order_function(conversion_function).__name__}"'
                    for conversion_function in conversion_path
                ]
            )
        ),
        width,
        padding,
        print_callable,
    )
    # Per-definition details are only printed in "long" / "extended" modes.
    for conversion_function in conversion_path:
        conversion_function_name = _lower_order_function(
            conversion_function
        ).__name__
        # Filtering compatible keyword arguments passed directly and
        # irrespective of any conversion function name.
        filtered_kwargs = filter_kwargs(conversion_function, **kwargs)
        # Filtering keyword arguments passed as dictionary with the
        # conversion function name.
        filtered_kwargs.update(kwargs.get(conversion_function_name, {}))
        # "return" is injected by :func:`colour.convert` to carry each
        # intermediate conversion output for display purposes.
        return_value = filtered_kwargs.pop("return", None)
        if mode in ("long", "extended"):
            signature = pformat(
                signature_inspection(
                    _lower_order_function(conversion_function)
                )
            )
            message = (
                f'[ "{_lower_order_function(conversion_function).__name__}" ]\n\n'
                f"[ Signature ]\n\n"
                f"{signature}"
            )
            if filtered_kwargs:
                message += (
                    f"\n\n[ Filtered Arguments ]\n\n{pformat(filtered_kwargs)}"
                )
            if mode in ("extended",):
                docstring = textwrap.dedent(
                    str(_lower_order_function(conversion_function).__doc__)
                ).strip()
                message += f"\n\n[ Documentation ]\n\n {docstring}"
            if return_value is not None:
                message += f"\n\n[ Conversion Output ]\n\n{return_value}"
            message_box(message, width, padding, print_callable)
def convert(a: Any, source: str, target: str, **kwargs: Any) -> Any:
    """
    Convert given object :math:`a` from source colour representation to target
    colour representation using the automatic colour conversion graph.

    The conversion is performed by finding the shortest path in a
    `NetworkX <https://networkx.github.io/>`__ :class:`DiGraph` class instance.

    The conversion path adopts the **'1'** domain-range scale and the object
    :math:`a` is expected to be *soft* normalised accordingly. For example,
    *CIE XYZ* tristimulus values arguments for use with the *CAM16* colour
    appearance model should be in domain `[0, 1]` instead of the domain
    `[0, 100]` used with the **'Reference'** domain-range scale. The arguments
    are typically converted as follows:

    -   *Scalars* in domain-range `[0, 10]`, e.g *Munsell Value* are
        scaled by *10*.
    -   *Percentages* in domain-range `[0, 100]` are scaled by *100*.
    -   *Degrees* in domain-range `[0, 360]` are scaled by *360*.
    -   *Integers* in domain-range `[0, 2**n -1]` where `n` is the bit
        depth are scaled by *2**n -1*.

    See the `Domain-Range Scales <../basics.html#domain-range-scales>`__ page
    for more information.

    Parameters
    ----------
    a
        Object :math:`a` to convert. If :math:`a` represents a reflectance,
        transmittance or absorptance value, the expectation is that it is
        viewed under *CIE Standard Illuminant D Series* *D65*. The illuminant
        can be changed on a per-definition basis along the conversion path.
    source
        Source colour representation, i.e. the source node in the automatic
        colour conversion graph.
    target
        Target colour representation, i.e. the target node in the automatic
        colour conversion graph.

    Other Parameters
    ----------------
    kwargs
        See the documentation of the supported conversion
        definitions.

        Arguments for the conversion definitions are passed as keyword
        arguments whose names is those of the conversion definitions and values
        set as dictionaries. For example, in the conversion from spectral
        distribution to *sRGB* colourspace, passing arguments to the
        :func:`colour.sd_to_XYZ` definition is done as follows::

            convert(sd, 'Spectral Distribution', 'sRGB', sd_to_XYZ={\
'illuminant': SDS_ILLUMINANTS['FL2']})

        It is also possible to pass keyword arguments directly to the various
        conversion definitions irrespective of their name. This is
        ``dangerous`` and could cause unexpected behaviour, consider the
        following conversion::

             convert(sd, 'Spectral Distribution', 'sRGB', 'illuminant': \
SDS_ILLUMINANTS['FL2'])

        Because both the :func:`colour.sd_to_XYZ` and
        :func:`colour.XYZ_to_sRGB` definitions have an *illuminant* argument,
        `SDS_ILLUMINANTS['FL2']` will be passed to both of them and will raise
        an exception in the :func:`colour.XYZ_to_sRGB` definition. This will
        be addressed in the future by either catching the exception and trying
        a new time without the keyword argument or more elegantly via type
        checking.

        With that in mind, this mechanism offers some good benefits: For
        example, it allows defining a conversion from *CIE XYZ* colourspace to
        *n* different colour models while passing an illuminant argument but
        without having to explicitly define all the explicit conversion
        definition arguments::

            a = np.array([0.20654008, 0.12197225, 0.05136952])
            illuminant = CCS_ILLUMINANTS[\
'CIE 1931 2 Degree Standard Observer']['D65']
            for model in ('CIE xyY', 'CIE Lab'):
                convert(a, 'CIE XYZ', model, illuminant=illuminant)

        Instead of::

            for model in ('CIE xyY', 'CIE Lab'):
                convert(a, 'CIE XYZ', model, XYZ_to_xyY={'illuminant': \
illuminant}, XYZ_to_Lab={'illuminant': illuminant})

        Mixing both approaches is possible for the brevity benefits. It is made
        possible because the keyword arguments directly passed are filtered
        first and then the resulting dict is updated with the explicit
        conversion definition arguments::

            illuminant = CCS_ILLUMINANTS[\
'CIE 1931 2 Degree Standard Observer']['D65']
            convert(sd, 'Spectral Distribution', 'sRGB', 'illuminant': \
SDS_ILLUMINANTS['FL2'], XYZ_to_sRGB={'illuminant': illuminant})

        For inspection purposes, verbose is enabled by passing arguments to the
        :func:`colour.describe_conversion_path` definition via the ``verbose``
        keyword argument as follows::

            convert(sd, 'Spectral Distribution', 'sRGB', \
verbose={'mode': 'Long'})

    Returns
    -------
    Any
        Converted object :math:`a`.

    Warnings
    --------
    The domain-range scale is **'1'** and cannot be changed.

    Notes
    -----
    -   The **RGB** colour representation is assumed to be linear and
        representing *scene-referred* imagery, i.e. **Scene-Referred RGB**
        representation. To encode such *RGB* values as *output-referred*
        (*display-referred*) imagery, i.e. encode the *RGB* values using an
        encoding colour component transfer function (Encoding CCTF) /
        opto-electronic transfer function (OETF), the
        **Output-Referred RGB** representation must be used::

             convert(RGB, 'Scene-Referred RGB', 'Output-Referred RGB')

        Likewise, encoded *output-referred* *RGB* values can be decoded with
        the **Scene-Referred RGB** representation::

            convert(RGB, 'Output-Referred RGB', 'Scene-Referred RGB')

    -   Various defaults have been adopted compared to the low-level *Colour*
        API:

        -   The default illuminant for the computation is
            *CIE Standard Illuminant D Series* *D65*. It can be changed on a
            per-definition basis along the conversion path.
        -   The default *RGB* colourspace primaries and whitepoint are that of
            the *BT.709*/*sRGB* colourspace. They can be changed on a per
            definition basis along the conversion path.
        -   When using **sRGB** as a source or target colour representation,
            the convenient :func:`colour.sRGB_to_XYZ` and
            :func:`colour.XYZ_to_sRGB` definitions are used, respectively.
            Thus, decoding and encoding using the sRGB electro-optical transfer
            function (EOTF) and its inverse will be applied by default.
        -   Most of the colour appearance models have defaults set according to
            *IEC 61966-2-1:1999* viewing conditions, i.e. *sRGB* 64 Lux ambient
            illumination, 80 :math:`cd/m^2`, adapting field luminance about
            20% of a white object in the scene.

    Examples
    --------
    >>> import numpy as np
    >>> from colour import SDS_COLOURCHECKERS
    >>> sd = SDS_COLOURCHECKERS['ColorChecker N Ohta']['dark skin']
    >>> convert(sd, 'Spectral Distribution', 'sRGB',
    ...     verbose={'mode': 'Short', 'width': 75})
    ... # doctest: +ELLIPSIS
    ===========================================================================
    *                                                                         *
    *   [ Conversion Path ]                                                   *
    *                                                                         *
    *   "sd_to_XYZ" --> "XYZ_to_sRGB"                                         *
    *                                                                         *
    ===========================================================================
    array([ 0.4567579...,  0.3098698...,  0.2486192...])
    >>> illuminant = SDS_ILLUMINANTS['FL2']
    >>> convert(sd, 'Spectral Distribution', 'sRGB',
    ...     sd_to_XYZ={'illuminant': illuminant})
    ... # doctest: +ELLIPSIS
    array([ 0.4792457...,  0.3167696...,  0.1736272...])
    >>> a = np.array([0.45675795, 0.30986982, 0.24861924])
    >>> convert(a, 'Output-Referred RGB', 'CAM16UCS')
    ... # doctest: +ELLIPSIS
    array([ 0.3999481...,  0.0920655...,  0.0812752...])
    >>> a = np.array([0.39994811, 0.09206558, 0.08127526])
    >>> convert(a, 'CAM16UCS', 'sRGB', verbose={'mode': 'Short', 'width': 75})
    ... # doctest: +ELLIPSIS
    ===========================================================================
    *                                                                         *
    *   [ Conversion Path ]                                                   *
    *                                                                         *
    *   "UCS_Li2017_to_JMh_CAM16" --> "JMh_CAM16_to_CAM16" -->                *
    *   "CAM16_to_XYZ" --> "XYZ_to_sRGB"                                      *
    *                                                                         *
    ===========================================================================
    array([ 0.4567576...,  0.3098826...,  0.2486222...])
    """
    # TODO: Remove the following warning whenever the automatic colour
    # conversion graph implementation is considered stable.
    usage_warning(
        'The "Automatic Colour Conversion Graph" is a beta feature, be '
        "mindful of this when using it. Please report any unexpected "
        "behaviour and do not hesitate to ask any questions should they arise."
        "\nThis warning can be disabled with the "
        '"colour.utilities.suppress_warnings" context manager as follows:\n'
        "with colour.utilities.suppress_warnings(colour_usage_warnings=True): "
        "\n    convert(*args, **kwargs)"
    )
    # Node names are stored lower-cased in the conversion graph.
    source, target = source.lower(), target.lower()
    conversion_path = _conversion_path(source, target)
    # Copy so that per-definition "return" values can be recorded for the
    # verbose output without mutating the caller's kwargs.
    verbose_kwargs = copy(kwargs)
    for conversion_function in conversion_path:
        conversion_function_name = _lower_order_function(
            conversion_function
        ).__name__
        # Filtering compatible keyword arguments passed directly and
        # irrespective of any conversion function name.
        filtered_kwargs = filter_kwargs(conversion_function, **kwargs)
        # Filtering keyword arguments passed as dictionary with the
        # conversion function name.
        filtered_kwargs.update(kwargs.get(conversion_function_name, {}))
        # The whole conversion path operates on the '1' domain-range scale.
        with domain_range_scale("1"):
            a = conversion_function(a, **filtered_kwargs)
        # Record each intermediate output so that
        # :func:`describe_conversion_path` can display it in verbose mode.
        if conversion_function_name in verbose_kwargs:
            verbose_kwargs[conversion_function_name]["return"] = a
        else:
            verbose_kwargs[conversion_function_name] = {"return": a}
    if "verbose" in verbose_kwargs:
        verbose_kwargs.update(verbose_kwargs.pop("verbose"))
        describe_conversion_path(source, target, **verbose_kwargs)
    return a
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class MomentumOptimizerTest(tf.test.TestCase):
  """Tests for tf.train.MomentumOptimizer.

  Covers dense and sparse gradient application, tensor-valued
  hyper-parameters, optimizer sharing across apply_gradients calls and
  parity with reference values generated by dist-belief.

  Note: `assertEquals` (deprecated unittest alias, removed in Python 3.12)
  has been replaced by `assertEqual`, and `assertFalse(x in y)` by the
  clearer `assertNotIn(x, y)`.
  """

  def testBasic(self):
    """Dense gradients: slots are created and two momentum steps apply."""
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        self.assertNotIn(slot0, tf.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        self.assertNotIn(slot1, tf.trainable_variables())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators where 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
                                                     2.0 - (0.1 * 2.0)]),
                                           var0.eval())
        self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
                                                     4.0 - (0.01 * 2.0)]),
                                           var1.eval())
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval())

  def testTensorLearningRateAndMomentum(self):
    """Same as testBasic but with learning rate and momentum as Tensors."""
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        mom_opt = tf.train.MomentumOptimizer(
            learning_rate=tf.constant(2.0), momentum=tf.constant(0.9))
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        self.assertNotIn(slot0, tf.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        self.assertNotIn(slot1, tf.trainable_variables())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators where 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
                                                     2.0 - (0.1 * 2.0)]),
                                           var0.eval())
        self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
                                                     4.0 - (0.01 * 2.0)]),
                                           var1.eval())
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval())

  def _dbParamsMom01(self):
    """Return dist-belief momentum values.

    Return values been generated from the dist-belief momentum unittest,
    running with a learning rate of 0.1 and a momentum of 0.1.

    These values record how a parameter vector of size 10, initialized with 0.0,
    gets updated with 10 consecutive momentum steps. It uses random gradients.

    Returns:
      db_grad: The gradients to apply
      db_out: The parameters after the momentum update.
    """
    db_grad = [[]] * 10
    db_out = [[]] * 10
    # pylint: disable=line-too-long
    db_grad[0] = [0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018, 0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615]
    db_out[0] = [-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018, -0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618]
    db_grad[1] = [0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378, 0.5513742, 0.94687688, 0.16012503, 0.22159521]
    db_out[1] = [-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884, -0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544]
    db_grad[2] = [0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965, 0.31168157, 0.43203235, 0.16792089, 0.24644311]
    db_out[2] = [-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978, -0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189]
    db_grad[3] = [0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098, 0.81454384, 0.03848977, 0.89759839, 0.93665648]
    db_out[3] = [-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105, -0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303]
    db_grad[4] = [0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359, 0.69107032, 0.81897682, 0.5433259, 0.67860287]
    db_out[4] = [-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165, -0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544]
    db_grad[5] = [0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563, 0.84163809, 0.41172323, 0.83259648, 0.44941229]
    db_out[5] = [-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094, -0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717]
    db_grad[6] = [0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221, 0.73577434, 0.16014607, 0.57500273, 0.071136251]
    db_out[6] = [-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685, -0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997]
    db_grad[7] = [0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646, 0.74053431, 0.16033, 0.66625422, 0.73515922]
    db_out[7] = [-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838, -0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418]
    db_grad[8] = [0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039, 0.55561525, 0.22567581, 0.93331909, 0.29438227]
    db_out[8] = [-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527, -0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781]
    db_grad[9] = [0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893, 0.68593478, 0.50580865, 0.12602448, 0.093537711]
    db_out[9] = [-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302, -0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295]
    # pylint: enable=line-too-long
    return db_grad, db_out

  def testLikeDistBeliefMom01(self):
    """Momentum updates match the dist-belief reference values."""
    with self.test_session():
      db_grad, db_out = self._dbParamsMom01()
      num_samples = len(db_grad)
      var0 = tf.Variable([0.0] * num_samples)
      grads0 = tf.constant([0.0] * num_samples)
      mom_opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
      mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
      tf.initialize_all_variables().run()
      for i in xrange(num_samples):
        mom_update.run(feed_dict={grads0: db_grad[i]})
        self.assertAllClose(np.array(db_out[i]), var0.eval())

  def testSparse(self):
    """IndexedSlices gradients: only touched rows accumulate momentum."""
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable(tf.zeros([4, 2], dtype=dtype))
        var1 = tf.Variable(tf.constant(1.0, dtype, [4, 2]))
        grads0 = tf.IndexedSlices(tf.constant([[.1, .1]], dtype=dtype),
                                  tf.constant([1]),
                                  tf.constant([4, 2]))
        grads1 = tf.IndexedSlices(tf.constant([[.01, .01], [.01, .01]],
                                              dtype=dtype),
                                  tf.constant([2, 3]),
                                  tf.constant([4, 2]))
        mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
        mom_update = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()
        # Check we have slots
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        # Fetch params to validate initial values
        self.assertAllClose([0, 0], var0.eval()[0])
        self.assertAllClose([0, 0], var0.eval()[1])
        self.assertAllClose([1, 1], var1.eval()[2])
        # Step 1: the momentum accumulators are 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([0, 0]), slot0.eval()[0])
        self.assertAllCloseAccordingToType(
            np.array([.1, .1]), slot0.eval()[1])
        self.assertAllCloseAccordingToType(
            np.array([.01, .01]), slot1.eval()[2])
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([0, 0]), var0.eval()[0])
        self.assertAllCloseAccordingToType(np.array([- (0.1 * 2.0),
                                                     - (0.1 * 2.0)]),
                                           var0.eval()[1])
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.01 * 2.0),
                                                     1.0 - (0.01 * 2.0)]),
                                           var1.eval()[2])
        # Step 2: the momentum accumulators contain the previous update.
        mom_update.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllClose(np.array([0, 0]), slot0.eval()[0])
        self.assertAllCloseAccordingToType(np.array([(0.9 * 0.1 + 0.1),
                                                     (0.9 * 0.1 + 0.1)]),
                                           slot0.eval()[1])
        self.assertAllCloseAccordingToType(np.array([(0.9 * 0.01 + 0.01),
                                                     (0.9 * 0.01 + 0.01)]),
                                           slot1.eval()[2])
        # Check that the parameters have been updated.
        self.assertAllClose(np.array([0, 0]), var0.eval()[0])
        self.assertAllCloseAccordingToType(
            np.array([- (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval()[1])
        self.assertAllCloseAccordingToType(
            np.array([0.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      0.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval()[2])

  def testSharing(self):
    """Two apply_gradients ops from one optimizer share the same slots."""
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        mom_opt = tf.train.MomentumOptimizer(learning_rate=2.0, momentum=0.9)
        mom_update1 = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        mom_update2 = mom_opt.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        tf.initialize_all_variables().run()
        self.assertEqual(["momentum"], mom_opt.get_slot_names())
        slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Step 1: the momentum accumulators where 0. So we should see a normal
        # update: v -= grad * learning_rate
        mom_update1.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
        self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(np.array([1.0 - (0.1 * 2.0),
                                                     2.0 - (0.1 * 2.0)]),
                                           var0.eval())
        self.assertAllCloseAccordingToType(np.array([3.0 - (0.01 * 2.0),
                                                     4.0 - (0.01 * 2.0)]),
                                           var1.eval())
        # Step 2: the second momentum accumulators contain the previous update.
        mom_update2.run()
        # Check that the momentum accumulators have been updated.
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
            slot0.eval())
        self.assertAllCloseAccordingToType(
            np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
            slot1.eval())
        # Check that the parameters have been updated.
        self.assertAllCloseAccordingToType(
            np.array([1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
                      2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)]),
            var0.eval())
        self.assertAllCloseAccordingToType(
            np.array([2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
                      3.98 - ((0.9 * 0.01 + 0.01) * 2.0)]),
            var1.eval())
if __name__ == "__main__":
tf.test.main()
| |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import signal
from time import sleep
from mycroft.audio.services import AudioBackend
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
from mycroft.util import play_mp3, play_ogg, play_wav
import mimetypes
import re
from requests import Session
def find_mime(path):
    """Determine the mime type of a local path or http(s) url.

    For urls a HEAD request is issued and the reported content-type is
    used when available.  Otherwise the type is guessed from the file
    extension, retrying once with any url query arguments stripped off.

    Returns:
        [type, subtype] list on success, (None, None) tuple otherwise.
    """
    mime = None
    if path.startswith('http'):
        # Ask the server directly for the content type
        head = Session().head(path, allow_redirects=True)
        if 200 <= head.status_code < 300:
            mime = head.headers['content-type']
    if not mime:
        mime = mimetypes.guess_type(path)[0]
    if not mime:
        # Remove any http address arguments and retry the guess
        trimmed = re.sub(r'\?.*$', '', path)
        mime = mimetypes.guess_type(trimmed)[0]
    return mime.split('/') if mime else (None, None)
class SimpleAudioService(AudioBackend):
    """
    Simple Audio backend for both mpg123 and the ogg123 player.
    This one is rather limited and only implements basic usage.

    Playback is asynchronous: play() emits a 'SimpleAudioServicePlay'
    message on the bus which is handled by _play() in this instance.
    """
    def __init__(self, config, bus, name='simple'):
        """Create the service.

        Arguments:
            config (dict): backend configuration
            bus: Mycroft messagebus connection
            name (str): service instance name
        """
        super().__init__(config, bus)
        self.config = config
        self.process = None  # player subprocess currently playing, if any
        self.bus = bus
        self.name = name
        self._stop_signal = False  # set by stop() to abort the playback loop
        self._is_playing = False
        self._paused = False
        self.tracks = []  # playlist entries: uri strings or [uri, mime] pairs
        self.index = 0  # position of the current track in self.tracks
        self.supports_mime_hints = True
        mimetypes.init()
        self.bus.on('SimpleAudioServicePlay', self._play)
    def supported_uris(self):
        """Return the list of uri schemes this backend can handle."""
        return ['file', 'http']
    def clear_list(self):
        """Remove all tracks from the playlist."""
        self.tracks = []
    def add_list(self, tracks):
        """Append tracks to the playlist.

        Arguments:
            tracks (list): uri strings or [uri, mime] pairs to append
        """
        self.tracks += tracks
        LOG.info("Track list is " + str(tracks))
    def _play(self, message):
        """ Implementation specific async method to handle playback.
        This allows mpg123 service to use the "next method as well
        as basic play/stop.
        """
        LOG.info('SimpleAudioService._play')
        # Stop any existing audio playback
        self._stop_running_process()
        repeat = message.data.get('repeat', False)
        self._is_playing = True
        self._paused = False
        # NOTE(review): assumes self.tracks is non-empty and self.index is in
        # range; an empty playlist would raise IndexError here -- confirm
        # callers always populate the playlist before emitting the message.
        if isinstance(self.tracks[self.index], list):
            track = self.tracks[self.index][0]
            mime = self.tracks[self.index][1]
            mime = mime.split('/')
        else:  # Assume string
            track = self.tracks[self.index]
            mime = find_mime(track)
        LOG.debug('Mime info: {}'.format(mime))
        # Indicate to audio service which track is being played
        if self._track_start_callback:
            self._track_start_callback(track)
        # Replace file:// uri's with normal paths
        track = track.replace('file://', '')
        try:
            # Select the player subprocess from the mime subtype; fall back
            # to mp3 playback when the subtype is unknown.
            if 'mpeg' in mime[1]:
                self.process = play_mp3(track)
            elif 'ogg' in mime[1]:
                self.process = play_ogg(track)
            elif 'wav' in mime[1]:
                self.process = play_wav(track)
            else:
                # If no mime info could be determined guess mp3
                self.process = play_mp3(track)
        except FileNotFoundError as e:
            LOG.error('Couldn\'t play audio, {}'.format(repr(e)))
            self.process = None
        except Exception as e:
            LOG.exception(repr(e))
            self.process = None
        # Wait for completion or stop request
        while (self._is_process_running() and not self._stop_signal):
            sleep(0.25)
        if self._stop_signal:
            self._stop_running_process()
            self._is_playing = False
            self._paused = False
            return
        else:
            self.process = None
        # if there are more tracks available play next
        self.index += 1
        if self.index < len(self.tracks) or repeat:
            if self.index >= len(self.tracks):
                self.index = 0
            # Re-emit the play message so the next track is handled through
            # the same asynchronous path.
            self.bus.emit(Message('SimpleAudioServicePlay',
                                  {'repeat': repeat}))
        else:
            self._is_playing = False
            self._paused = False
    def play(self, repeat=False):
        """Start playback of the playlist from the first track.

        Arguments:
            repeat (bool): loop the playlist when the end is reached
        """
        LOG.info('Call SimpleAudioServicePlay')
        self.index = 0
        self.bus.emit(Message('SimpleAudioServicePlay', {'repeat': repeat}))
    def stop(self):
        """Request playback stop and block until playback has ended."""
        LOG.info('SimpleAudioServiceStop')
        self._stop_signal = True
        while self._is_playing:
            sleep(0.1)
        self._stop_signal = False
    def _pause(self):
        """ Pauses playback if possible.
        Returns: (bool) New paused status:
        """
        if self.process:
            # Suspend the playback process
            self.process.send_signal(signal.SIGSTOP)
            return True  # After pause the service is paused
        else:
            return False
    def pause(self):
        """Pause playback (no-op if already paused)."""
        if not self._paused:
            self._paused = self._pause()
    def _resume(self):
        """ Resumes playback if possible.
        Returns: (bool) New paused status:
        """
        if self.process:
            # Resume the playback process
            self.process.send_signal(signal.SIGCONT)
            return False  # After resume the service is no longer paused
        else:
            return True
    def resume(self):
        """Resume playback (no-op if not paused)."""
        if self._paused:
            # Resume the playback process
            self._paused = self._resume()
    def next(self):
        """Skip to the next track."""
        # Terminate process to continue to next
        self._stop_running_process()
    def previous(self):
        """Not supported by this backend."""
        pass
    def lower_volume(self):
        """Duck audio by suspending the player process."""
        if not self._paused:
            self._pause()  # poor-man's ducking
    def restore_volume(self):
        """Restore audio after ducking by resuming the player process."""
        if not self._paused:
            self._resume()  # poor-man's unducking
    def _is_process_running(self):
        """Return True while the player subprocess is alive."""
        return self.process and self.process.poll() is None
    def _stop_running_process(self):
        """Terminate the player subprocess, escalating to kill if needed."""
        if self._is_process_running():
            if self._paused:
                # The child process must be "unpaused" in order to be stopped
                self._resume()
            self.process.terminate()
            countdown = 10
            # Give the process up to ~1 second to exit gracefully.
            while self._is_process_running() and countdown > 0:
                sleep(0.1)
                countdown -= 1
            if self._is_process_running():
                # Failed to shutdown when asked nicely.  Force the issue.
                LOG.debug("Killing currently playing audio...")
                self.process.kill()
        self.process = None
def load_service(base_config, bus):
    """Instantiate SimpleAudioService objects from configuration.

    Arguments:
        base_config: audio-service configuration with a 'backends' map.
        bus: message bus handed to every created service instance.

    Returns:
        list of SimpleAudioService instances, one per backend entry of
        type 'simple' that is active (active defaults to True).
    """
    backends = base_config.get('backends', [])
    return [SimpleAudioService(backends[name], bus, name)
            for name in backends
            if backends[name]['type'] == 'simple' and
            backends[name].get('active', True)]
| |
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_lstools.lstools_linux_generic import MbedLsToolsLinuxGeneric
class LinuxPortTestCase(unittest.TestCase):
    """Tests for MbedLsToolsLinuxGeneric driven by canned command output.

    The fixtures in setUp are captured lines from ``ls -l
    /dev/disk/by-id``, ``ls -l /dev/serial/by-id`` and ``mount`` on
    various Linux hosts.

    Fixes applied in review:
    - serial_list_rpi_1 and mount_list_rpi_1 were missing a comma after
      their first data string, so adjacent string literals silently
      concatenated into one malformed element.
    - two test methods shared the name ``test_get_detected_2_k64f``; the
      second definition shadowed the first so the disk_list_2 detection
      test never ran. The second one (which uses the *_4 fixtures) is
      renamed ``test_get_detected_4_k64f``.
    """

    def setUp(self):
        """Create the tool under test and all canned fixture data."""
        self.linux_generic = MbedLsToolsLinuxGeneric()
        self.vfat_devices = [
            "/dev/sdb on /media/usb0 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sdd on /media/usb2 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sde on /media/usb3 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sdc on /media/usb1 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)"
        ]
        self.vfat_devices_ext = [
            "/dev/sdb on /media/MBED_xxx type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sdd on /media/MBED___x type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sde on /media/MBED-xxx type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sdc on /media/MBED_x-x type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sda on /mnt/NUCLEO type vfat (rw,relatime,uid=999,fmask=0133,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,utf8,flush,errors=remount-ro,uhelper=ldm)",
            "/dev/sdf on /mnt/NUCLEO_ type vfat (rw,relatime,uid=999,fmask=0133,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,utf8,flush,errors=remount-ro,uhelper=ldm)",
            "/dev/sdg on /mnt/DAPLINK type vfat (rw,relatime,sync,uid=999,fmask=0022,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,errors=remount-ro,uhelper=ldm)",
            "/dev/sdh on /mnt/DAPLINK_ type vfat (rw,relatime,sync,uid=999,fmask=0022,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,errors=remount-ro,uhelper=ldm)",
            "/dev/sdi on /mnt/DAPLINK__ type vfat (rw,relatime,sync,uid=999,fmask=0022,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,errors=remount-ro,uhelper=ldm)",
        ]
        # get_detected / get_not_detected (1 missing lpc1768)
        self.tids = {
            "0001": "LPC2368",
            "0002": "LPC2368",
            "0240": "FRDM_K64F",  # Under test
            "0245": "FRDM_K64F",
            "1010": "LPC1768",  # Under test
            "0715": "NUCLEO_L053R8",
            "0720": "NUCLEO_F401RE",  # Under test
            "0725": "NUCLEO_F030R8",
        }
        self.disk_list_1 = [
            "total 0",
            "lrwxrwxrwx 1 root 9 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM -> ../../sda",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM-part1 -> ../../sda1",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM-part2 -> ../../sda2",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM-part5 -> ../../sda5",
            "lrwxrwxrwx 1 root 9 Dec 11 14:18 ata-TSSTcorpDVD-ROM_TS-H352C -> ../../sr0",
            "lrwxrwxrwx 1 root 9 Jan 4 15:01 usb-MBED_FDi_sk_A000000001-0:0 -> ../../sdc",
            "lrwxrwxrwx 1 root 9 Jan 4 15:01 usb-MBED_microcontroller_0240020152986E5EAF6693E6-0:0 -> ../../sdb",
            "lrwxrwxrwx 1 root 9 Dec 11 14:18 wwn-0x5000cca30ccffb77 -> ../../sda",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 wwn-0x5000cca30ccffb77-part1 -> ../../sda1",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 wwn-0x5000cca30ccffb77-part2 -> ../../sda2",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 wwn-0x5000cca30ccffb77-part5 -> ../../sda5"
        ]
        self.serial_list_1 = [
            "total 0",
            "lrwxrwxrwx 1 root 13 Jan 4 15:01 usb-MBED_MBED_CMSIS-DAP_0240020152986E5EAF6693E6-if01 -> ../../ttyACM1",
            "lrwxrwxrwx 1 root 13 Jan 4 15:01 usb-MBED_MBED_CMSIS-DAP_A000000001-if01 -> ../../ttyACM0"
        ]
        self.disk_list_rpi_1 = [
            "total 0",
            "lrwxrwxrwx 1 root 9 Mar 14 07:58 ata-VMware_Virtual_IDE_CDROM_Drive_10000000000000000001 -> ../../sr0",
            "lrwxrwxrwx 1 root 9 Mar 15 08:35 usb-MBED_VFS_0240000028634e4500135006691700105f21000097969900-0:0 -> ../../sdb",
            "lrwxrwxrwx 1 root 9 Mar 15 08:35 usb-MBED_VFS_0240000028884e450018700f6bf000338021000097969900-0:0 -> ../../sdc",
            "lrwxrwxrwx 1 root 9 Mar 14 08:44 usb-MBED_VFS_0240000028884e45001f700f6bf000118021000097969900-0:0 -> ../../sdd",
            "lrwxrwxrwx 1 root 9 Mar 15 08:35 usb-MBED_VFS_0240000028884e450036700f6bf000118021000097969900-0:0 -> ../../sde",
            "lrwxrwxrwx 1 root 9 Mar 15 08:35 usb-MBED_VFS_0240000029164e45001b0012706e000df301000097969900-0:0 -> ../../sdd",
            "lrwxrwxrwx 1 root 9 Mar 14 08:44 usb-MBED_VFS_0240000029164e45002f0012706e0006f301000097969900-0:0 -> ../../sdc"
        ]
        # BUGFIX: a comma was missing after the first data string below,
        # which made Python concatenate the first two entries into one.
        self.serial_list_rpi_1 = [
            "total 0",
            "lrwxrwxrwx 1 root 13 Mar 15 08:35 usb-ARM_DAPLink_CMSIS-DAP_0240000028634e4500135006691700105f21000097969900-if01 -> ../../ttyACM0",
            "lrwxrwxrwx 1 root 13 Mar 15 08:35 usb-ARM_DAPLink_CMSIS-DAP_0240000028884e450018700f6bf000338021000097969900-if01 -> ../../ttyACM1",
            "lrwxrwxrwx 1 root 13 Mar 15 08:35 usb-ARM_DAPLink_CMSIS-DAP_0240000028884e450036700f6bf000118021000097969900-if01 -> ../../ttyACM3",
            "lrwxrwxrwx 1 root 13 Mar 15 08:35 usb-ARM_DAPLink_CMSIS-DAP_0240000029164e45001b0012706e000df301000097969900-if01 -> ../../ttyACM2"
        ]
        # BUGFIX: same missing-comma problem after the first entry here.
        self.mount_list_rpi_1 = [
            "/dev/sdd on /media/iot/DAPLINK1 type vfat (rw,nosuid,nodev,uid=1000,gid=1000,shortname=mixed,dmask=0077,utf8=1,showexec,flush,uhelper=udisks2)",
            "/dev/sdb on /media/iot/DAPLINK2 type vfat (rw,nosuid,nodev,uid=1000,gid=1000,shortname=mixed,dmask=0077,utf8=1,showexec,flush,uhelper=udisks2)",
            "/dev/sde on /media/iot/DAPLINK3 type vfat (rw,nosuid,nodev,uid=1000,gid=1000,shortname=mixed,dmask=0077,utf8=1,showexec,flush,uhelper=udisks2)",
            "/dev/sdc on /media/iot/DAPLINK type vfat (rw,nosuid,nodev,uid=1000,gid=1000,shortname=mixed,dmask=0077,utf8=1,showexec,flush,uhelper=udisks2)"
        ]
        self.mount_list_1 = [
            "/dev/sdb on /media/usb0 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sdc on /media/usb1 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)"
        ]
        # get_detected / get_not_detected (1 missing lpc1768, more platforms)
        # +--------------+---------------------+------------+-------------+-------------------------+
        # |platform_name |platform_name_unique |mount_point |serial_port |target_id                |
        # +--------------+---------------------+------------+-------------+-------------------------+
        # |K64F          |K64F[0]              |/media/usb4 |/dev/ttyACM4 |0240020152A06E54AF5E93EC |
        # |K64F          |K64F[1]              |/media/usb3 |/dev/ttyACM3 |02400201489A1E6CB564E3D4 |
        # |K64F          |K64F[2]              |/media/usb0 |/dev/ttyACM1 |0240020152986E5EAF6693E6 |
        # |LPC1768       |LPC1768[0]           |/media/usb1 |/dev/ttyACM0 |A000000001               |
        # |NUCLEO_F401RE |NUCLEO_F401RE[0]     |/media/usb2 |/dev/ttyACM2 |07200200076165023804F31F |
        # +--------------+---------------------+------------+-------------+-------------------------+
        # After read from MBED.HTM:
        # |LPC1768       |LPC1768[0]           |/media/usb1 |/dev/ttyACM0 |101000000000000000000002F7F0D9F98dbdc24b9e28ac87cfc4f23c4c57438d |
        self.disk_list_2 = [
            "total 0",
            "lrwxrwxrwx 1 root 9 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM -> ../../sda",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM-part1 -> ../../sda1",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM-part2 -> ../../sda2",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM-part5 -> ../../sda5",
            "lrwxrwxrwx 1 root 9 Dec 11 14:18 ata-TSSTcorpDVD-ROM_TS-H352C -> ../../sr0",
            "lrwxrwxrwx 1 root 9 Jan 4 15:01 usb-MBED_FDi_sk_A000000001-0:0 -> ../../sdc",
            "lrwxrwxrwx 1 root 9 Jan 5 07:47 usb-MBED_microcontroller_02400201489A1E6CB564E3D4-0:0 -> ../../sde",
            "lrwxrwxrwx 1 root 9 Jan 4 15:01 usb-MBED_microcontroller_0240020152986E5EAF6693E6-0:0 -> ../../sdb",
            "lrwxrwxrwx 1 root 9 Jan 5 07:49 usb-MBED_microcontroller_0240020152A06E54AF5E93EC-0:0 -> ../../sdf",
            "lrwxrwxrwx 1 root 9 Jan 5 07:47 usb-MBED_microcontroller_0672FF485649785087171742-0:0 -> ../../sdd",
            "lrwxrwxrwx 1 root 9 Dec 11 14:18 wwn-0x5000cca30ccffb77 -> ../../sda",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 wwn-0x5000cca30ccffb77-part1 -> ../../sda1",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 wwn-0x5000cca30ccffb77-part2 -> ../../sda2",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 wwn-0x5000cca30ccffb77-part5 -> ../../sda5"
        ]
        self.serial_list_2 = [
            "total 0",
            "lrwxrwxrwx 1 root 13 Jan 5 07:47 usb-MBED_MBED_CMSIS-DAP_02400201489A1E6CB564E3D4-if01 -> ../../ttyACM3",
            "lrwxrwxrwx 1 root 13 Jan 4 15:01 usb-MBED_MBED_CMSIS-DAP_0240020152986E5EAF6693E6-if01 -> ../../ttyACM1",
            "lrwxrwxrwx 1 root 13 Jan 5 07:49 usb-MBED_MBED_CMSIS-DAP_0240020152A06E54AF5E93EC-if01 -> ../../ttyACM4",
            "lrwxrwxrwx 1 root 13 Jan 4 15:01 usb-MBED_MBED_CMSIS-DAP_A000000001-if01 -> ../../ttyACM0",
            "lrwxrwxrwx 1 root 13 Jan 5 07:47 usb-STMicroelectronics_STM32_STLink_0672FF485649785087171742-if02 -> ../../ttyACM2"
        ]
        self.mount_list_2 = [
            "/dev/sdb on /media/usb0 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sdc on /media/usb1 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sdd on /media/usb2 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sde on /media/usb3 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)",
            "/dev/sdf on /media/usb4 type vfat (rw,noexec,nodev,sync,noatime,nodiratime,gid=1000,uid=1000,dmask=000,fmask=000)"
        ]
        # NOTE(review): disk_list_3 holds tty symlinks and serial_list_3
        # holds disk symlinks -- the names look swapped relative to their
        # contents, but test_get_dev_by_id_process_ret_0 depends on these
        # exact lengths; confirm before renaming.
        self.disk_list_3 = [
            "total 0",
            "lrwxrwxrwx 1 root 13 Jan 5 09:41 usb-MBED_MBED_CMSIS-DAP_0240020152986E5EAF6693E6-if01 -> ../../ttyACM0",
            "lrwxrwxrwx 1 root 13 Jan 5 10:00 usb-MBED_MBED_CMSIS-DAP_0240020152A06E54AF5E93EC-if01 -> ../../ttyACM3",
            "lrwxrwxrwx 1 root 13 Jan 5 10:00 usb-MBED_MBED_CMSIS-DAP_107002001FE6E019E2190F91-if01 -> ../../ttyACM1",
            "lrwxrwxrwx 1 root 13 Jan 5 10:00 usb-STMicroelectronics_STM32_STLink_0672FF485649785087171742-if02 -> ../../ttyACM2",
        ]
        self.serial_list_3 = [
            "total 0",
            "lrwxrwxrwx 1 root 9 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM -> ../../sda",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM-part1 -> ../../sda1",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM-part2 -> ../../sda2",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 ata-HDS728080PLA380_40Y9028LEN_PFDB32S7S44XLM-part5 -> ../../sda5",
            "lrwxrwxrwx 1 root 9 Dec 11 14:18 ata-TSSTcorpDVD-ROM_TS-H352C -> ../../sr0",
            "lrwxrwxrwx 1 root 9 Jan 5 09:41 usb-MBED_microcontroller_0240020152986E5EAF6693E6-0:0 -> ../../sdb",
            "lrwxrwxrwx 1 root 9 Jan 5 10:00 usb-MBED_microcontroller_0240020152A06E54AF5E93EC-0:0 -> ../../sde",
            "lrwxrwxrwx 1 root 9 Jan 5 10:00 usb-MBED_microcontroller_0672FF485649785087171742-0:0 -> ../../sdd",
            "lrwxrwxrwx 1 root 9 Jan 5 10:00 usb-MBED_microcontroller_107002001FE6E019E2190F91-0:0 -> ../../sdc",
            "lrwxrwxrwx 1 root 9 Dec 11 14:18 wwn-0x5000cca30ccffb77 -> ../../sda",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 wwn-0x5000cca30ccffb77-part1 -> ../../sda1",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 wwn-0x5000cca30ccffb77-part2 -> ../../sda2",
            "lrwxrwxrwx 1 root 10 Dec 11 14:18 wwn-0x5000cca30ccffb77-part5 -> ../../sda5",
        ]
        self.disk_list_4 = [
            "lrwxrwxrwx 1 root root 9 Mar 31 02:43 ata-VMware_Virtual_SATA_CDRW_Drive_00000000000000000001 -> ../../sr0",
            "lrwxrwxrwx 1 root root 9 Mar 31 02:43 ata-VMware_Virtual_SATA_CDRW_Drive_01000000000000000001 -> ../../sr1",
            "lrwxrwxrwx 1 root root 9 Apr 6 02:56 usb-MBED_VFS_0240000033514e45001f500585d40014e981000097969900-0:0 -> ../../sdb"
        ]
        self.serial_list_4 = [
            "lrwxrwxrwx 1 root root 13 Apr 6 02:56 pci-ARM_DAPLink_CMSIS-DAP_0240000033514e45001f500585d40014e981000097969900-if01 -> ../../ttyACM0"
        ]
        self.mount_list_4 = [
            "/dev/sdb on /media/przemek/DAPLINK type vfat (rw,nosuid,nodev,relatime,uid=1000,gid=1000,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,showexec,utf8,flush,errors=remount-ro,uhelper=udisks2)"
        ]

    def tearDown(self):
        """Nothing to clean up; fixtures are rebuilt per test."""
        pass

    def test_os_support(self):
        """The generic Linux port must report LinuxGeneric support."""
        self.assertIn("LinuxGeneric", self.linux_generic.os_supported)

    def test_get_mount_point_basic(self):
        """Mount points resolve from simple /media/usbN mount lines."""
        self.assertEqual('/media/usb0', self.linux_generic.get_mount_point('sdb', self.vfat_devices))
        self.assertEqual('/media/usb2', self.linux_generic.get_mount_point('sdd', self.vfat_devices))
        self.assertEqual('/media/usb3', self.linux_generic.get_mount_point('sde', self.vfat_devices))
        self.assertEqual('/media/usb1', self.linux_generic.get_mount_point('sdc', self.vfat_devices))

    def test_get_mount_point_ext(self):
        """Mount points resolve for names with '_', '-' and suffixes."""
        self.assertEqual('/media/MBED_xxx', self.linux_generic.get_mount_point('sdb', self.vfat_devices_ext))
        self.assertEqual('/media/MBED___x', self.linux_generic.get_mount_point('sdd', self.vfat_devices_ext))
        self.assertEqual('/media/MBED-xxx', self.linux_generic.get_mount_point('sde', self.vfat_devices_ext))
        self.assertEqual('/media/MBED_x-x', self.linux_generic.get_mount_point('sdc', self.vfat_devices_ext))
        self.assertEqual('/mnt/NUCLEO', self.linux_generic.get_mount_point('sda', self.vfat_devices_ext))
        self.assertEqual('/mnt/NUCLEO_', self.linux_generic.get_mount_point('sdf', self.vfat_devices_ext))
        self.assertEqual('/mnt/DAPLINK', self.linux_generic.get_mount_point('sdg', self.vfat_devices_ext))
        self.assertEqual('/mnt/DAPLINK_', self.linux_generic.get_mount_point('sdh', self.vfat_devices_ext))
        self.assertEqual('/mnt/DAPLINK__', self.linux_generic.get_mount_point('sdi', self.vfat_devices_ext))

    def test_get_dev_name(self):
        """Device names are extracted from by-id symlink lines."""
        # With USB- prefix
        self.assertEqual('ttyACM0', self.linux_generic.get_dev_name('usb-MBED_MBED_CMSIS-DAP_02400201489A1E6CB564E3D4-if01 -> ../../ttyACM0'))
        self.assertEqual('ttyACM2', self.linux_generic.get_dev_name('usb-STMicroelectronics_STM32_STLink_0672FF485649785087171742-if02 -> ../../ttyACM2'))
        self.assertEqual('ttyACM3', self.linux_generic.get_dev_name('usb-MBED_MBED_CMSIS-DAP_0240020152986E5EAF6693E6-if01 -> ../../ttyACM3'))
        self.assertEqual('ttyACM2', self.linux_generic.get_dev_name('/dev/ttyACM2'))
        self.assertEqual('sdb', self.linux_generic.get_dev_name('usb-MBED_microcontroller_02400201489A1E6CB564E3D4-0:0 -> ../../sdb'))
        self.assertEqual('sde', self.linux_generic.get_dev_name('usb-MBED_microcontroller_0240020152986E5EAF6693E6-0:0 -> ../../sde'))
        self.assertEqual('sdd', self.linux_generic.get_dev_name('usb-MBED_microcontroller_0672FF485649785087171742-0:0 -> ../../sdd'))
        self.assertEqual('sdb', self.linux_generic.get_dev_name('usb-MBED_VFS_0240000033514e45001f500585d40014e981000097969900-0:0 -> ../../sdb'))
        # With PCI- prefix
        self.assertEqual('ttyACM0', self.linux_generic.get_dev_name('pci-ARM_DAPLink_CMSIS-DAP_0240000033514e45001f500585d40014e981000097969900-if01 -> ../../ttyACM0'))

    def test_get_detected_1_k64f(self):
        """One K64F is detected from the first fixture set."""
        # get_detected(self, tids, disk_list, serial_list, mount_list)
        mbed_det = self.linux_generic.get_detected(self.tids,
                                                   self.disk_list_1,
                                                   self.serial_list_1,
                                                   self.mount_list_1)
        self.assertEqual(1, len(mbed_det))
        self.assertIn([
            "FRDM_K64F",
            "sdb",
            "/media/usb0",
            "/dev/ttyACM1",
            "usb-MBED_microcontroller_0240020152986E5EAF6693E6-0:0 -> ../../sdb"
            ],
            mbed_det)

    def test_get_not_detected_1_unknown_lpc1768(self):
        """An LPC1768 with an unknown target id is reported undetected."""
        mbed_ndet = self.linux_generic.get_not_detected(self.tids,
                                                        self.disk_list_1,
                                                        self.serial_list_1,
                                                        self.mount_list_1)
        self.assertEqual(1, len(mbed_ndet))
        self.assertIn([
            None,
            "sdc",
            "/media/usb1",
            "/dev/ttyACM0",
            "usb-MBED_FDi_sk_A000000001-0:0 -> ../../sdc"
            ],
            mbed_ndet)

    def test_get_detected_2_k64f(self):
        """Three K64Fs are detected from the second fixture set."""
        # get_detected(self, tids, disk_list, serial_list, mount_list)
        mbed_det = self.linux_generic.get_detected(self.tids,
                                                   self.disk_list_2,
                                                   self.serial_list_2,
                                                   self.mount_list_2)
        self.assertEqual(3, len(mbed_det))
        self.assertIn([
            "FRDM_K64F",
            "sdf",
            "/media/usb4",
            "/dev/ttyACM4",
            "usb-MBED_microcontroller_0240020152A06E54AF5E93EC-0:0 -> ../../sdf"
            ],
            mbed_det)
        self.assertIn([
            "FRDM_K64F",
            "sde",
            "/media/usb3",
            "/dev/ttyACM3",
            "usb-MBED_microcontroller_02400201489A1E6CB564E3D4-0:0 -> ../../sde"
            ],
            mbed_det)
        self.assertIn([
            "FRDM_K64F",
            "sdb",
            "/media/usb0",
            "/dev/ttyACM1",
            "usb-MBED_microcontroller_0240020152986E5EAF6693E6-0:0 -> ../../sdb"
            ],
            mbed_det)

    def test_get_detected_4_k64f(self):
        """One K64F (DAPLink VFS, pci- serial prefix) in fixture set 4.

        BUGFIX: was also named test_get_detected_2_k64f, shadowing the
        disk_list_2 test above so it never executed.
        """
        mbed_det = self.linux_generic.get_detected(self.tids,
                                                   self.disk_list_4,
                                                   self.serial_list_4,
                                                   self.mount_list_4)
        self.assertEqual(1, len(mbed_det))
        self.assertIn([
            "FRDM_K64F",
            "sdb",
            "/media/przemek/DAPLINK",
            "/dev/ttyACM0",
            "usb-MBED_VFS_0240000033514e45001f500585d40014e981000097969900-0:0 -> ../../sdb"
            ],
            mbed_det)

    def test_get_not_detected_2_unknown_lpc1768_stf401(self):
        """Two unknown boards are reported undetected in fixture set 2."""
        mbed_ndet = self.linux_generic.get_not_detected(self.tids,
                                                        self.disk_list_2,
                                                        self.serial_list_2,
                                                        self.mount_list_2)
        self.assertEqual(2, len(mbed_ndet))
        self.assertIn([
            None,
            "sdc",
            "/media/usb1",
            "/dev/ttyACM0",
            "usb-MBED_FDi_sk_A000000001-0:0 -> ../../sdc"
            ],
            mbed_ndet)
        self.assertIn([
            None,
            "sdd",
            "/media/usb2",
            "/dev/ttyACM2",
            "usb-MBED_microcontroller_0672FF485649785087171742-0:0 -> ../../sdd"
            ],
            mbed_ndet)

    def test_get_disk_hex_ids_1(self):
        """Target-id -> symlink mapping from the first disk listing."""
        disk_hex_ids = self.linux_generic.get_disk_hex_ids(self.disk_list_1)
        self.assertEqual(2, len(disk_hex_ids))
        hex_keys = disk_hex_ids.keys()
        self.assertEqual(2, len(hex_keys))
        self.assertIn("A000000001", hex_keys)
        self.assertIn("0240020152986E5EAF6693E6", hex_keys)
        hex_values = disk_hex_ids.values()
        self.assertEqual(2, len(hex_values))
        self.assertIn("usb-MBED_FDi_sk_A000000001-0:0 -> ../../sdc", hex_values)
        self.assertIn("usb-MBED_microcontroller_0240020152986E5EAF6693E6-0:0 -> ../../sdb", hex_values)

    def test_get_rpi_disk_hex_ids_1(self):
        """Target-id mapping from the Raspberry Pi DAPLink disk listing."""
        disk_hex_ids = self.linux_generic.get_disk_hex_ids(self.disk_list_rpi_1)
        self.assertEqual(6, len(disk_hex_ids))
        hex_keys = disk_hex_ids.keys()
        self.assertEqual(6, len(hex_keys))
        self.assertIn("0240000028634e4500135006691700105f21000097969900", hex_keys)
        self.assertIn("0240000028884e450018700f6bf000338021000097969900", hex_keys)
        self.assertIn("0240000028884e45001f700f6bf000118021000097969900", hex_keys)
        self.assertIn("0240000028884e450036700f6bf000118021000097969900", hex_keys)
        self.assertIn("0240000029164e45001b0012706e000df301000097969900", hex_keys)
        self.assertIn("0240000029164e45002f0012706e0006f301000097969900", hex_keys)
        hex_values = disk_hex_ids.values()
        self.assertEqual(6, len(hex_values))
        self.assertIn("usb-MBED_VFS_0240000028634e4500135006691700105f21000097969900-0:0 -> ../../sdb", hex_values)
        self.assertIn("usb-MBED_VFS_0240000028884e450018700f6bf000338021000097969900-0:0 -> ../../sdc", hex_values)
        self.assertIn("usb-MBED_VFS_0240000028884e45001f700f6bf000118021000097969900-0:0 -> ../../sdd", hex_values)
        self.assertIn("usb-MBED_VFS_0240000028884e450036700f6bf000118021000097969900-0:0 -> ../../sde", hex_values)
        self.assertIn("usb-MBED_VFS_0240000029164e45001b0012706e000df301000097969900-0:0 -> ../../sdd", hex_values)
        self.assertIn("usb-MBED_VFS_0240000029164e45002f0012706e0006f301000097969900-0:0 -> ../../sdc", hex_values)

    def test_get_disk_hex_ids_2(self):
        """Target-id mapping from the second (larger) disk listing."""
        disk_hex_ids = self.linux_generic.get_disk_hex_ids(self.disk_list_2)
        self.assertEqual(5, len(disk_hex_ids))
        # Checking for scanned target ids (in dict keys)
        hex_keys = disk_hex_ids.keys()
        self.assertEqual(5, len(hex_keys))
        self.assertIn("A000000001", hex_keys)
        self.assertIn("0240020152A06E54AF5E93EC", hex_keys)
        self.assertIn("0672FF485649785087171742", hex_keys)
        self.assertIn("02400201489A1E6CB564E3D4", hex_keys)
        self.assertIn("0240020152986E5EAF6693E6", hex_keys)
        hex_values = disk_hex_ids.values()
        self.assertEqual(5, len(hex_values))
        self.assertIn("usb-MBED_FDi_sk_A000000001-0:0 -> ../../sdc", hex_values)
        self.assertIn("usb-MBED_microcontroller_0240020152A06E54AF5E93EC-0:0 -> ../../sdf", hex_values)
        self.assertIn("usb-MBED_microcontroller_0672FF485649785087171742-0:0 -> ../../sdd", hex_values)
        self.assertIn("usb-MBED_microcontroller_02400201489A1E6CB564E3D4-0:0 -> ../../sde", hex_values)
        self.assertIn("usb-MBED_microcontroller_0240020152986E5EAF6693E6-0:0 -> ../../sdb", hex_values)

    def test_get_disk_hex_ids_4(self):
        """Target-id mapping from the DAPLink VFS disk listing."""
        disk_hex_ids = self.linux_generic.get_disk_hex_ids(self.disk_list_4)
        self.assertEqual(1, len(disk_hex_ids))
        # Checking for scanned target ids (in dict keys)
        hex_keys = disk_hex_ids.keys()
        self.assertIn("0240000033514e45001f500585d40014e981000097969900", hex_keys)
        hex_values = disk_hex_ids.values()
        self.assertIn("usb-MBED_VFS_0240000033514e45001f500585d40014e981000097969900-0:0 -> ../../sdb", hex_values)

    def test_get_dev_by_id_process_ret_0(self):
        """With return code 0 the 'total 0' header line is filtered out."""
        id_disks = self.linux_generic.get_dev_by_id_process(self.disk_list_3, 0)
        id_serial = self.linux_generic.get_dev_by_id_process(self.serial_list_3, 0)
        self.assertEqual(4, len(id_disks))
        self.assertEqual(13, len(id_serial))
        self.assertNotIn("total 0", id_disks)
        self.assertNotIn("Total 0", id_disks)
        self.assertNotIn("total 0", id_serial)
        self.assertNotIn("Total 0", id_serial)

    def test_get_dev_by_id_process_ret_non_zero(self):
        """A non-zero return code yields empty listings."""
        id_disks = self.linux_generic.get_dev_by_id_process(self.disk_list_3, -1)
        id_serial = self.linux_generic.get_dev_by_id_process(self.serial_list_3, -1)
        self.assertEqual([], id_disks)
        self.assertEqual([], id_serial)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
"""This class stores all of the samples for training. It is able to
construct randomly selected batches of phi's from the stored history.
It allocates more memory than necessary, then shifts all of the
data back to 0 when the samples reach the end of the allocated memory.
"""
import numpy as np
import pyximport
pyximport.install(setup_args={'include_dirs': np.get_include()})
import shift
import time
import theano
floatX = theano.config.floatX
class DataSet(object):
    """Fixed-length history of (state, action, reward, terminal) samples.

    Samples live in over-allocated numpy arrays; when the arrays fill,
    the most recent ``max_steps`` entries are shifted back to index 0
    (instead of a ring buffer) so phi construction can use contiguous
    slices.
    """

    def __init__(self, width, height, max_steps=1000, phi_length=4,
                 capacity=None,
                 reward_weight=0., reward_weight_horizon=20,
                 reward_weight_decay=0.95):
        """ Construct a DataSet.

        Arguments:
            width,height - image size
            max_steps - the length of history to store.
            phi_length - number of images to concatenate into a state.
            capacity - amount of memory to allocate (just for debugging.)
            reward_weight - In random_batch, at what probability to pick a
                reward sample (in addition to random sample.)
            reward_weight_horizon - In random_batch, how many steps before a
                reward the samples receive the special reward weight.
            reward_weight_decay - In random_batch, by how much to decay the
                reward weight for every step going backward from the point
                of reward.
        """
        self.count = 0
        self.max_steps = max_steps
        self.phi_length = phi_length
        # BUGFIX: identity comparison for None, not '=='
        if capacity is None:
            self.capacity = max_steps + int(np.ceil(max_steps * .1))
        else:
            self.capacity = capacity
        self.states = np.zeros((self.capacity, height, width), dtype='uint8')
        self.actions = np.zeros(self.capacity, dtype='int32')
        self.rewards = np.zeros(self.capacity, dtype=floatX)
        self.terminal = np.zeros(self.capacity, dtype='bool')
        self.reward_weight = reward_weight
        self.reward_weight_horizon = reward_weight_horizon
        # Sampling probabilities for the steps leading up to a reward:
        # weights[0] applies at the reward itself and decays backward.
        # BUGFIX: seed weights[0] with 1.0 -- starting the recurrence
        # from np.zeros left every weight at 0, which silently disabled
        # the reward-weighting feature.
        self.weights = np.zeros(reward_weight_horizon)
        self.weights[0] = 1.0
        for i in range(1, reward_weight_horizon):
            self.weights[i] = reward_weight_decay * self.weights[i-1]

    def save(self, path):
        """Save rewards and their preceding horizons to an .npz file.

        The entire data set would be huge, so only the slices around
        each non-zero reward (horizon + phi history) are kept.
        """
        states = []
        actions = []
        rewards = []
        terminal = []
        for end_index in np.where(self.rewards)[0]:
            start_index = end_index
            end_index += 1
            # Walk backward to cover the reward horizon plus phi history,
            # stopping at the array start or at the previous reward.
            while True:
                if start_index <= 0:
                    break
                if start_index <= end_index - self.reward_weight_horizon - self.phi_length:
                    break
                if self.rewards[start_index-1]:
                    break
                start_index -= 1
            start_index = max(0, start_index)
            states.append(self.states[start_index:end_index, :, :])
            actions.append(self.actions[start_index:end_index])
            rewards.append(self.rewards[start_index:end_index])
            terminal.append(self.terminal[start_index:end_index])
        if rewards:
            # compact data set
            states = np.concatenate(states)
            actions = np.concatenate(actions)
            rewards = np.concatenate(rewards)
            terminal = np.concatenate(terminal)
            # BUGFIX: the key was misspelled 'termianl'; save under the
            # correct name. load() still accepts the old spelling.
            np.savez_compressed(path,
                                states=states,
                                actions=actions,
                                rewards=rewards,
                                terminal=terminal)

    def load(self, path):
        """Load a data set previously written by save()."""
        d = np.load(path)
        # Accept both the corrected key and the historical misspelling
        # so archives written by older code still load.
        term_key = 'terminal' if 'terminal' in d.files else 'termianl'
        n = np.where(d[term_key])[0][-1] + 1
        n = min(n, self.capacity)
        # Parenthesized single-argument print: same output on py2 and py3
        print("pre loading %d" % n)
        self.states[:n, :, :] = d['states'][:n, :, :]
        self.actions[:n] = d['actions'][:n]
        self.rewards[:n] = d['rewards'][:n]
        self.terminal[:n] = d[term_key][:n]
        self.count = n

    def _min_index(self):
        # Oldest index that still has max_steps of history behind it
        return max(0, self.count - self.max_steps)

    def _max_index(self):
        # Newest index from which a full phi plus successor can be built
        return self.count - (self.phi_length + 1)

    def __len__(self):
        """ Return the total number of available data items. """
        return max(0, (self._max_index() - self._min_index()) + 1)

    def add_sample(self, state, action, reward, terminal):
        """Append one (state, action, reward, terminal) transition."""
        self.states[self.count, ...] = state
        self.actions[self.count] = action
        self.rewards[self.count] = reward
        self.terminal[self.count] = terminal
        self.count += 1

        # Shift the final max_steps back to the beginning.
        if self.count == self.capacity:
            roll_amount = self.capacity - self.max_steps
            shift.shift3d_uint8(self.states, roll_amount)
            self.actions = np.roll(self.actions, -roll_amount)
            self.rewards = np.roll(self.rewards, -roll_amount)
            self.terminal = np.roll(self.terminal, -roll_amount)
            self.count = self.max_steps

    def single_episode(self, start, end):
        """ Make sure that a possible phi does not cross a trial boundary.
        """
        return np.alltrue(np.logical_not(self.terminal[start:end]))

    def last_phi(self):
        """
        Return the most recent phi.
        """
        phi = self._make_phi(self.count - self.phi_length)
        return np.array(phi, dtype=floatX)

    def phi(self, state):
        """
        Return a phi based on the latest image, by grabbing enough
        history from the data set to fill it out.
        """
        phi = np.empty((self.phi_length,
                        self.states.shape[1],
                        self.states.shape[2]),
                       dtype=floatX)

        phi[0:(self.phi_length-1), ...] = self.last_phi()[1::]
        phi[self.phi_length-1, ...] = state
        return phi

    def _make_phi(self, index):
        """Return the phi_length consecutive frames starting at index."""
        end_index = index + self.phi_length - 1
        #assert self.single_episode(index, end_index)
        return self.states[index:end_index + 1, ...]

    def _empty_batch(self, batch_size):
        """Allocate the arrays a batch will be filled into."""
        # Set aside memory for the batch
        states = np.empty((batch_size, self.phi_length,
                           self.states.shape[1], self.states.shape[2]),
                          dtype=floatX)
        actions = np.empty((batch_size, 1), dtype='int32')
        rewards = np.empty((batch_size, 1), dtype=floatX)
        terminals = np.empty((batch_size, 1), dtype=bool)
        next_states = np.empty((batch_size, self.phi_length,
                                self.states.shape[1],
                                self.states.shape[2]), dtype=floatX)
        return states, actions, rewards, terminals, next_states

    def batch_iterator(self, batch_size):
        """ Generator for iterating over all valid batches. """
        index = self._min_index()
        batch_count = 0
        states, actions, rewards, terminals, next_states = \
            self._empty_batch(batch_size)
        while index <= self._max_index():
            end_index = index + self.phi_length - 1
            if self.single_episode(index, end_index):
                states[batch_count, ...] = self._make_phi(index)
                actions[batch_count, 0] = self.actions[end_index]
                rewards[batch_count, 0] = self.rewards[end_index]
                terminals[batch_count, 0] = self.terminal[end_index]
                next_states[batch_count, ...] = self._make_phi(index + 1)
                batch_count += 1
            index += 1

            if batch_count == batch_size:
                yield states, actions, rewards, terminals, next_states
                batch_count = 0
                states, actions, rewards, terminals, next_states = \
                    self._empty_batch(batch_size)

    def random_batch(self, batch_size):
        """Return a random batch of transitions.

        When reward_weight > 0, samples near rewards are
        over-represented according to self.weights.

        Note: returns (states, actions, rewards, NEXT_STATES, TERMINALS)
        -- a different order than batch_iterator yields; kept as-is for
        backward compatibility with existing callers.
        """
        count = 0
        states, actions, rewards, terminals, next_states = \
            self._empty_batch(batch_size)

        # collect random reward samples and samples that preceded rewards
        reward_samples = 0
        if self.reward_weight > 0.:
            rewards_samples_loc = np.where(self.rewards)[0]
            n1 = len(rewards_samples_loc)
            if n1:
                # BUGFIX: use an integer dtype; np.empty defaults to
                # float and float values cannot index arrays below.
                samples_idx = np.empty(n1 * self.reward_weight_horizon,
                                       dtype=int)
                for end_index in rewards_samples_loc:
                    w = np.random.random(self.reward_weight_horizon) < self.weights
                    w = np.where(w)[0]
                    # BUGFIX: 'if w:' raises ValueError for index arrays
                    # with more than one element; test the size instead.
                    if w.size:
                        w = end_index - w
                        w = w[w >= 0]
                        n = reward_samples + len(w)
                        samples_idx[reward_samples:n] = w
                        reward_samples = n

        # Grab random samples until we have enough
        while count < batch_size:
            if reward_samples:
                # Consume the pre-collected reward-weighted indices first
                reward_samples -= 1
                end_index = samples_idx[reward_samples]
                index = end_index - (self.phi_length - 1)
                if index < self._min_index() or index > self._max_index():
                    continue
            else:
                index = np.random.randint(self._min_index(),
                                          self._max_index() + 1)
                end_index = index + self.phi_length - 1
            if self.single_episode(index, end_index):
                states[count, ...] = self._make_phi(index)
                actions[count, 0] = self.actions[end_index]
                rewards[count, 0] = self.rewards[end_index]
                terminals[count, 0] = self.terminal[end_index]
                next_states[count, ...] = self._make_phi(index + 1)
                count += 1
        return states, actions, rewards, next_states, terminals
# TESTING CODE BELOW THIS POINT...
def simple_tests():
np.random.seed(222)
dataset = DataSet(width=2, height=3, max_steps=6, phi_length=4, capacity=7)
for i in range(10):
img = np.random.randint(0, 256, size=(3, 2))
action = np.random.randint(16)
reward = np.random.random()
terminal = False
if np.random.random() < .05:
terminal = True
print 'img', img
dataset.add_sample(img, action, reward, terminal)
print "S", dataset.states
print "A", dataset.actions
print "R", dataset.rewards
print "T", dataset.terminal
print "COUNT", "CAPACITY", dataset.count, dataset.capacity
print
print "LAST PHI", dataset.last_phi()
print
print 'BATCH', dataset.random_batch(2)
def speed_tests():
dataset = DataSet(width=80, height=80, max_steps=20000, phi_length=4)
img = np.random.randint(0, 256, size=(80, 80))
action = np.random.randint(16)
reward = np.random.random()
start = time.time()
for i in range(100000):
terminal = False
if np.random.random() < .05:
terminal = True
dataset.add_sample(img, action, reward, terminal)
print "samples per second: ", 100000 / (time.time() - start)
start = time.time()
for i in range(200):
a = dataset.random_batch(32)
print "batches per second: ", 200 / (time.time() - start)
print dataset.last_phi()
def trivial_tests():
    # Smallest possible smoke test: three 1x2 frames with phi_length of 2.
    dataset = DataSet(width=2, height=1, max_steps=3, phi_length=2)
    img1 = np.array([[1, 1]], dtype='uint8')
    img2 = np.array([[2, 2]], dtype='uint8')
    img3 = np.array([[3, 3]], dtype='uint8')
    dataset.add_sample(img1, 1, 1, False)
    dataset.add_sample(img2, 2, 2, False)
    # Last sample is terminal, closing the only episode in the buffer.
    dataset.add_sample(img3, 2, 2, True)
    print "last", dataset.last_phi()
    print "random", dataset.random_batch(1)
def max_size_tests():
dataset1 = DataSet(width=3, height=4, max_steps=10, phi_length=4)
dataset2 = DataSet(width=3, height=4, max_steps=1000, phi_length=4)
for i in range(100):
img = np.random.randint(0, 256, size=(4, 3))
action = np.random.randint(16)
reward = np.random.random()
terminal = False
if np.random.random() < .05:
terminal = True
dataset1.add_sample(img, action, reward, terminal)
dataset2.add_sample(img, action, reward, terminal)
np.testing.assert_array_almost_equal(dataset1.last_phi(),
dataset2.last_phi())
print "passed"
def test_iterator():
    # Add four sequential samples, then walk all full batches of size 2.
    dataset = DataSet(width=2, height=1, max_steps=10, phi_length=2)
    img1 = np.array([[1, 1]], dtype='uint8')
    img2 = np.array([[2, 2]], dtype='uint8')
    img3 = np.array([[3, 3]], dtype='uint8')
    img4 = np.array([[3, 3]], dtype='uint8')
    dataset.add_sample(img1, 1, 1, False)
    dataset.add_sample(img2, 2, 2, False)
    dataset.add_sample(img3, 3, 3, False)
    dataset.add_sample(img4, 4, 4, True)
    # batch_iterator yields (states, actions, rewards, terminals, next_states).
    for s, a, r, t, ns in dataset.batch_iterator(2):
        print "s ", s, "a ",a, "r ",r,"t ", t,"ns ", ns
def test_random_batch():
    """Check that random_batch() results are independent of buffer capacity.

    Two datasets receive identical samples; re-seeding numpy identically
    before each random_batch() call must then yield identical batches.
    """
    dataset1 = DataSet(width=3, height=4, max_steps=50, phi_length=4)
    dataset2 = DataSet(width=3, height=4, max_steps=50, phi_length=4,
                       capacity=2000)
    # BUG FIX: np.random.seed() requires an integer in [0, 2**32).
    # hash(time.time()) can be negative (and is platform-dependent), which
    # makes seed() raise ValueError.  Use the truncated integer time instead.
    np.random.seed(int(time.time()) % (2 ** 32))
    for i in range(100):
        img = np.random.randint(0, 256, size=(4, 3))
        action = np.random.randint(16)
        reward = np.random.random()
        terminal = False
        if np.random.random() < .05:
            terminal = True
        dataset1.add_sample(img, action, reward, terminal)
        dataset2.add_sample(img, action, reward, terminal)
        if i > 10:
            # Same seed before each call => both datasets must draw exactly
            # the same random batch.
            np.random.seed(i * 11 * i)
            states1, actions1, rewards1, next_states1, terminals1 = \
                dataset1.random_batch(10)
            np.random.seed(i * 11 * i)
            states2, actions2, rewards2, next_states2, terminals2 = \
                dataset2.random_batch(10)
            np.testing.assert_array_almost_equal(states1, states2)
            np.testing.assert_array_almost_equal(actions1, actions2)
            np.testing.assert_array_almost_equal(rewards1, rewards2)
            np.testing.assert_array_almost_equal(next_states1, next_states2)
            np.testing.assert_array_almost_equal(terminals1, terminals2)
            # Re-randomize the stream so the following add_sample() draws are
            # not determined by the fixed per-iteration seed above.
            np.random.seed(int(time.time()) % (2 ** 32))
def test_memory_usage_ok():
    # Long-running soak test: keeps adding samples and drawing batches while
    # periodically printing timing and process memory usage.  Requires the
    # third-party memory_profiler package and runs (nearly) forever; intended
    # to be watched manually for unbounded memory growth.
    import memory_profiler
    dataset = DataSet(width=80, height=80, max_steps=100000, phi_length=4)
    last = time.time()
    for i in xrange(1000000000):
        if (i % 100000) == 0:
            print i
        dataset.add_sample(np.random.random((80, 80)), 1, 1, False)
        # Only start sampling batches once the buffer has wrapped at least once.
        if i > 200000:
            states, actions, rewards, next_states, terminals = \
                dataset.random_batch(32)
        # 10007 is prime, so the reports de-synchronize from the 100000 prints.
        if (i % 10007) == 0:
            print time.time() - last
            mem_usage = memory_profiler.memory_usage(-1)
            print len(dataset), mem_usage
            last = time.time()
def main():
    # Exactly one test is enabled at a time; swap the comments to run another.
    #speed_tests()
    test_memory_usage_ok()
    #test_random_batch()
    #max_size_tests()
    #simple_tests()
    #test_iterator()


if __name__ == "__main__":
    main()
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTime}.
"""
__metaclass__ = type
import os, signal, time
from twisted.internet.defer import TimeoutError, Deferred, gatherResults
from twisted.internet.protocol import ClientFactory, Protocol
from twisted.trial.unittest import TestCase, SkipTest
from twisted.python.runtime import platform
from twisted.python.reflect import namedAny, fullyQualifiedName
from twisted.python import log
from twisted.python.failure import Failure
# Access private APIs.
if platform.isWindows():
process = None
else:
from twisted.internet import process
def needsRunningReactor(reactor, thunk):
    """
    Arrange for C{thunk} to be invoked once C{reactor} is running.

    Several tests in this module must stop the reactor when they finish, and
    C{reactor.stop()} raises an exception if the reactor is not already
    running.  Some APIs under test (notably an endpoint's C{connect()}
    against a local interface address) may fire their L{Deferred}s
    synchronously, so running the finishing code directly could execute
    before C{reactor.run()} and leave the test unable to stop the reactor.
    Deferring through C{callWhenRunning} guarantees the reactor is running
    when C{thunk} executes.

    (This is just an alias for
    L{twisted.internet.interfaces.IReactorCore.callWhenRunning} on the given
    reactor parameter, kept so this rationale lives in one place instead of
    being repeated as a comment everywhere.)

    @param reactor: the L{twisted.internet.interfaces.IReactorCore} under test

    @param thunk: a 0-argument callable which eventually finishes the test in
        question, probably in a L{Deferred} callback.
    """
    reactor.callWhenRunning(thunk)
class ConnectableProtocol(Protocol):
    """
    A protocol for use with L{runProtocolsWithReactor}.

    The protocol and its peer are expected to disconnect from each other
    eventually.

    @ivar reactor: The reactor used in this test.

    @ivar disconnectReason: The L{Failure} passed to C{connectionLost}.

    @ivar _done: A L{Deferred} which will be fired when the connection is
        lost.
    """
    disconnectReason = None

    def _setAttributes(self, reactor, done):
        """
        Attach externally-known state to this protocol; invoked by
        L{runProtocolsWithReactor} once the protocol has been instantiated.

        @param reactor: The reactor used in this test.

        @param done: A L{Deferred} which will be fired when the connection is
            lost.
        """
        self.reactor = reactor
        self._done = done

    def connectionLost(self, reason):
        self.disconnectReason = reason
        self._done.callback(None)
        # Drop the reference so the Deferred cannot be fired a second time.
        del self._done
class EndpointCreator:
    """
    Builds matched client and server endpoints that know how to connect to
    each other.  Subclasses implement both factory methods for a particular
    transport type.
    """

    def server(self, reactor):
        """
        Return an C{IStreamServerEndpoint} provider used to create the
        listening side of the connection type being tested.
        """
        raise NotImplementedError()

    def client(self, reactor, serverAddress):
        """
        Return an C{IStreamClientEndpoint} provider used to create the
        connecting side of the connection type being tested.
        """
        raise NotImplementedError()
class _SingleProtocolFactory(ClientFactory):
    """
    Factory to be used by L{runProtocolsWithReactor}.

    It always returns the same protocol (i.e. is intended for only a single connection).
    """

    def __init__(self, protocol):
        # The one protocol instance every buildProtocol() call will return.
        self._protocol = protocol

    def buildProtocol(self, addr):
        return self._protocol
def runProtocolsWithReactor(reactorBuilder, serverProtocol, clientProtocol,
                            endpointCreator):
    """
    Connect two protocols using endpoints and a new reactor instance.

    A new reactor will be created and run, with the client and server protocol
    instances connected to each other using the given endpoint creator.  The
    protocols should run through some set of tests, then disconnect; when both
    have disconnected the reactor will be stopped and the function will
    return.

    @param reactorBuilder: A L{ReactorBuilder} instance.
    @param serverProtocol: A L{ConnectableProtocol} that will be the server.
    @param clientProtocol: A L{ConnectableProtocol} that will be the client.
    @param endpointCreator: An instance of L{EndpointCreator}.

    @return: The reactor run by this test.
    """
    reactor = reactorBuilder.buildReactor()
    serverProtocol._setAttributes(reactor, Deferred())
    clientProtocol._setAttributes(reactor, Deferred())
    serverFactory = _SingleProtocolFactory(serverProtocol)
    clientFactory = _SingleProtocolFactory(clientProtocol)

    # Listen on a port:
    serverEndpoint = endpointCreator.server(reactor)
    d = serverEndpoint.listen(serverFactory)

    # Connect to the port:
    def gotPort(p):
        clientEndpoint = endpointCreator.client(
            reactor, p.getHost())
        return clientEndpoint.connect(clientFactory)
    d.addCallback(gotPort)

    # Stop reactor when both connections are lost:
    def failed(result):
        # Log rather than re-raise, so the reactor is still stopped below
        # even when connection setup fails.
        log.err(result, "Connection setup failed.")
    disconnected = gatherResults([serverProtocol._done, clientProtocol._done])
    d.addCallback(lambda _: disconnected)
    d.addErrback(failed)
    d.addCallback(lambda _: needsRunningReactor(reactor, reactor.stop))

    reactorBuilder.runReactor(reactor)
    return reactor
class ReactorBuilder:
    """
    L{TestCase} mixin which provides a reactor-creation API.  This mixin
    defines C{setUp} and C{tearDown}, so mix it in before L{TestCase} or call
    its methods from the overridden ones in the subclass.

    @cvar skippedReactors: A dict mapping FQPN strings of reactors for
        which the tests defined by this class will be skipped to strings
        giving the skip message.
    @cvar requiredInterfaces: A C{list} of interfaces which the reactor must
        provide or these tests will be skipped.  The default, C{None}, means
        that no interfaces are required.
    @ivar reactorFactory: A no-argument callable which returns the reactor to
        use for testing.
    @ivar originalHandler: The SIGCHLD handler which was installed when setUp
        ran and which will be re-installed when tearDown runs.
    @ivar _reactors: A list of FQPN strings giving the reactors for which
        TestCases will be created.
    """
    _reactors = [
        # Select works everywhere
        "twisted.internet.selectreactor.SelectReactor",
        ]

    if platform.isWindows():
        # PortableGtkReactor is only really interesting on Windows,
        # but not really Windows specific; if you want you can
        # temporarily move this up to the all-platforms list to test
        # it on other platforms.  It's not there in general because
        # it's not _really_ worth it to support on other platforms,
        # since no one really wants to use it on other platforms.
        _reactors.extend([
                "twisted.internet.gtk2reactor.PortableGtkReactor",
                "twisted.internet.gireactor.PortableGIReactor",
                "twisted.internet.gtk3reactor.PortableGtk3Reactor",
                "twisted.internet.win32eventreactor.Win32Reactor",
                "twisted.internet.iocpreactor.reactor.IOCPReactor"])
    else:
        _reactors.extend([
                "twisted.internet.glib2reactor.Glib2Reactor",
                "twisted.internet.gtk2reactor.Gtk2Reactor",
                "twisted.internet.gireactor.GIReactor",
                "twisted.internet.gtk3reactor.Gtk3Reactor"])
        if platform.isMacOSX():
            _reactors.append("twisted.internet.cfreactor.CFReactor")
        else:
            _reactors.extend([
                    "twisted.internet.pollreactor.PollReactor",
                    "twisted.internet.epollreactor.EPollReactor"])
            if not platform.isLinux():
                # Presumably Linux is not going to start supporting kqueue, so
                # skip even trying this configuration.
                _reactors.extend([
                        # Support KQueue on non-OS-X POSIX platforms for now.
                        "twisted.internet.kqreactor.KQueueReactor",
                        ])

    reactorFactory = None
    originalHandler = None
    requiredInterfaces = None
    skippedReactors = {}

    def setUp(self):
        """
        Clear the SIGCHLD handler, if there is one, to ensure an environment
        like the one which exists prior to a call to L{reactor.run}.
        """
        if not platform.isWindows():
            self.originalHandler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)

    def tearDown(self):
        """
        Restore the original SIGCHLD handler and reap processes as long as
        there seem to be any remaining.
        """
        if self.originalHandler is not None:
            signal.signal(signal.SIGCHLD, self.originalHandler)
        if process is not None:
            begin = time.time()
            while process.reapProcessHandlers:
                log.msg(
                    "ReactorBuilder.tearDown reaping some processes %r" % (
                        process.reapProcessHandlers,))
                process.reapAllProcesses()

                # The process should exit on its own.  However, if it
                # doesn't, we're stuck in this loop forever.  To avoid
                # hanging the test suite, eventually give the process some
                # help exiting and move on.
                time.sleep(0.001)
                if time.time() - begin > 60:
                    for pid in process.reapProcessHandlers:
                        os.kill(pid, signal.SIGKILL)
                    raise Exception(
                        "Timeout waiting for child processes to exit: %r" % (
                            process.reapProcessHandlers,))

    def unbuildReactor(self, reactor):
        """
        Clean up any resources which may have been allocated for the given
        reactor by its creation or by a test which used it.
        """
        # Chris says:
        #
        # XXX These explicit calls to clean up the waker (and any other
        # internal readers) should become obsolete when bug #3063 is
        # fixed. -radix, 2008-02-29. Fortunately it should probably cause an
        # error when bug #3063 is fixed, so it should be removed in the same
        # branch that fixes it.
        #
        # -exarkun
        reactor._uninstallHandler()
        if getattr(reactor, '_internalReaders', None) is not None:
            for reader in reactor._internalReaders:
                reactor.removeReader(reader)
                reader.connectionLost(None)
            reactor._internalReaders.clear()

        # Here's an extra thing unrelated to wakers but necessary for
        # cleaning up after the reactors we make. -exarkun
        reactor.disconnectAll()

        # It would also be bad if any timed calls left over were allowed to
        # run.
        calls = reactor.getDelayedCalls()
        for c in calls:
            c.cancel()

    def buildReactor(self):
        """
        Create and return a reactor using C{self.reactorFactory}.
        """
        try:
            from twisted.internet.cfreactor import CFReactor
            from twisted.internet import reactor as globalReactor
        except ImportError:
            pass
        else:
            if (isinstance(globalReactor, CFReactor)
                and self.reactorFactory is CFReactor):
                raise SkipTest(
                    "CFReactor uses APIs which manipulate global state, "
                    "so it's not safe to run its own reactor-builder tests "
                    "under itself")
        try:
            reactor = self.reactorFactory()
        except:
            # NOTE: intentionally broad -- any failure to instantiate means
            # the reactor is unusable on this platform and the test should
            # be skipped, not errored.
            #
            # Unfortunately, not all errors which result in a reactor
            # being unusable are detectable without actually
            # instantiating the reactor.  So we catch some more here
            # and skip the test if necessary.  We also log it to aid
            # with debugging, but flush the logged error so the test
            # doesn't fail.
            log.err(None, "Failed to install reactor")
            self.flushLoggedErrors()
            raise SkipTest(Failure().getErrorMessage())
        else:
            if self.requiredInterfaces is not None:
                missing = filter(
                    lambda required: not required.providedBy(reactor),
                    self.requiredInterfaces)
                if missing:
                    self.unbuildReactor(reactor)
                    raise SkipTest("%s does not provide %s" % (
                        fullyQualifiedName(reactor.__class__),
                        ",".join([fullyQualifiedName(x) for x in missing])))
        self.addCleanup(self.unbuildReactor, reactor)
        return reactor

    def runReactor(self, reactor, timeout=None):
        """
        Run the reactor for at most the given amount of time.

        @param reactor: The reactor to run.

        @type timeout: C{int} or C{float}
        @param timeout: The maximum amount of time, specified in seconds, to
            allow the reactor to run.  If the reactor is still running after
            this much time has elapsed, it will be stopped and an exception
            raised.  If C{None}, the default test method timeout imposed by
            Trial will be used.  This depends on the L{IReactorTime}
            implementation of C{reactor} for correct operation.

        @raise TimeoutError: If the reactor is still running after C{timeout}
            seconds.
        """
        if timeout is None:
            timeout = self.getTimeout()

        timedOut = []
        def stop():
            timedOut.append(None)
            reactor.stop()

        reactor.callLater(timeout, stop)
        reactor.run()
        if timedOut:
            raise TimeoutError(
                "reactor still running after %s seconds" % (timeout,))

    def makeTestCaseClasses(cls):
        """
        Create a L{TestCase} subclass which mixes in C{cls} for each known
        reactor and return a dict mapping their names to them.
        """
        classes = {}
        for reactor in cls._reactors:
            shortReactorName = reactor.split(".")[-1]
            name = (cls.__name__ + "." + shortReactorName).replace(".", "_")
            class testcase(cls, TestCase):
                __module__ = cls.__module__
                if reactor in cls.skippedReactors:
                    skip = cls.skippedReactors[reactor]
                try:
                    reactorFactory = namedAny(reactor)
                except:
                    # NOTE: intentionally broad -- any failure to import the
                    # reactor becomes the skip message for its test cases.
                    skip = Failure().getErrorMessage()
            testcase.__name__ = name
            classes[testcase.__name__] = testcase
        return classes
    makeTestCaseClasses = classmethod(makeTestCaseClasses)
__all__ = ['ReactorBuilder']
| |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""QR-DQN agent class."""
# pylint: disable=g-bad-import-order
from typing import Any, Callable, Mapping, Text
from absl import logging
import chex
import dm_env
import jax
import jax.numpy as jnp
import numpy as np
import optax
import rlax
from dqn_zoo import parts
from dqn_zoo import processors
from dqn_zoo import replay as replay_lib
# Batch variant of quantile_q_learning with fixed tau input across batch.
# vmap maps over the leading (batch) axis of every per-transition argument,
# while the quantile midpoints (arg 2) and huber parameter (last arg) are
# shared across the whole batch (in_axes=None).
_batch_quantile_q_learning = jax.vmap(
    rlax.quantile_q_learning, in_axes=(0, None, 0, 0, 0, 0, 0, None))
class QrDqn(parts.Agent):
  """Quantile Regression DQN agent.

  Holds online and target network parameters, a transition replay buffer,
  and jitted update/action-selection functions built in __init__.
  """

  def __init__(
      self,
      preprocessor: processors.Processor,
      sample_network_input: jnp.ndarray,
      network: parts.Network,
      quantiles: jnp.ndarray,
      optimizer: optax.GradientTransformation,
      transition_accumulator: Any,
      replay: replay_lib.TransitionReplay,
      batch_size: int,
      exploration_epsilon: Callable[[int], float],
      min_replay_capacity_fraction: float,
      learn_period: int,
      target_network_update_period: int,
      huber_param: float,
      rng_key: parts.PRNGKey,
  ):
    self._preprocessor = preprocessor
    self._replay = replay
    self._transition_accumulator = transition_accumulator
    self._batch_size = batch_size
    self._exploration_epsilon = exploration_epsilon
    # Learning only begins once the replay holds this many transitions.
    self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity
    self._learn_period = learn_period
    self._target_network_update_period = target_network_update_period

    # Initialize network parameters and optimizer.
    self._rng_key, network_rng_key = jax.random.split(rng_key)
    # A leading batch axis of 1 is added to the sample input for init.
    self._online_params = network.init(network_rng_key,
                                       sample_network_input[None, ...])
    self._target_params = self._online_params
    self._opt_state = optimizer.init(self._online_params)

    # Other agent state: last action, frame count, etc.
    self._action = None
    self._frame_t = -1  # Current frame index.
    self._statistics = {'state_value': np.nan}

    # Define jitted loss, update, and policy functions here instead of as
    # class methods, to emphasize that these are meant to be pure functions
    # and should not access the agent object's state via `self`.

    def loss_fn(online_params, target_params, transitions, rng_key):
      """Calculates loss given network parameters and transitions."""
      # Compute Q value distributions.
      _, online_key, target_key = jax.random.split(rng_key, 3)
      dist_q_tm1 = network.apply(online_params, online_key,
                                 transitions.s_tm1).q_dist
      dist_q_target_t = network.apply(target_params, target_key,
                                      transitions.s_t).q_dist
      losses = _batch_quantile_q_learning(
          dist_q_tm1,
          quantiles,
          transitions.a_tm1,
          transitions.r_t,
          transitions.discount_t,
          dist_q_target_t,  # No double Q-learning here.
          dist_q_target_t,
          huber_param,
      )
      chex.assert_shape(losses, (self._batch_size,))
      loss = jnp.mean(losses)
      return loss

    def update(rng_key, opt_state, online_params, target_params, transitions):
      """Computes learning update from batch of replay transitions."""
      rng_key, update_key = jax.random.split(rng_key)
      d_loss_d_params = jax.grad(loss_fn)(online_params, target_params,
                                          transitions, update_key)
      updates, new_opt_state = optimizer.update(d_loss_d_params, opt_state)
      new_online_params = optax.apply_updates(online_params, updates)
      return rng_key, new_opt_state, new_online_params

    self._update = jax.jit(update)

    def select_action(rng_key, network_params, s_t, exploration_epsilon):
      """Samples action from eps-greedy policy wrt Q-values at given state."""
      rng_key, apply_key, policy_key = jax.random.split(rng_key, 3)
      q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0]
      a_t = rlax.epsilon_greedy().sample(policy_key, q_t, exploration_epsilon)
      # Greedy state value, tracked purely for the statistics dict.
      v_t = jnp.max(q_t, axis=-1)
      return rng_key, a_t, v_t

    self._select_action = jax.jit(select_action)

  def step(self, timestep: dm_env.TimeStep) -> parts.Action:
    """Selects action given timestep and potentially learns."""
    self._frame_t += 1
    timestep = self._preprocessor(timestep)

    if timestep is None:  # Repeat action.
      action = self._action
    else:
      action = self._action = self._act(timestep)
      for transition in self._transition_accumulator.step(timestep, action):
        self._replay.add(transition)

    if self._replay.size < self._min_replay_capacity:
      return action

    if self._frame_t % self._learn_period == 0:
      self._learn()

    if self._frame_t % self._target_network_update_period == 0:
      # Periodic hard copy of online parameters into the target network.
      self._target_params = self._online_params

    return action

  def reset(self) -> None:
    """Resets the agent's episodic state such as frame stack and action repeat.

    This method should be called at the beginning of every episode.
    """
    self._transition_accumulator.reset()
    processors.reset(self._preprocessor)
    self._action = None

  def _act(self, timestep) -> parts.Action:
    """Selects action given timestep, according to epsilon-greedy policy."""
    s_t = timestep.observation
    self._rng_key, a_t, v_t = self._select_action(self._rng_key,
                                                  self._online_params, s_t,
                                                  self.exploration_epsilon)
    a_t, v_t = jax.device_get((a_t, v_t))
    self._statistics['state_value'] = v_t
    return parts.Action(a_t)

  def _learn(self) -> None:
    """Samples a batch of transitions from replay and learns from it."""
    logging.log_first_n(logging.INFO, 'Begin learning', 1)
    transitions = self._replay.sample(self._batch_size)
    self._rng_key, self._opt_state, self._online_params = self._update(
        self._rng_key,
        self._opt_state,
        self._online_params,
        self._target_params,
        transitions,
    )

  @property
  def online_params(self) -> parts.NetworkParams:
    """Returns current parameters of Q-network."""
    return self._online_params

  @property
  def statistics(self) -> Mapping[Text, float]:
    """Returns current agent statistics as a dictionary."""
    # Check for DeviceArrays in values as this can be very slow.
    assert all(
        not isinstance(x, jnp.DeviceArray) for x in self._statistics.values())
    return self._statistics

  @property
  def exploration_epsilon(self) -> float:
    """Returns epsilon value currently used by (eps-greedy) behavior policy."""
    return self._exploration_epsilon(self._frame_t)

  def get_state(self) -> Mapping[Text, Any]:
    """Retrieves agent state as a dictionary (e.g. for serialization)."""
    state = {
        'rng_key': self._rng_key,
        'frame_t': self._frame_t,
        'opt_state': self._opt_state,
        'online_params': self._online_params,
        'target_params': self._target_params,
        'replay': self._replay.get_state(),
    }
    return state

  def set_state(self, state: Mapping[Text, Any]) -> None:
    """Sets agent state from a (potentially de-serialized) dictionary."""
    self._rng_key = state['rng_key']
    self._frame_t = state['frame_t']
    self._opt_state = jax.device_put(state['opt_state'])
    self._online_params = jax.device_put(state['online_params'])
    self._target_params = jax.device_put(state['target_params'])
    self._replay.set_state(state['replay'])
| |
##
# \namespace cross3d.abstract.studiomaxapplication
#
# \remarks The StudiomaxApplication class will define all operations for application interaction. It is a singleton class, so calling cross3d.Application() will
# always return the same instance of Application. One of its main functions is connecting application callbacks to cross3d.Dispatch.
#
# The StudiomaxApplication is a QObject instance and any changes to the scene data can be controlled by connecting to the signals defined here.
#
# When subclassing the AbstractScene, methods tagged as @abstractmethod will be required to be overwritten. Methods tagged with [virtual]
# are flagged such that additional operations could be required based on the needs of the method. All @abstractmethod methods MUST be implemented
# in a subclass.
#
# \author Mikeh
# \author Blur Studio
# \date 06/07/11
#
import cross3d
from cross3d.abstract.abstractapplication import AbstractApplication
from Py3dsMax import mxs
from cross3d.enum import EnumGroup, Enum
from PyQt4.QtCore import QTimer
_n = mxs.pyhelper.namify
dispatch = None
# initialize callback scripts
_STUDIOMAX_CALLBACK_TEMPLATE = """
global cross3d
if ( cross3d == undefined ) do ( cross3d = pymax.import "cross3d" )
if ( cross3d != undefined ) do (
local ms_args = (callbacks.notificationParam())
cross3d.%(cls)s.%(function)s "%(signal)s" %(args)s
)
"""
_STUDIOMAX_CALLBACK_TEMPLATE_NO_ARGS = """
global cross3d
if ( cross3d == undefined ) do ( cross3d = pymax.import "cross3d" )
if ( cross3d != undefined ) do (
cross3d.%(cls)s.%(function)s "%(signal)s"
)
"""
_STUDIOMAX_VIEWPORT_TEMPLATE = """
fn blurfn_%(signal)s =
(
if ( cross3d == undefined ) do ( cross3d = pymax.import "cross3d" )
if ( cross3d != undefined ) do (
cross3d.%(cls)s.%(function)s "%(signal)s"
)
)
"""
class _ConnectionType(EnumGroup):
    # How a callback is hooked into StudioMax: General entries go through
    # mxs.callbacks.addScript, Viewport entries through
    # registerRedrawViewsCallback (see _connectStudiomaxSignal).
    General = Enum()
    Viewport = Enum()
class _ConnectionDef:
    """ Class that stores all necessary info to connect cross3d.dispatch to StudioMax.

    Args:
        signal str: The name of the cross3d.dispatch signal.
        callback str: The name of the StudioMax callback.
        arguments str: A string of maxscript arguments passed to the signal when
            the callback is emitted.
        function str: The name of the cross3d.dispatch function. Used in the
            callback maxscript.
        callbackType _ConnectionType: Controls how the callback is connected to
            StudioMax.
        cls str: The name of the class called in cross3d. Normally 'dispatch'.
        associated list of _ConnectionDef: When this signal is connected, all
            _ConnectionDef's in this list are also connected to. This is used by
            filePostMerge to disable all callbacks during the opening of a file.
    """
    def __init__(self, signal, callback, arguments='', function='dispatch', callbackType=_ConnectionType.General, cls='dispatch', associated=None):
        self.signal = signal
        self.callback = callback
        self.arguments = arguments
        self.function = function
        self.callbackType = callbackType
        self.cls = cls
        # BUG FIX: the previous default of `associated=[]` was a mutable
        # default argument -- one shared list reused by every instance created
        # without an explicit `associated`, so an append on one definition
        # leaked into all the others.  Default to None and build a fresh list
        # per instance instead.
        self.associated = associated if associated is not None else []

    @staticmethod
    def asDict(signal, callback, arguments = '', function = 'dispatch', callbackType=_ConnectionType.General):
        """ Build a one-entry dict mapping the signal name to its _ConnectionDef. """
        return {signal:_ConnectionDef(signal, callback, arguments, function, callbackType)}
class _ConnectionStore(object):
def __init__(self):
self._store = []
def update(self, connection):
self._store.append(connection)
def getConnectionsBySignalName(self, signal):
return [c for c in self._store if c.signal == signal]
def getSignalNames(self):
return list(set([c.signal for c in self._store]))
class StudiomaxApplication(AbstractApplication):
    # create a mapping of callbacks to be used when connecting signals
    _connectionMap = _ConnectionStore()
    # --- Scene lifecycle ---------------------------------------------------
    _connectionMap.update(_ConnectionDef('sceneNewRequested', 'systemPreNew'))
    _connectionMap.update(_ConnectionDef('sceneNewFinished', 'systemPostNew'))
    _connectionMap.update(
        _ConnectionDef(
            'sceneOpenRequested',
            'filePreOpen',
            '""',
            function='_prePostCallback',
            cls='application'
        )
    )
    _connectionMap.update(
        _ConnectionDef(
            'sceneOpenFinished',
            'filePostOpen',
            '""',
            function='_prePostCallback',
            cls='application',
            # Opening a file also re-arms the matching "requested" callbacks.
            associated=_connectionMap.getConnectionsBySignalName('sceneOpenRequested')
        )
    )
    _connectionMap.update(
        _ConnectionDef(
            'sceneMergeRequested',
            'filePreMerge',
            function='_prePostCallback',
            cls='application'
        )
    )
    _connectionMap.update(_ConnectionDef('sceneReferenceRequested', 'objectXrefPreMerge'))
    _connectionMap.update(_ConnectionDef('sceneReferenceRequested', 'sceneXrefPreMerge'))
    _connectionMap.update(
        _ConnectionDef(
            'sceneMergeFinished',
            'filePostMerge',
            function='_prePostCallback',
            cls='application',
            associated=_connectionMap.getConnectionsBySignalName('sceneMergeRequested')
        )
    )
    _connectionMap.update(_ConnectionDef('sceneReferenceFinished', 'objectXrefPostMerge'))
    _connectionMap.update(_ConnectionDef('sceneReferenceFinished', 'sceneXrefPostMerge'))
    # Save callbacks forward the maxscript notification param (file name) as a string.
    _connectionMap.update(_ConnectionDef('sceneSaveRequested', 'filePreSave', '(if (ms_args != undefined) then (ms_args as string) else "")'))
    _connectionMap.update(_ConnectionDef('sceneSaveFinished', 'filePostSave', '(if (ms_args != undefined) then (ms_args as string) else "")'))
    _connectionMap.update(_ConnectionDef('scenePreReset', 'systemPreReset'))
    _connectionMap.update(_ConnectionDef('sceneReset', 'systemPostReset'))
    _connectionMap.update(_ConnectionDef('layerCreated', 'layerCreated'))
    _connectionMap.update(_ConnectionDef('layerDeleted', 'layerDeleted'))
    _connectionMap.update(_ConnectionDef('startupFinished', 'postSystemStartup'))
    _connectionMap.update(_ConnectionDef('shutdownStarted', 'preSystemShutdown'))
    _connectionMap.update(_ConnectionDef('sceneImportFinished', 'postImport'))
    _connectionMap.update(_ConnectionDef('selectionChanged', 'selectionSetChanged'))
    # --- Per-node callbacks: forward the affected node via dispatchObject --
    _connectionMap.update(_ConnectionDef('objectFreeze', 'nodeFreeze', 'ms_args', 'dispatchObject'))
    _connectionMap.update(_ConnectionDef('objectUnfreeze', 'nodeUnfreeze', 'ms_args', 'dispatchObject'))
    _connectionMap.update(_ConnectionDef('objectHide', 'nodeHide', 'ms_args', 'dispatchObject'))
    _connectionMap.update(_ConnectionDef('objectUnHide', 'nodeUnHide', 'ms_args', 'dispatchObject'))
    _connectionMap.update(_ConnectionDef('objectRenamed', 'nodeNameSet', '(if (ms_args != undefined) then (#(ms_args[1], ms_args[2], ms_args[3])) else #("", "", ""))', 'dispatchRename'))
    _connectionMap.update(_ConnectionDef('objectCreated', 'nodeCreated', 'ms_args', 'dispatchObject'))
    _connectionMap.update(_ConnectionDef('objectCloned', 'nodeCloned', 'ms_args', 'dispatchObject'))
    _connectionMap.update(_ConnectionDef('objectAdded', 'sceneNodeAdded', 'ms_args', 'dispatchObject'))
    _connectionMap.update(_ConnectionDef('objectPreDelete', 'nodePreDelete', 'ms_args', 'preDelete'))
    _connectionMap.update(_ConnectionDef('objectPostDelete', 'nodePostDelete', function = 'postDelete'))
    _connectionMap.update(_ConnectionDef('objectParented', 'nodeLinked', 'ms_args', 'dispatchObject'))
    _connectionMap.update(_ConnectionDef('objectUnparented', 'nodeUnlinked', 'ms_args', 'dispatchObject'))
    # Viewport redraw uses the redraw-views callback mechanism, not addScript.
    _connectionMap.update(_ConnectionDef('viewportRedrawn', '', function='dispatchFunction', callbackType=_ConnectionType.Viewport))
def __init__(self):
    super(StudiomaxApplication, self).__init__()
    # Single-shot timer that debounces sceneMergeFinished: several merge
    # callbacks arriving within the timeout collapse into one emitted signal
    # (see _prePostCallback / _sceneMergeFinishedTimeout).
    self._sceneMergeFinishedTimer = QTimer(self)
    self._sceneMergeFinishedTimer.setSingleShot(True)
    self._sceneMergeFinishedTimer.timeout.connect(self._sceneMergeFinishedTimeout)
    # Variable used to prevent emitting signals when a file is being opened.
    self._openingScene = False
    # Callback ID names registered with Max that must be removed on disconnect.
    self._disconnectNames = set()
def _connectStudiomaxSignal(self, connDef, cross3dSignal):
    """
    \remarks Responsible for connecting a signal to studiomax.
    Viewport connections are registered through Max's redraw-views
    callback mechanism; all other connections are installed as general
    callback scripts, along with any associated callbacks the
    connection definition requires.
    """
    # store the maxscript methods needed
    if connDef.callbackType == _ConnectionType.Viewport:
        # Build a named MAXScript function (blurfn_<signal>) that forwards
        # viewport redraws to the dispatch function.
        signal = _STUDIOMAX_VIEWPORT_TEMPLATE % {
            'cls': 'dispatch',
            'function': connDef.function,
            'signal': cross3dSignal
        }
        # Ensure that if the old signal existed it is removed before redefining it.
        # If function is undefined it will do nothing
        mxs.unregisterRedrawViewsCallback(getattr(mxs, 'blurfn_%s' % cross3dSignal))
        mxs.execute(signal)
        mxs.registerRedrawViewsCallback(getattr(mxs, 'blurfn_%s' % cross3dSignal))
    else:
        # Connect the callback
        self._addCallback(connDef, cross3dSignal)
        # Connect any associated callbacks. Using a different ID name allows
        # us to disconnect this signal without affecting any direct
        # connections to the associated callbacks.
        for reqDef in connDef.associated:
            self._addCallback(reqDef, reqDef.signal, 'cross3dcallbacks_{}'.format(connDef.callback))
def _addCallback(self, connDef, cross3dSignal, callbackName='cross3dcallbacks'):
    """
    \remarks Register a MAXScript callback script for the given connection
    definition, choosing the template based on whether arguments
    need to be forwarded. The callback id (callbackName) is recorded
    so disconnect() can remove it later.
    """
    if connDef.arguments:
        script = _STUDIOMAX_CALLBACK_TEMPLATE % {
            'cls': connDef.cls,
            'function': connDef.function,
            'signal': cross3dSignal,
            'args': connDef.arguments
        }
    else:
        script = _STUDIOMAX_CALLBACK_TEMPLATE_NO_ARGS % {
            'cls': connDef.cls,
            'function': connDef.function,
            'signal': cross3dSignal
        }
    mxs.callbacks.addScript( _n(connDef.callback), script, id = _n(callbackName) )
    # Remember the id so disconnect() can remove every script we installed.
    self._disconnectNames.add(callbackName)
def _prePostCallback(self, signal, *args):
    """ Handle pre/post callbacks intelligently to reduce the number of callbacks.

    Debounces sceneMergeFinished with a timer, suppresses merge signals
    while a scene is being opened, and forwards everything else to
    dispatch unchanged.
    """
    if self.shouldBlockSignal(signal, dispatch.signalsBlocked()):
        # Ignore this callback if signals are blocked.
        return
    if signal == 'sceneMergeFinished':
        # Don't just emit this callback. The timer is used to allow the application
        # to call sceneMergeRequested if there is another merge request.
        # This prevents a single merge action emitting 3 sceneMergeFinished callbacks
        self._sceneMergeFinishedTimer.start(1000)
        return
    elif signal == 'sceneMergeRequested':
        # Stop the timer because we started on a new merge.
        # This prevents a single merge action emitting 3 sceneMergeFinished callbacks
        self._sceneMergeFinishedTimer.stop()
        return
    elif signal == 'sceneOpenFinished':
        # Re-Enable sending signals at a application level.
        self._openingScene = False
    elif signal == 'sceneOpenRequested':
        # Disable signals at the application level. This prevents sceneMerge signals
        # from being emitted during the open process.
        self._openingScene = True
    # Emit the signal as normal.
    dispatch.dispatch(signal, *args)
# Note: This code doesn't handle custom functions, so if its needed by this system
# it will have to be added.
def _sceneMergeFinishedTimeout(self):
    """ Emit the sceneMergeFinished when the timeout expires.

    Connected to self._sceneMergeFinishedTimer; the timer debounces
    multiple merge callbacks into a single dispatch (see _prePostCallback).
    """
    dispatch.dispatch('sceneMergeFinished')
def allowedCharacters(self):
    """Return the set of characters permitted in Studiomax node names,
    expressed as a character-class style string."""
    return 'A-Za-z0-9_. /+*<>=|-'
def connect(self):
    """
    \remarks connect application specific callbacks to <cross3d.Dispatch>, dispatch will convert the native object to a cross3d object
    and emit a signal.
    connect is called when the first <cross3d.Dispatch> signal is connected.
    \return <bool> The Connection was successful
    """
    # Bind the module-level `dispatch` used by the callback handlers; the
    # import is deferred to avoid a circular import at module load time.
    global dispatch
    import cross3d
    dispatch = cross3d.dispatch
    return super(StudiomaxApplication, self).connect()
def connectCallback(self, signal):
    """
    \remarks Connects a single callback. This allows cross3d to only have to
    respond to callbacks that tools actually need, instead of all
    callbacks.
    \param signal <str> cross3d signal name to connect.
    """
    if signal in self._connectionMap.getSignalNames():
        connections = self._connectionMap.getConnectionsBySignalName(signal)
        # Renamed the loop variable from `object`, which shadowed the builtin.
        for connDef in connections:
            self._connectStudiomaxSignal(connDef, signal)
    else:
        cross3d.logger.debug('Connect: Signal %s has no signal map' % signal)
def disconnectCallback(self, signal):
    """
    \remarks Disconnect a single callback when it is no longer used.
    Viewport connections are unregistered from the redraw-views
    mechanism; normal connections (and their associated callbacks)
    have their callback scripts removed.
    """
    if signal in self._connectionMap.getSignalNames():
        connections = self._connectionMap.getConnectionsBySignalName(signal)
        for connDef in connections:
            if connDef.callbackType == _ConnectionType.Viewport:
                mxs.unregisterRedrawViewsCallback(getattr(mxs, 'blurfn_%s' % connDef.signal))
            else:
                mxs.callbacks.removeScripts(_n(connDef.callback), id = _n('cross3dcallbacks'))
                # Associated callbacks were registered under a per-connection
                # id (see _connectStudiomaxSignal), so remove those too.
                for reqDef in connDef.associated:
                    mxs.callbacks.removeScripts(_n(reqDef.callback), id = _n('cross3dcallbacks_{}'.format(connDef.callback)))
    else:
        cross3d.logger.debug('Disconnect: Signal %s has no signal map' % signal)
def disconnect(self):
    """
    \remarks disconnect application specific callbacks from <cross3d.Dispatch>. This will be called when <cross3d.Dispatch> is deleted,
    disconnect is called when the last <cross3d.Dispatch> signal is disconnected.
    """
    # remove all normal callbacks
    for name in self._disconnectNames:
        mxs.callbacks.removeScripts(id = _n(name))
    self._sceneMergeFinishedTimer.stop()
    # undefine the add callback function
    mxs.cross3daddcallback = None
    # remove the callback pointer to cross3d
    mxs.cross3d = None
    # Remove viewport callbacks.
    # Bug fix: the connection map registers this signal as 'viewportRedrawn'
    # (see the map above). The previous name 'viewportRedraw' never matched,
    # so the redraw-views callback was left registered after disconnect.
    self.disconnectCallback('viewportRedrawn')
    return
def log(self, message):
    """
    \remarks Print a message to the application output.
    \return <bool> always True.
    """
    # TODO: Can't seem to access the native log message.
    # Parenthesized form works identically for a single argument on both
    # Python 2 and Python 3 (the old `print message` statement is a
    # SyntaxError on Python 3).
    print(message)
    return True
def installDir(self):
    """ Returns the path to the application's install directory.

    Resolved through MAXScript's pathConfig using the '$max' symbol.

    :return: path string
    :rtype: str
    """
    return mxs.pathConfig.resolvePathSymbols('$max')
def isSilent(self):
    """Returns whether 3ds Max is running in silent mode (quiet mode,
    e.g. batch/network rendering)."""
    return mxs.GetQuietMode()
def preDeleteObject(self, callback, *args):
    """
    \remarks Pre-process the object that is going to be deleted.
    Caches the node's name so postDeleteObject can emit it after
    the node itself is gone.
    """
    if args:
        self._objectToBeDeleted = args[0].name
def postDeleteObject(self, callback, *args):
    """
    \remarks Emits the signal that a object has been deleted. This method is
    used for applications like max that generate a pre and post
    delete signal. The name cached by preDeleteObject is emitted.
    """
    if self._objectToBeDeleted:
        dispatch.objectDeleted.emit(self._objectToBeDeleted)
        # Clear the cached name so a later post-delete callback without a
        # matching pre-delete cannot re-emit a stale object name.
        self._objectToBeDeleted = None
def name(self):
    """Return the application's display name."""
    return "StudioMax"
def version(self, major=True):
    """Return the Max version.

    With major=True returns the major version as an int (maxVersion()[0]
    is the build number in thousands); otherwise returns the full
    dotted version string. Note: uses the Python 2 `unicode` builtin.
    """
    version = mxs.maxVersion()
    if major:
        return int(version[0] / 1000)
    else:
        return '.'.join([unicode(token) for token in version])
def refresh(self):
    """Force a complete viewport redraw unless refreshes are blocked.

    :return: True if a redraw was issued, False if blocked.
    """
    if not self._blockRefresh:
        mxs.completeRedraw()
        return True
    return False
def year(self):
    """Return the product year for the running Max version.

    Major version 1 shipped as "3D Studio MAX" circa 1999; the offset
    1998 maps the major version number to a calendar-year product name.
    """
    return 1998 + self.version()
def nameSpaceSeparator(self):
    """Separator character used between namespace parts in node names."""
    return '.'
def animationClipExtension(self):
    """File extension (without dot) for Max animation clip files."""
    return 'xaf'
def sceneFileExtension(self):
    """File extension (without dot) for native Max scene files."""
    return 'max'
def shouldBlockSignal(self, signal, default):
    """ Allows the Application to conditionally block a signal.

    Normally you should pass cross3d.dispatch.signalsBlocked() to default.
    If default is truthy it is returned unchanged, so a global
    cross3d.dispatch.blockSignals(True) always wins. Otherwise signals
    are suppressed while a scene is opening, with the single exception
    of 'sceneOpenFinished', which must always get through so listeners
    can re-sync when the load completes.

    Args:
        signal (str): The name of the signal to check if it should be blocked.
        default (bool): Returned if signal doesn't match any requirements.

    Returns:
        bool: If the signal should be blocked.
    """
    # A global block takes precedence over everything else.
    if default:
        return default
    # 'sceneOpenFinished' is never blocked; everything else is blocked
    # only while a scene open is in progress.
    return signal != 'sceneOpenFinished' and self._openingScene
def modelFileExtension(self):
    """Models use the same file extension as native Max scenes."""
    return self.sceneFileExtension()
def nameAndVersion(self):
    """Return the marketing name for the running Max version.

    Maps maxVersion()'s build number (in thousands) to a product name,
    falling back to the 'default' entry for unknown builds.

    :return: str such as "Max2014"
    """
    version = mxs.maxVersion()
    jobTypeDic = {
        '5100': "Max5",
        '6000': "Max6",
        '7000': "Max7",
        '8000': "Max8",
        '9000': "Max9",
        '10000': "Max10",
        '11000': "Max2009",
        '12000': "Max2010",
        '14000': "Max2012",
        '16000': "Max2014",
        '18000': "Max2016",
        'default': "Max2014"}
    # dict.has_key was removed in Python 3; dict.get with an explicit
    # fallback is equivalent and works on both interpreters.
    return jobTypeDic.get(str(version[0]), jobTypeDic['default'])
def id(self):
    """
    \remarks implements AbstractScene.softwareId to return a unique version/bits string information that will represent the exact
    version of the software being run.
    \return <str> e.g. "MAX2012_64"
    """
    # Major version: maxVersion()[0] is the build number in thousands.
    # NOTE: relies on Python 2 integer division semantics here.
    mversion = mxs.maxVersion()[0]/1000
    sixtyfour = ''
    if ( mversion > 10 ):
        mversion = 2009 + (mversion-11) # shifted to years at version 11
    if ( mxs.is64BitApplication() ):
        sixtyfour = '_64'
    return 'MAX%i%s' % (mversion,sixtyfour)
# Register the class so cross3d resolves the 'Application' symbol to it.
cross3d.registerSymbol( 'Application', StudiomaxApplication)
# Creating a single instance of Application for all code to use.
cross3d.registerSymbol( 'application', StudiomaxApplication())
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg.linalg_impl.matrix_exponential."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
def np_expm(x):  # pylint: disable=invalid-name
    """Slow but accurate reference matrix exponential.

    Sums the first 40 terms of the Taylor series sum_n x^n / n!.
    """
    total = np.zeros(x.shape, dtype=x.dtype)
    term = np.eye(x.shape[0], dtype=x.dtype)  # holds x^n / n!
    for order in range(40):
        if order > 0:
            term /= float(order)
        total += term
        term = np.dot(term, x)
    return total
class ExponentialOpTest(test.TestCase):
  """Correctness tests for linalg_impl.matrix_exponential."""

  def _verifyExponential(self, x, np_type):
    # Compare the TF op against the Taylor-series reference for one dtype.
    inp = x.astype(np_type)
    with self.cached_session(use_gpu=True):
      tf_ans = linalg_impl.matrix_exponential(inp)
      if x.size == 0:
        # expm of an empty input is empty with the same shape.
        np_ans = np.empty(x.shape, dtype=np_type)
      else:
        if x.ndim > 2:
          # Batched input: apply the reference exponential matrix-by-matrix.
          np_ans = np.zeros(inp.shape, dtype=np_type)
          for i in itertools.product(*[range(x) for x in inp.shape[:-2]]):
            np_ans[i] = np_expm(inp[i])
        else:
          np_ans = np_expm(inp)
      out = tf_ans.eval()
      # Loose tolerances: the 40-term Taylor reference is itself approximate.
      self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-3)

  def _verifyExponentialReal(self, x):
    # Verify against both real float dtypes.
    for np_type in [np.float32, np.float64]:
      self._verifyExponential(x, np_type)

  def _verifyExponentialComplex(self, x):
    # Verify against both complex dtypes.
    for np_type in [np.complex64, np.complex128]:
      self._verifyExponential(x, np_type)

  def _makeBatch(self, matrix1, matrix2):
    # Stack the two matrices and tile them into a (2, 3, ...) batch.
    matrix_batch = np.concatenate(
        [np.expand_dims(matrix1, 0),
         np.expand_dims(matrix2, 0)])
    matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
    return matrix_batch

  def testNonsymmetricReal(self):
    # 2x2 matrices
    matrix1 = np.array([[1., 2.], [3., 4.]])
    matrix2 = np.array([[1., 3.], [3., 5.]])
    self._verifyExponentialReal(matrix1)
    self._verifyExponentialReal(matrix2)
    # A multidimensional batch of 2x2 matrices
    self._verifyExponentialReal(self._makeBatch(matrix1, matrix2))

  def testNonsymmetricComplex(self):
    matrix1 = np.array([[1., 2.], [3., 4.]])
    matrix2 = np.array([[1., 3.], [3., 5.]])
    matrix1 = matrix1.astype(np.complex64)
    matrix1 += 1j * matrix1
    matrix2 = matrix2.astype(np.complex64)
    matrix2 += 1j * matrix2
    self._verifyExponentialComplex(matrix1)
    self._verifyExponentialComplex(matrix2)
    # Complex batch
    self._verifyExponentialComplex(self._makeBatch(matrix1, matrix2))

  def testSymmetricPositiveDefiniteReal(self):
    # 2x2 matrices
    matrix1 = np.array([[2., 1.], [1., 2.]])
    matrix2 = np.array([[3., -1.], [-1., 3.]])
    self._verifyExponentialReal(matrix1)
    self._verifyExponentialReal(matrix2)
    # A multidimensional batch of 2x2 matrices
    self._verifyExponentialReal(self._makeBatch(matrix1, matrix2))

  def testSymmetricPositiveDefiniteComplex(self):
    matrix1 = np.array([[2., 1.], [1., 2.]])
    matrix2 = np.array([[3., -1.], [-1., 3.]])
    matrix1 = matrix1.astype(np.complex64)
    matrix1 += 1j * matrix1
    matrix2 = matrix2.astype(np.complex64)
    matrix2 += 1j * matrix2
    self._verifyExponentialComplex(matrix1)
    self._verifyExponentialComplex(matrix2)
    # Complex batch
    self._verifyExponentialComplex(self._makeBatch(matrix1, matrix2))

  def testNonSquareMatrix(self):
    # When the exponential of a non-square matrix is attempted we should return
    # an error
    with self.assertRaises(ValueError):
      linalg_impl.matrix_exponential(np.array([[1., 2., 3.], [3., 4., 5.]]))

  def testWrongDimensions(self):
    # The input to the exponential should be at least a 2-dimensional tensor.
    tensor3 = constant_op.constant([1., 2.])
    with self.assertRaises(ValueError):
      linalg_impl.matrix_exponential(tensor3)

  def testEmpty(self):
    # Degenerate shapes: empty batch and 0x0 matrices must not crash.
    self._verifyExponentialReal(np.empty([0, 2, 2]))
    self._verifyExponentialReal(np.empty([2, 0, 0]))

  def testDynamic(self):
    # Shape known only at run time (fed through a placeholder).
    with self.session(use_gpu=True) as sess:
      inp = array_ops.placeholder(ops.dtypes.float32)
      expm = linalg_impl.matrix_exponential(inp)
      matrix = np.array([[1., 2.], [3., 4.]])
      sess.run(expm, feed_dict={inp: matrix})

  def testConcurrentExecutesWithoutError(self):
    # Two identically-seeded ops run in one session call must agree.
    with self.session(use_gpu=True) as sess:
      matrix1 = random_ops.random_normal([5, 5], seed=42)
      matrix2 = random_ops.random_normal([5, 5], seed=42)
      expm1 = linalg_impl.matrix_exponential(matrix1)
      expm2 = linalg_impl.matrix_exponential(matrix2)
      expm = sess.run([expm1, expm2])
      self.assertAllEqual(expm[0], expm[1])
class MatrixExponentialBenchmark(test.Benchmark):
  """Wall-time benchmarks for matrix_exponential on CPU and, when
  available, GPU."""

  # Square shapes of increasing size, plus a few batched shapes.
  shapes = [
      (4, 4),
      (10, 10),
      (16, 16),
      (101, 101),
      (256, 256),
      (1000, 1000),
      (1024, 1024),
      (2048, 2048),
      (513, 4, 4),
      (513, 16, 16),
      (513, 256, 256),
  ]

  def _GenerateMatrix(self, shape):
    # Build a diagonally-dominant (well-conditioned) matrix and tile it
    # across any leading batch dimensions.
    batch_shape = shape[:-2]
    shape = shape[-2:]
    assert shape[0] == shape[1]
    n = shape[0]
    matrix = np.ones(shape).astype(np.float32) / (
        2.0 * n) + np.diag(np.ones(n).astype(np.float32))
    return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))

  def benchmarkMatrixExponentialOp(self):
    # Time every shape on CPU first, then on GPU when one is available.
    for shape in self.shapes:
      with ops.Graph().as_default(), \
          session.Session(config=benchmark.benchmark_config()) as sess, \
          ops.device("/cpu:0"):
        matrix = self._GenerateMatrix(shape)
        expm = linalg_impl.matrix_exponential(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(expm),
            min_iters=25,
            name="matrix_exponential_cpu_{shape}".format(
                shape=shape))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/gpu:0"):
          matrix = self._GenerateMatrix(shape)
          expm = linalg_impl.matrix_exponential(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(expm),
              min_iters=25,
              name="matrix_exponential_gpu_{shape}".format(
                  shape=shape))
def _TestRandomSmall(dtype, batch_dims, size):
def Test(self):
np.random.seed(42)
shape = batch_dims + (size, size)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=shape).astype(dtype)
self._verifyExponentialReal(matrix)
return Test
def _TestL1Norms(dtype, shape, scale):
def Test(self):
np.random.seed(42)
matrix = np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape)).reshape(shape).astype(dtype)
print(dtype, shape, scale, matrix)
l1_norm = np.max(np.sum(np.abs(matrix), axis=matrix.ndim-2))
matrix /= l1_norm
self._verifyExponentialReal(scale * matrix)
return Test
if __name__ == "__main__":
  # Generate the random-small-matrix tests. These exercise random matrices,
  # not the L1-norm scaling paths, so label them accordingly (previously
  # they were misleadingly registered under the "testL1Norms_" prefix).
  for dtype_ in [np.float32, np.float64, np.complex64, np.complex128]:
    for batch_ in [(), (2,), (2, 2)]:
      for size_ in [4, 7]:
        name = "%s_%d_%d" % (dtype_.__name__, len(batch_), size_)
        setattr(ExponentialOpTest, "testExponentialRandomSmall_" + name,
                _TestRandomSmall(dtype_, batch_, size_))
  # Generate the L1-norm scaling tests for single and batched shapes.
  for shape_ in [(3, 3), (2, 3, 3)]:
    for dtype_ in [np.float32, np.complex64]:
      for scale_ in [0.1, 1.5, 5.0, 20.0]:
        name = "%s_%d_%d" % (dtype_.__name__, len(shape_), int(scale_*10))
        setattr(ExponentialOpTest, "testL1Norms_" + name,
                _TestL1Norms(dtype_, shape_, scale_))
    for dtype_ in [np.float64, np.complex128]:
      for scale_ in [0.01, 0.2, 0.5, 1.5, 6.0, 25.0]:
        name = "%s_%d_%d" % (dtype_.__name__, len(shape_), int(scale_*100))
        setattr(ExponentialOpTest, "testL1Norms_" + name,
                _TestL1Norms(dtype_, shape_, scale_))
  test.main()
| |
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""FNode are the building blocks of formulae."""
import collections
import pysmt
import pysmt.smtlib
from pysmt.operators import (FORALL, EXISTS, AND, OR, NOT, IMPLIES, IFF,
SYMBOL, FUNCTION,
REAL_CONSTANT, BOOL_CONSTANT, INT_CONSTANT,
PLUS, MINUS, TIMES, DIV,
LE, LT, EQUALS,
ITE,
TOREAL,
BV_CONSTANT, BV_NOT, BV_AND, BV_OR, BV_XOR,
BV_CONCAT, BV_EXTRACT,
BV_ULT, BV_ULE, BV_NEG, BV_ADD, BV_SUB,
BV_MUL, BV_UDIV, BV_UREM,
BV_LSHL, BV_LSHR,
BV_ROL, BV_ROR,
BV_ZEXT, BV_SEXT,
BV_SLT, BV_SLE,
BV_COMP,
BV_SDIV, BV_SREM,
BV_ASHR,
STR_CONSTANT,
STR_LENGTH, STR_CONCAT, STR_CONTAINS,
STR_INDEXOF, STR_REPLACE, STR_SUBSTR,
STR_PREFIXOF, STR_SUFFIXOF,
STR_TO_INT, INT_TO_STR,
STR_CHARAT,
ARRAY_SELECT, ARRAY_STORE, ARRAY_VALUE,
ALGEBRAIC_CONSTANT)
from pysmt.operators import (BOOL_OPERATORS, THEORY_OPERATORS,
BV_OPERATORS, IRA_OPERATORS, ARRAY_OPERATORS,
STR_OPERATORS,
RELATIONS, CONSTANTS)
from pysmt.typing import BOOL, REAL, INT, BVType, STRING
from pysmt.decorators import deprecated, assert_infix_enabled
from pysmt.utils import twos_complement
from pysmt.constants import (Fraction, is_python_integer,
is_python_rational, is_python_boolean)
from pysmt.exceptions import (PysmtValueError, PysmtModeError,
UnsupportedOperatorError)
# node_type: integer operator code (see pysmt.operators)
# args:      tuple of child FNodes
# payload:   non-FNode content, e.g. (name, type) for symbols or the raw
#            value for constants ((value, width) for BV constants)
FNodeContent = collections.namedtuple("FNodeContent",
                                      ["node_type", "args", "payload"])
class FNode(object):
r"""FNode represent the basic structure for representing a formula.
FNodes are built using the FormulaManager, and should not be
explicitely instantiated, since the FormulaManager takes care of
memoization, thus guaranteeing that equivalent are represented by
the same object.
An FNode is a wrapper to the structure FNodeContent. FNodeContent
defines the type of the node (see operators.py), its arguments
(e.g., for the formula A /\ B, args=(A,B)) and its payload,
content of the node that is not an FNode (e.g., for an integer
constant, the payload might be the python value 1).
The node_id is an integer uniquely identifying the node within the
FormulaManager it belongs.
"""
__slots__ = ["_content", "_node_id"]
def __init__(self, content, node_id):
    """Store the immutable content and the manager-assigned id.

    content: an FNodeContent namedtuple.
    node_id: integer unique within the owning FormulaManager.
    """
    self._content = content
    self._node_id = node_id
    return
# __eq__ is left as default while __hash__ uses the node id. This
# is because we always have shared FNodes, hence in a single
# environment two nodes have always different ids, but in
# different environments they can have the same id. This is not an
# issue since, by default, equality coincides with identity.
def __hash__(self):
    # Hash on the manager-assigned id: nodes are memoized per environment,
    # so within one environment equal formulae share the same id/object.
    return self._node_id
def node_id(self):
    """Return the integer id assigned by the owning FormulaManager."""
    return self._node_id
def node_type(self):
    """Return the operator code of this node (see pysmt.operators)."""
    return self._content.node_type
def args(self):
    """Return the tuple of subformulae (child FNodes)."""
    return self._content.args
def arg(self, idx):
    """Return the subformula at position *idx*."""
    return self._content.args[idx]
def get_free_variables(self):
    """Return the set of Symbols that are free in the formula.

    Delegates to the free-variables oracle of the global environment.
    """
    return _env().fvo.get_free_variables(self)
def get_atoms(self):
    """Return the set of atoms appearing in the formula.

    Delegates to the atoms oracle of the global environment.
    """
    return _env().ao.get_atoms(self)
def simplify(self):
    """Return a simplified version of the formula.

    Delegates to the simplifier of the global environment.
    """
    return _env().simplifier.simplify(self)
def substitute(self, subs, interpretations=None):
    """Return a formula in which subformulae have been substituted.

    subs is a dictionary mapping terms to be substituted with their
    substitution.

    interpretations is a dictionary mapping function symbols to
    FunctionInterpretation objects describing the semantics of the
    function.
    """
    return _env().substituter.substitute(self, subs=subs,
                                         interpretations=interpretations)
def size(self, measure=None):
    """Return the size of the formula according to the given metric.

    measure defaults to the size oracle's default metric.
    See :py:class:`SizeOracle`
    """
    return _env().sizeo.get_size(self, measure)
def get_type(self):
    """Return the type of the formula by calling the Type-Checker.

    See :py:class:`SimpleTypeChecker`
    """
    return _env().stc.get_type(self)
def is_constant(self, _type=None, value=None):
    """Test whether the formula is a constant.

    Optionally, check that the constant is of the given type and value.
    Raises PysmtValueError when type/value checking is requested on an
    array value.
    """
    if self.node_type() not in CONSTANTS:
        if self.node_type() == ARRAY_VALUE:
            # An array value can be a constant if all its children
            # are constants
            for c in self.args():
                if not c.is_constant():
                    return False
            if _type is not None or value is not None:
                raise PysmtValueError("constant type and value checking " \
                                      "is not available for array values")
            return True
        return False
    if _type is not None:
        # Reject the constant when the requested type does not match the
        # node's own constant kind (and, for BVs, its width).
        if _type.is_int_type() and self.node_type() != INT_CONSTANT:
            return False
        if _type.is_real_type() and self.node_type() != REAL_CONSTANT:
            return False
        if _type.is_bool_type() and self.node_type() != BOOL_CONSTANT:
            return False
        if _type.is_string_type() and self.node_type() != STR_CONSTANT:
            return False
        if _type.is_bv_type():
            if self.node_type() != BV_CONSTANT:
                return False
            # BV payload is (value, width)
            if self._content.payload[1] != _type.width:
                return False
    if value is not None:
        return value == self.constant_value()
    return True
def is_bool_constant(self, value=None):
"""Test whether the formula is a Boolean constant.
Optionally, check that the constant has the given value.
"""
return self.is_constant(BOOL, value)
def is_real_constant(self, value=None):
"""Test whether the formula is a Real constant.
Optionally, check that the constant has the given value.
"""
return self.is_constant(REAL, value)
def is_int_constant(self, value=None):
"""Test whether the formula is an Integer constant.
Optionally, check that the constant has the given value.
"""
return self.is_constant(INT, value)
def is_bv_constant(self, value=None, width=None):
    """Test whether the formula is a BitVector constant.

    Optionally, check that the constant has the given value and/or width.
    """
    if value is None and width is None:
        # Fast path: no payload checks needed.
        return self.node_type() == BV_CONSTANT
    if width is None:
        return self.is_constant(value=value)
    else:
        return self.is_constant(_type=BVType(width=width),
                                value=value)
def is_string_constant(self, value=None):
"""Test whether the formula is a String constant.
Optionally, check that the constant has the given value.
"""
return self.is_constant(STRING, value)
def is_algebraic_constant(self):
"""Test whether the formula is an Algebraic Constant"""
return self.node_type() == ALGEBRAIC_CONSTANT
def is_symbol(self, type_=None):
"""Test whether the formula is a Symbol.
Optionally, check that the symbol has the given type.
"""
if type_:
return self.node_type() == SYMBOL and \
self.symbol_type() == type_
return self.node_type() == SYMBOL
def is_literal(self):
    """Test whether the formula is a literal.

    A literal is a positive or negative Boolean symbol.
    Returns a bool in every case.
    """
    if self.is_symbol(BOOL):
        return True
    if self.is_not():
        return self.arg(0).is_symbol(BOOL)
    # Bug fix: previously the function fell off the end and returned None
    # for non-literals; callers comparing against False (or relying on a
    # bool return) now get a consistent result.
    return False
def is_true(self):
"""Test whether the formula is the True Boolean constant."""
return self.is_bool_constant(True)
def is_false(self):
"""Test whether the formula is the False Boolean constant."""
return self.is_bool_constant(False)
def is_one(self):
return self.is_real_constant(1) or self.is_int_constant(1)
def is_zero(self):
return self.is_real_constant(0) or self.is_int_constant(0)
def is_toreal(self):
"""Test whether the node is the ToReal operator."""
return self.node_type() == TOREAL
def is_forall(self):
"""Test whether the node is the ForAll operator."""
return self.node_type() == FORALL
def is_exists(self):
"""Test whether the node is the Exists operator."""
return self.node_type() == EXISTS
def is_quantifier(self):
"""Test whether the node is a Quantifier."""
return self.is_exists() or self.is_forall()
def is_and(self):
"""Test whether the node is the And operator."""
return self.node_type() == AND
def is_or(self):
"""Test whether the node is the Or operator."""
return self.node_type() == OR
def is_not(self):
"""Test whether the node is the Not operator."""
return self.node_type() == NOT
def is_plus(self):
"""Test whether the node is the Plus operator."""
return self.node_type() == PLUS
def is_minus(self):
"""Test whether the node is the Minus operator."""
return self.node_type() == MINUS
def is_times(self):
"""Test whether the node is the Times operator."""
return self.node_type() == TIMES
def is_div(self):
"""Test whether the node is the Division operator."""
return self.node_type() == DIV
def is_implies(self):
"""Test whether the node is the Implies operator."""
return self.node_type() == IMPLIES
def is_iff(self):
"""Test whether the node is the Iff operator."""
return self.node_type() == IFF
def is_ite(self):
"""Test whether the node is the Ite operator."""
return self.node_type() == ITE
def is_equals(self):
"""Test whether the node is the Equals operator."""
return self.node_type() == EQUALS
def is_le(self):
"""Test whether the node is the LE (less than equal) relation."""
return self.node_type() == LE
def is_lt(self):
"""Test whether the node is the LT (less than) relation."""
return self.node_type() == LT
def is_bool_op(self):
"""Test whether the node is a Boolean operator."""
return self.node_type() in BOOL_OPERATORS
def is_theory_relation(self):
"""Test whether the node is a theory relation."""
return self.node_type() in RELATIONS
def is_theory_op(self):
"""Test whether the node is a theory operator."""
return self.node_type() in THEORY_OPERATORS
def is_ira_op(self):
"""Test whether the node is an Int or Real Arithmetic operator."""
return self.node_type() in IRA_OPERATORS
@deprecated("is_ira_op")
def is_lira_op(self):
    """Test whether the node is a IRA operator.

    Deprecated alias of :meth:`is_ira_op`. The decorator argument names
    the replacement method; the previous value "is_isa_op" referred to a
    method that does not exist on this class.
    """
    return self.node_type() in IRA_OPERATORS
def is_bv_op(self):
"""Test whether the node is a BitVector operator."""
return self.node_type() in BV_OPERATORS
def is_array_op(self):
"""Test whether the node is an array operator."""
return self.node_type() in ARRAY_OPERATORS
def is_bv_not(self):
"""Test whether the node is the BVNot operator."""
return self.node_type() == BV_NOT
def is_bv_and(self):
"""Test whether the node is the BVAnd operator."""
return self.node_type() == BV_AND
def is_bv_or(self):
"""Test whether the node is the BVOr operator."""
return self.node_type() == BV_OR
def is_bv_xor(self):
"""Test whether the node is the BVXor operator."""
return self.node_type() == BV_XOR
def is_bv_concat(self):
"""Test whether the node is the BVConcat operator."""
return self.node_type() == BV_CONCAT
def is_bv_extract(self):
    """Test whether the node is the BVExtract operator."""
    return self.node_type() == BV_EXTRACT
def is_bv_ult(self):
"""Test whether the node is the BVULT (unsigned less than) relation."""
return self.node_type() == BV_ULT
def is_bv_ule(self):
    """Test whether the node is the BVULE (unsigned less than or equal) relation."""
    return self.node_type() == BV_ULE
def is_bv_neg(self):
"""Test whether the node is the BVNeg operator."""
return self.node_type() == BV_NEG
def is_bv_add(self):
"""Test whether the node is the BVAdd operator."""
return self.node_type() == BV_ADD
def is_bv_mul(self):
"""Test whether the node is the BVMul operator."""
return self.node_type() == BV_MUL
def is_bv_udiv(self):
"""Test whether the node is the BVUDiv operator."""
return self.node_type() == BV_UDIV
def is_bv_urem(self):
"""Test whether the node is the BVURem operator."""
return self.node_type() == BV_UREM
def is_bv_lshl(self):
"""Test whether the node is the BVLShl (logical shift left) operator."""
return self.node_type() == BV_LSHL
def is_bv_lshr(self):
"""Test whether the node is the BVLShr (logical shift right) operator."""
return self.node_type() == BV_LSHR
def is_bv_rol(self):
"""Test whether the node is the BVRol (rotate left) operator."""
return self.node_type() == BV_ROL
def is_bv_ror(self):
"""Test whether the node is the BVRor (rotate right) operator."""
return self.node_type() == BV_ROR
def is_bv_zext(self):
"""Test whether the node is the BVZext (zero extension) operator."""
return self.node_type() == BV_ZEXT
def is_bv_sext(self):
"""Test whether the node is the BVSext (signed extension) operator."""
return self.node_type() == BV_SEXT
def is_bv_sub(self):
"""Test whether the node is the BVSub (subtraction) operator."""
return self.node_type() == BV_SUB
def is_bv_slt(self):
"""Test whether the node is the BVSLT (signed less-than) operator."""
return self.node_type() == BV_SLT
def is_bv_sle(self):
"""Test whether the node is the BVSLE (signed less-than-or-equal-to) operator."""
return self.node_type() == BV_SLE
def is_bv_comp(self):
"""Test whether the node is the BVComp (comparison) operator."""
return self.node_type() == BV_COMP
def is_bv_sdiv(self):
"""Test whether the node is the BVSDiv (signed division) operator."""
return self.node_type() == BV_SDIV
def is_bv_srem(self):
"""Test whether the node is the BVSRem (signed reminder) operator."""
return self.node_type() == BV_SREM
def is_bv_ashr(self):
"""Test whether the node is the BVAshr (arithmetic shift right) operator."""
return self.node_type() == BV_ASHR
def is_select(self):
"""Test whether the node is the SELECT (array select) operator."""
return self.node_type() == ARRAY_SELECT
def is_store(self):
"""Test whether the node is the STORE (array store) operator."""
return self.node_type() == ARRAY_STORE
def is_array_value(self):
"""Test whether the node is an array value operator."""
return self.node_type() == ARRAY_VALUE
def bv_width(self):
    """Return the BV width of the formula.

    Supported on BV constants, BV-typed symbols, function applications,
    ITEs, array selects and BV operators; asserts on anything else.
    """
    if self.is_bv_constant():
        # Constant payload is (value, width).
        return self._content.payload[1]
    elif self.is_symbol():
        assert self.symbol_type().is_bv_type()
        return self.symbol_type().width
    elif self.is_function_application():
        # Return width defined in the declaration
        return self.function_name().symbol_type().return_type.width
    elif self.is_ite():
        # Recursively call bv_width on the left child
        # (The right child has the same width if the node is well-formed)
        width_l = self.arg(1).bv_width()
        return width_l
    elif self.is_select():
        # This must be a select over an array with BV value type
        ty = self.arg(0).get_type()
        return ty.elem_type.width
    else:
        # BV Operator: width is stored as the first payload element.
        assert self.is_bv_op(), "Unsupported method bv_width on %s" % self
        return self._content.payload[0]
def bv_extract_start(self):
"""Return the starting index for BVExtract."""
assert self.is_bv_extract()
return self._content.payload[1]
def bv_extract_end(self):
"""Return the ending index for BVExtract."""
assert self.is_bv_extract()
return self._content.payload[2]
def bv_rotation_step(self):
"""Return the rotation step for BVRor and BVRol."""
assert self.is_bv_ror() or self.is_bv_rol()
return self._content.payload[1]
def bv_extend_step(self):
"""Return the extension step for BVZext and BVSext."""
assert self.is_bv_zext() or self.is_bv_sext()
return self._content.payload[1]
def __str__(self):
return self.serialize(threshold=5)
def __repr__(self):
return str(self)
def serialize(self, threshold=None):
    """Returns a human readable representation of the formula.

    The threshold parameter can be used to limit the amount of the
    formula that will be printed.
    See :py:class:`HRSerializer`
    """
    return _env().serializer.serialize(self, threshold=threshold)
def to_smtlib(self, daggify=True):
"""Returns a Smt-Lib string representation of the formula.
The daggify parameter can be used to switch from a linear-size
representation that uses 'let' operators to represent the
formula as a dag or a simpler (but possibly exponential)
representation that expands the formula as a tree.
See :py:class:`SmtPrinter`
"""
return pysmt.smtlib.printers.to_smtlib(self, daggify=daggify)
    def is_function_application(self):
        """Test whether the node is a Function application."""
        return self.node_type() == FUNCTION
    def is_term(self):
        """Test whether the node is a term.
        All nodes are terms, except for function definitions.
        """
        # The only non-term is a symbol whose declared type is a function
        # type (i.e. an uninterpreted function declaration).
        return not (self.is_symbol() and self.symbol_type().is_function_type())
    def is_str_op(self):
        """Test whether the node is a string operator."""
        return self.node_type() in STR_OPERATORS
    def symbol_type(self):
        """Return the type of the Symbol."""
        assert self.is_symbol()
        # Symbol payload layout: (name, type) -- see symbol_name below.
        return self._content.payload[1]
    def symbol_name(self):
        """Return the name of the Symbol."""
        assert self.is_symbol()
        return self._content.payload[0]
    def constant_value(self):
        """Return the value of the Constant."""
        assert self.is_constant()
        if self.node_type() == BV_CONSTANT:
            # BV constants store extra data in the payload tuple; the
            # numeric (unsigned) value is the first entry.
            return self._content.payload[0]
        return self._content.payload
    def constant_type(self):
        """Return the type of the Constant."""
        if self.node_type() == INT_CONSTANT:
            return INT
        elif self.node_type() == REAL_CONSTANT:
            return REAL
        elif self.node_type() == BOOL_CONSTANT:
            return BOOL
        elif self.node_type() == STR_CONSTANT:
            return STRING
        else:
            # The only remaining constant kind is a BitVector constant,
            # whose type also carries the width.
            assert self.node_type() == BV_CONSTANT,\
                "Unsupported method constant_type '%s'" % self
            return BVType(width=self.bv_width())
    def bv2nat(self):
        """Return the unsigned value encoded by the BitVector."""
        return self.bv_unsigned_value()
    def bv_unsigned_value(self):
        """Return the unsigned value encoded by the BitVector."""
        return self.constant_value()
    def bv_signed_value(self):
        """Return the signed value encoded by the BitVector."""
        # Reinterpret the stored unsigned value as two's-complement.
        return twos_complement(self.constant_value(), self.bv_width())
def bv_str(self, fmt='b'):
"""Return a string representation of the BitVector.
fmt: 'b' : Binary
'd' : Decimal
'x' : Hexadecimal
The representation is always unsigned
"""
if fmt == 'b':
fstr = '{0:0%db}' % self.bv_width()
elif fmt == 'd':
fstr = '{}'
else:
assert fmt == 'x', "Unknown option %s" % str(fmt)
fstr = '{0:0%dx}' % (self.bv_width()/4)
str_ = fstr.format(self.constant_value())
return str_
def bv_bin_str(self, reverse=False):
"""Return the binary representation of the BitVector as string.
The reverse option is provided to deal with MSB/LSB.
"""
bitstr = self.bv_str(fmt='b')
if reverse:
bitstr = bitstr[::-1]
return bitstr
    def array_value_index_type(self):
        """Return the type of the indices of this Array Value."""
        assert self.is_array_value()
        return self._content.payload
    def array_value_get(self, index):
        """Returns the value of this Array Value at the given index. The
        index must be a constant of the correct type.
        This function is equivalent (but possibly faster) than the
        following code::
        m = self.array_value_assigned_values_map()
        try:
            return m[index]
        except KeyError:
            return self.array_value_default()
        """
        assert index.is_constant()
        # args layout: args[0] is the default value, followed by
        # alternating (index, value) pairs.  The binary search below
        # compares object ids, so it assumes the pairs are kept sorted by
        # the id of the index node, and that equal nodes share one object
        # (node sharing/hash-consing) -- TODO confirm against the node
        # constructor.
        args = self.args()
        start = 0
        end = (len(args) - 1) // 2
        while (end - start) > 0:
            pivot = (end + start) // 2
            i = args[2 * pivot + 1]
            if id(i) == id(index):
                return args[2 * pivot + 2]
            elif id(i) > id(index):
                end = pivot
            else:
                start = pivot + 1
        # No explicit assignment for this index: fall back to the default.
        return self.array_value_default()
    def array_value_assigned_values_map(self):
        """Return a dict mapping explicitly assigned indices to values."""
        args = self.args()
        # Skip args[0] (the default); pair up indices with their values.
        return dict(zip(args[1::2], args[2::2]))
    def array_value_default(self):
        """Return the default value of this Array Value."""
        return self.args()[0]
    def function_name(self):
        """Return the Function name."""
        assert self.is_function_application()
        return self._content.payload
    def quantifier_vars(self):
        """Return the list of quantified variables."""
        assert self.is_quantifier()
        return self._content.payload
    def algebraic_approx_value(self, precision=10):
        """Return a Fraction approximating this algebraic constant.
        The underlying algebraic number is approximated with the
        requested precision (passed to its `approx` method).
        """
        value = self.constant_value()
        approx = value.approx(precision)
        # MG: This is a workaround python 3 since Z3 mixes int and long.
        # The bug was fixed in master of Z3, but no official release
        # has been done containing it.
        # In the most recent version of z3, this can be done with:
        # return approx.as_fraction()
        n = int(str(approx.numerator()))
        d = int(str(approx.denominator()))
        return Fraction(n,d)
# Infix Notation
    @assert_infix_enabled
    def _apply_infix(self, right, function, bv_function=None):
        """Apply a binary operator in infix style.
        `function` is used for Boolean/arithmetic operands; when the
        left-hand side has BV type, `bv_function` (defaulting to
        `function`) is used instead.  A non-FNode right operand is first
        lifted to a constant of self's type.
        """
        # Default bv_function to function
        if bv_function is None:
            bv_function = function
        right = self._infix_prepare_arg(right, self.get_type())
        if self.get_type().is_bv_type():
            return bv_function(self, right)
        return function(self, right)
    @assert_infix_enabled
    def _infix_prepare_arg(self, arg, expected_type):
        """Lift a python value to an FNode constant of expected_type.
        FNode arguments are returned unchanged.
        """
        mgr = _mgr()
        if isinstance(arg, FNode):
            return arg
        # BVs
        if expected_type.is_bv_type():
            return mgr.BV(arg, width=expected_type.width)
        # Boolean, Integer and Arithmetic
        elif expected_type.is_bool_type():
            return mgr.Bool(arg)
        elif expected_type.is_int_type():
            return mgr.Int(arg)
        elif expected_type.is_real_type():
            return mgr.Real(arg)
        else:
            raise PysmtValueError("Unsupported value '%s' in infix operator" % str(arg))
    # Named infix helpers for the Boolean connectives and (dis)equality.
    # Each delegates to the FormulaManager, lifting plain python values
    # on the right-hand side via _apply_infix.
    def Implies(self, right):
        return self._apply_infix(right, _mgr().Implies)
    def Iff(self, right):
        return self._apply_infix(right, _mgr().Iff)
    def Equals(self, right):
        return self._apply_infix(right, _mgr().Equals)
    def NotEquals(self, right):
        return self._apply_infix(right, _mgr().NotEquals)
    @assert_infix_enabled
    def Ite(self, then_, else_):
        # The branch types cannot be inferred from self (the condition),
        # so implicit conversion of python values is rejected here.
        if isinstance(then_, FNode) and isinstance(else_, FNode):
            return _mgr().Ite(self, then_, else_)
        else:
            raise PysmtModeError("Cannot infix ITE with implicit argument types.")
    def And(self, right):
        return self._apply_infix(right, _mgr().And)
    def Or(self, right):
        return self._apply_infix(right, _mgr().Or)
    # BV
    # Named infix helpers for BitVector operators.  Binary ones lift a
    # python integer on the right-hand side via _apply_infix; extract /
    # repeat / rotate / extend take plain integer parameters and call
    # the formula manager directly.
    def BVAnd(self, right):
        return self._apply_infix(right, _mgr().BVAnd)
    def BVAdd(self, right):
        return self._apply_infix(right, _mgr().BVAdd)
    def BVAShr(self, right):
        return self._apply_infix(right, _mgr().BVAShr)
    def BVComp(self, right):
        return self._apply_infix(right, _mgr().BVComp)
    def BVConcat(self, right):
        return self._apply_infix(right, _mgr().BVConcat)
    def BVExtract(self, start, stop):
        return _mgr().BVExtract(self, start, stop)
    def BVLShl(self, right):
        return self._apply_infix(right, _mgr().BVLShl)
    def BVLShr(self, right):
        return self._apply_infix(right, _mgr().BVLShr)
    def BVMul(self, right):
        return self._apply_infix(right, _mgr().BVMul)
    def BVNand(self, right):
        return self._apply_infix(right, _mgr().BVNand)
    def BVNor(self, right):
        return self._apply_infix(right, _mgr().BVNor)
    def BVOr(self, right):
        return self._apply_infix(right, _mgr().BVOr)
    def BVRepeat(self, count):
        return _mgr().BVRepeat(self, count)
    def BVRol(self, steps):
        return _mgr().BVRol(self, steps)
    def BVRor(self, steps):
        return _mgr().BVRor(self, steps)
    def BVSDiv(self, right):
        return self._apply_infix(right, _mgr().BVSDiv)
    def BVSExt(self, increase):
        return _mgr().BVSExt(self, increase)
    def BVSGE(self, right):
        return self._apply_infix(right, _mgr().BVSGE)
    def BVSGT(self, right):
        return self._apply_infix(right, _mgr().BVSGT)
    def BVSLE(self, right):
        return self._apply_infix(right, _mgr().BVSLE)
    def BVSLT(self, right):
        return self._apply_infix(right, _mgr().BVSLT)
    def BVSub(self, right):
        return self._apply_infix(right, _mgr().BVSub)
    def BVSMod(self, right):
        return self._apply_infix(right, _mgr().BVSMod)
    def BVSRem(self, right):
        return self._apply_infix(right, _mgr().BVSRem)
    def BVUDiv(self, right):
        return self._apply_infix(right, _mgr().BVUDiv)
    def BVUGE(self, right):
        return self._apply_infix(right, _mgr().BVUGE)
    def BVUGT(self, right):
        return self._apply_infix(right, _mgr().BVUGT)
    def BVULE(self, right):
        return self._apply_infix(right, _mgr().BVULE)
    def BVULT(self, right):
        return self._apply_infix(right, _mgr().BVULT)
    def BVURem(self, right):
        return self._apply_infix(right, _mgr().BVURem)
    def BVXor(self, right):
        return self._apply_infix(right, _mgr().BVXor)
    def BVXnor(self, right):
        return self._apply_infix(right, _mgr().BVXnor)
    def BVZExt(self, increase):
        return _mgr().BVZExt(self, increase)
    # Arrays
    def Select(self, index):
        return _mgr().Select(self, index)
    def Store(self, index, value):
        return _mgr().Store(self, index, value)
    #
    # Infix operators
    #
    # Python operator overloads.  Arithmetic/comparison operators pick
    # the BV variant when self has BV type (note: *unsigned* BV
    # comparison/division semantics).
    def __add__(self, right):
        return self._apply_infix(right, _mgr().Plus, _mgr().BVAdd)
    def __radd__(self, right):
        return self._apply_infix(right, _mgr().Plus, _mgr().BVAdd)
    def __sub__(self, right):
        return self._apply_infix(right, _mgr().Minus, _mgr().BVSub)
    def __rsub__(self, left):
        # Swap operators to perform right-subtract
        # For BVs we might need to build the BV constant
        if self.get_type().is_bv_type():
            if is_python_integer(left):
                left = _mgr().BV(left, width=self.bv_width())
            return left._apply_infix(self, _mgr().BVSub)
        # (x - y) = (-y + x)
        minus_self = -self
        return minus_self._apply_infix(left, _mgr().Plus)
    def __mul__(self, right):
        return self._apply_infix(right, _mgr().Times, _mgr().BVMul)
    def __rmul__(self, right):
        return self._apply_infix(right, _mgr().Times, _mgr().BVMul)
    def __div__(self, right):
        # Python 2 division; __truediv__ (Python 3) delegates here.
        return self._apply_infix(right, _mgr().Div, _mgr().BVUDiv)
    def __truediv__(self, right):
        return self.__div__(right)
    def __gt__(self, right):
        return self._apply_infix(right, _mgr().GT, _mgr().BVUGT)
    def __ge__(self, right):
        return self._apply_infix(right, _mgr().GE, _mgr().BVUGE)
    def __lt__(self, right):
        return self._apply_infix(right, _mgr().LT, _mgr().BVULT)
    def __le__(self, right):
        return self._apply_infix(right, _mgr().LE, _mgr().BVULE)
    def __and__(self, other):
        return self._apply_infix(other, _mgr().And, _mgr().BVAnd)
    def __rand__(self, other):
        return self._apply_infix(other, _mgr().And, _mgr().BVAnd)
    def __or__(self, other):
        return self._apply_infix(other, _mgr().Or, _mgr().BVOr)
    def __ror__(self, other):
        return self._apply_infix(other, _mgr().Or, _mgr().BVOr)
    def __xor__(self, other):
        return self._apply_infix(other, _mgr().Xor, _mgr().BVXor)
    def __rxor__(self, other):
        return self._apply_infix(other, _mgr().Xor, _mgr().BVXor)
    def __neg__(self):
        # BVs have a dedicated negation operator; arithmetic terms are
        # negated by multiplying with -1.
        if self.get_type().is_bv_type():
            return _mgr().BVNeg(self)
        return self._apply_infix(-1, _mgr().Times)
    @assert_infix_enabled
    def __invert__(self):
        # Bitwise NOT on BVs, logical Not on Booleans.
        if self.get_type().is_bv_type():
            return _mgr().BVNot(self)
        return _mgr().Not(self)
    @assert_infix_enabled
    def __getitem__(self, idx):
        # BV bit extraction: x[a:b] -> BVExtract(x, a, b) and
        # x[i] -> BVExtract(x, i, i).
        # NOTE(review): a slice step is silently ignored, and a missing
        # stop (x[a:]) forwards end=None to BVExtract -- confirm that
        # BVExtract handles None as "until MSB".
        if isinstance(idx, slice):
            end = idx.stop
            start = idx.start
            if start is None: start = 0
        else:
            # Single point [idx]
            end = idx
            start = idx
        if self.get_type().is_bv_type():
            return _mgr().BVExtract(self, start=start, end=end)
        raise UnsupportedOperatorError("Unsupported operation '__getitem__' on '%s'." %
                                       str(self))
    def __lshift__(self, right):
        # BV-only: passing function=None makes non-BV operands fail.
        return self._apply_infix(right, None, bv_function=_mgr().BVLShl)
    def __rshift__(self, right):
        return self._apply_infix(right, None, bv_function=_mgr().BVLShr)
    def __mod__(self, right):
        return self._apply_infix(right, None, bv_function=_mgr().BVURem)
    @assert_infix_enabled
    def __call__(self, *args):
        # Calling a function-typed symbol builds a Function application,
        # lifting python arguments to constants of the declared types.
        if self.is_symbol() and self.symbol_type().is_function_type():
            types = self.symbol_type().param_types
            if (len(types) != len(args)):
                raise PysmtValueError("Wrong number of parameters passed in "
                                      "infix 'call' operator")
            args = [self._infix_prepare_arg(x, t) for x,t in zip(args, types)]
            return _mgr().Function(self, args)
        else:
            raise PysmtValueError("Call operator can be applied to symbol "
                                  "types having function type only")
# EOC FNode
def _env():
    """Aux function to obtain the environment."""
    return pysmt.environment.get_env()
def _mgr():
    """Aux function to obtain the formula manager.
    Shorthand used heavily by the infix-notation methods above.
    """
    return pysmt.environment.get_env().formula_manager
| |
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import eventlet
import netaddr
from oslo import messaging
from oslo.utils import excutils
from neutron.common import constants as l3_constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils as common_utils
from neutron import context as n_context
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.cfg_agent import cfg_exceptions
from neutron.plugins.cisco.cfg_agent.device_drivers import driver_mgr
from neutron.plugins.cisco.cfg_agent import device_status
from neutron.plugins.cisco.common import cisco_constants as c_constants
LOG = logging.getLogger(__name__)
N_ROUTER_PREFIX = 'nrouter-'
class RouterInfo(object):
    """Wrapper class around the (neutron) router dictionary.
    Information about the neutron router is exchanged as a python dictionary
    between plugin and config agent. RouterInfo is a wrapper around that dict,
    with attributes for common parameters. These attributes keep the state
    of the current router configuration, and are used for detecting router
    state changes when an updated router dict is received.
    This is a modified version of the RouterInfo class defined in the
    (reference) l3-agent implementation, for use with cisco config agent.
    """
    def __init__(self, router_id, router):
        self.router_id = router_id
        self.ex_gw_port = None          # last configured external gw port
        self._snat_enabled = None
        self._snat_action = None
        self.internal_ports = []        # ports already configured on device
        self.floating_ips = []          # floating ips already configured
        self._router = None
        # Assign through the property setter so _snat_enabled is derived.
        self.router = router
        self.routes = []
        self.ha_info = router.get('ha_info')
    @property
    def router(self):
        return self._router
    @property
    def id(self):
        return self.router_id
    @property
    def snat_enabled(self):
        return self._snat_enabled
    @router.setter
    def router(self, value):
        # Refreshing the router dict also refreshes the cached SNAT flag.
        self._router = value
        if not self._router:
            return
        # enable_snat by default if it wasn't specified by plugin
        self._snat_enabled = self._router.get('enable_snat', True)
    def router_name(self):
        # Name used for the logical router on the hosting device.
        return N_ROUTER_PREFIX + self.router_id
class CiscoRoutingPluginApi(object):
    """RoutingServiceHelper(Agent) side of the routing RPC API."""
    def __init__(self, topic, host):
        self.host = host
        target = messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)
    def get_routers(self, context, router_ids=None, hd_ids=None):
        """Make a remote process call to retrieve the sync data for routers.
        :param context: session context
        :param router_ids: list of routers to fetch
        :param hd_ids : hosting device ids, only routers assigned to these
                        hosting devices will be returned.
        """
        # NOTE(review): the target declares version '1.0' but this call
        # pins '1.1' -- confirm the server side exposes 1.1.
        cctxt = self.client.prepare(version='1.1')
        return cctxt.call(context, 'cfg_sync_routers', host=self.host,
                          router_ids=router_ids, hosting_device_ids=hd_ids)
class RoutingServiceHelper(object):
    def __init__(self, host, conf, cfg_agent):
        self.conf = conf
        self.cfg_agent = cfg_agent
        self.context = n_context.get_admin_context_without_session()
        self.plugin_rpc = CiscoRoutingPluginApi(topics.L3PLUGIN, host)
        self._dev_status = device_status.DeviceStatus()
        self._drivermgr = driver_mgr.DeviceDriverManager()
        # router_id -> RouterInfo for routers currently configured
        self.router_info = {}
        # Ids accumulated from plugin notifications; drained each cycle
        # by process_service().
        self.updated_routers = set()
        self.removed_routers = set()
        self.sync_devices = set()
        self.fullsync = True            # force a full resync on startup
        self.topic = '%s.%s' % (c_constants.CFG_AGENT_L3_ROUTING, host)
        self._setup_rpc()
    def _setup_rpc(self):
        """Create the RPC connection and start consuming notifications."""
        self.conn = n_rpc.create_connection(new=True)
        self.endpoints = [self]
        self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
        self.conn.consume_in_threads()
    ### Notifications from Plugin ####
    def router_deleted(self, context, routers):
        """Deal with router deletion RPC message."""
        LOG.debug('Got router deleted notification for %s', routers)
        self.removed_routers.update(routers)
    def routers_updated(self, context, routers):
        """Deal with routers modification and creation RPC message."""
        LOG.debug('Got routers updated notification :%s', routers)
        if routers:
            # This is needed for backward compatibility
            if isinstance(routers[0], dict):
                routers = [router['id'] for router in routers]
            self.updated_routers.update(routers)
    def router_removed_from_agent(self, context, payload):
        """Deal with a router being unscheduled from this agent."""
        LOG.debug('Got router removed from agent :%r', payload)
        self.removed_routers.add(payload['router_id'])
    def router_added_to_agent(self, context, payload):
        """Deal with a router being scheduled to this agent.
        The payload is forwarded to routers_updated(), so it is expected
        to be a list of router ids or router dicts.
        """
        LOG.debug('Got router added to agent :%r', payload)
        self.routers_updated(context, payload)
# Routing service helper public methods
    def process_service(self, device_ids=None, removed_devices_info=None):
        """One processing cycle of the routing service helper.
        Gathers routers to (re)configure -- from a full sync, queued
        update notifications and/or the given hosting devices -- plus
        routers to remove, groups them per hosting device and processes
        each device's batch in its own green thread.
        :param device_ids: hosting device ids whose routers to (re)sync
        :param removed_devices_info: dict describing removed hosting
            devices (format in _get_router_ids_from_removed_devices_info)
        :return: None
        """
        try:
            LOG.debug("Routing service processing started")
            resources = {}
            routers = []
            removed_routers = []
            all_routers_flag = False
            if self.fullsync:
                LOG.debug("FullSync flag is on. Starting fullsync")
                # Setting all_routers_flag and clear the global full_sync flag
                all_routers_flag = True
                self.fullsync = False
                self.updated_routers.clear()
                self.removed_routers.clear()
                self.sync_devices.clear()
                routers = self._fetch_router_info(all_routers=True)
            else:
                if self.updated_routers:
                    router_ids = list(self.updated_routers)
                    LOG.debug("Updated routers:%s", router_ids)
                    self.updated_routers.clear()
                    routers = self._fetch_router_info(router_ids=router_ids)
                if device_ids:
                    LOG.debug("Adding new devices:%s", device_ids)
                    self.sync_devices = set(device_ids) | self.sync_devices
                if self.sync_devices:
                    sync_devices_list = list(self.sync_devices)
                    LOG.debug("Fetching routers on:%s", sync_devices_list)
                    routers.extend(self._fetch_router_info(
                        device_ids=sync_devices_list))
                    self.sync_devices.clear()
                if removed_devices_info:
                    if removed_devices_info.get('deconfigure'):
                        ids = self._get_router_ids_from_removed_devices_info(
                            removed_devices_info)
                        self.removed_routers = self.removed_routers | set(ids)
                if self.removed_routers:
                    removed_routers_ids = list(self.removed_routers)
                    LOG.debug("Removed routers:%s", removed_routers_ids)
                    for r in removed_routers_ids:
                        if r in self.router_info:
                            removed_routers.append(self.router_info[r].router)
            # Sort on hosting device
            if routers:
                resources['routers'] = routers
            if removed_routers:
                resources['removed_routers'] = removed_routers
            hosting_devices = self._sort_resources_per_hosting_device(
                resources)
            # Dispatch process_services() for each hosting device
            pool = eventlet.GreenPool()
            for device_id, resources in hosting_devices.items():
                routers = resources.get('routers')
                removed_routers = resources.get('removed_routers')
                pool.spawn_n(self._process_routers, routers, removed_routers,
                             device_id, all_routers=all_routers_flag)
            pool.waitall()
            if removed_devices_info:
                for hd_id in removed_devices_info['hosting_data']:
                    self._drivermgr.remove_driver_for_hosting_device(hd_id)
            LOG.debug("Routing service processing successfully completed")
        except Exception:
            # Any failure triggers a full resync on the next cycle.
            LOG.exception(_LE("Failed processing routers"))
            self.fullsync = True
    def collect_state(self, configurations):
        """Collect state from this helper.
        A set of attributes which summarizes the state of the routers and
        configurations managed by this config agent.
        :param configurations: dict of configuration values
        :return dict of updated configuration values
        """
        num_ex_gw_ports = 0
        num_interfaces = 0
        num_floating_ips = 0
        router_infos = self.router_info.values()
        num_routers = len(router_infos)
        num_hd_routers = collections.defaultdict(int)
        for ri in router_infos:
            ex_gw_port = ri.router.get('gw_port')
            if ex_gw_port:
                num_ex_gw_ports += 1
            num_interfaces += len(ri.router.get(
                l3_constants.INTERFACE_KEY, []))
            num_floating_ips += len(ri.router.get(
                l3_constants.FLOATINGIP_KEY, []))
            hd = ri.router['hosting_device']
            if hd:
                num_hd_routers[hd['id']] += 1
        # Per-hosting-device router counts: hd_id -> {'routers': count}
        routers_per_hd = dict((hd_id, {'routers': num})
                              for hd_id, num in num_hd_routers.items())
        non_responding = self._dev_status.get_backlogged_hosting_devices()
        configurations['total routers'] = num_routers
        configurations['total ex_gw_ports'] = num_ex_gw_ports
        configurations['total interfaces'] = num_interfaces
        configurations['total floating_ips'] = num_floating_ips
        configurations['hosting_devices'] = routers_per_hd
        configurations['non_responding_hosting_devices'] = non_responding
        return configurations
# Routing service helper internal methods
    def _fetch_router_info(self, router_ids=None, device_ids=None,
                           all_routers=False):
        """Fetch router dict from the routing plugin.
        :param router_ids: List of router_ids of routers to fetch
        :param device_ids: List of device_ids whose routers to fetch
        :param all_routers: If True fetch all the routers for this agent.
        :return: List of router dicts of format:
                 [ {router_dict1}, {router_dict2},.....]
                 Implicitly returns None when the RPC call fails (the
                 fullsync flag is set instead) or when no selector
                 argument is given -- callers must tolerate None.
        """
        try:
            if all_routers:
                return self.plugin_rpc.get_routers(self.context)
            if router_ids:
                return self.plugin_rpc.get_routers(self.context,
                                                   router_ids=router_ids)
            if device_ids:
                return self.plugin_rpc.get_routers(self.context,
                                                   hd_ids=device_ids)
        except messaging.MessagingException:
            LOG.exception(_LE("RPC Error in fetching routers from plugin"))
            self.fullsync = True
@staticmethod
def _get_router_ids_from_removed_devices_info(removed_devices_info):
"""Extract router_ids from the removed devices info dict.
:param removed_devices_info: Dict of removed devices and their
associated resources.
Format:
{
'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
'hd_id2': {'routers': [id3, id4, ...]},
...
},
'deconfigure': True/False
}
:return removed_router_ids: List of removed router ids
"""
removed_router_ids = []
for hd_id, resources in removed_devices_info['hosting_data'].items():
removed_router_ids += resources.get('routers', [])
return removed_router_ids
@staticmethod
def _sort_resources_per_hosting_device(resources):
"""This function will sort the resources on hosting device.
The sorting on hosting device is done by looking up the
`hosting_device` attribute of the resource, and its `id`.
:param resources: a dict with key of resource name
:return dict sorted on the hosting device of input resource. Format:
hosting_devices = {
'hd_id1' : {'routers':[routers],
'removed_routers':[routers], .... }
'hd_id2' : {'routers':[routers], .. }
.......
}
"""
hosting_devices = {}
for key in resources.keys():
for r in resources.get(key) or []:
hd_id = r['hosting_device']['id']
hosting_devices.setdefault(hd_id, {})
hosting_devices[hd_id].setdefault(key, []).append(r)
return hosting_devices
def _process_routers(self, routers, removed_routers,
device_id=None, all_routers=False):
"""Process the set of routers.
Iterating on the set of routers received and comparing it with the
set of routers already in the routing service helper, new routers
which are added are identified. Before processing check the
reachability (via ping) of hosting device where the router is hosted.
If device is not reachable it is backlogged.
For routers which are only updated, call `_process_router()` on them.
When all_routers is set to True (because of a full sync),
this will result in the detection and deletion of routers which
have been removed.
Whether the router can only be assigned to a particular hosting device
is decided and enforced by the plugin. No checks are done here.
:param routers: The set of routers to be processed
:param removed_routers: the set of routers which where removed
:param device_id: Id of the hosting device
:param all_routers: Flag for specifying a partial list of routers
:return: None
"""
try:
if all_routers:
prev_router_ids = set(self.router_info)
else:
prev_router_ids = set(self.router_info) & set(
[router['id'] for router in routers])
cur_router_ids = set()
for r in routers:
try:
if not r['admin_state_up']:
continue
cur_router_ids.add(r['id'])
hd = r['hosting_device']
if not self._dev_status.is_hosting_device_reachable(hd):
LOG.info(_LI("Router: %(id)s is on an unreachable "
"hosting device. "), {'id': r['id']})
continue
if r['id'] not in self.router_info:
self._router_added(r['id'], r)
ri = self.router_info[r['id']]
ri.router = r
self._process_router(ri)
except KeyError as e:
LOG.exception(_LE("Key Error, missing key: %s"), e)
self.updated_routers.add(r['id'])
continue
except cfg_exceptions.DriverException as e:
LOG.exception(_LE("Driver Exception on router:%(id)s. "
"Error is %(e)s"), {'id': r['id'], 'e': e})
self.updated_routers.update(r['id'])
continue
# identify and remove routers that no longer exist
for router_id in prev_router_ids - cur_router_ids:
self._router_removed(router_id)
if removed_routers:
for router in removed_routers:
self._router_removed(router['id'])
except Exception:
LOG.exception(_LE("Exception in processing routers on device:%s"),
device_id)
self.sync_devices.add(device_id)
def _process_router(self, ri):
"""Process a router, apply latest configuration and update router_info.
Get the router dict from RouterInfo and proceed to detect changes
from the last known state. When new ports or deleted ports are
detected, `internal_network_added()` or `internal_networks_removed()`
are called accordingly. Similarly changes in ex_gw_port causes
`external_gateway_added()` or `external_gateway_removed()` calls.
Next, floating_ips and routes are processed. Also, latest state is
stored in ri.internal_ports and ri.ex_gw_port for future comparisons.
:param ri : RouterInfo object of the router being processed.
:return:None
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.DriverException
if the configuration operation fails.
"""
try:
ex_gw_port = ri.router.get('gw_port')
ri.ha_info = ri.router.get('ha_info', None)
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
existing_port_ids = set([p['id'] for p in ri.internal_ports])
current_port_ids = set([p['id'] for p in internal_ports
if p['admin_state_up']])
new_ports = [p for p in internal_ports
if
p['id'] in (current_port_ids - existing_port_ids)]
old_ports = [p for p in ri.internal_ports
if p['id'] not in current_port_ids]
for p in new_ports:
self._set_subnet_info(p)
self._internal_network_added(ri, p, ex_gw_port)
ri.internal_ports.append(p)
for p in old_ports:
self._internal_network_removed(ri, p, ri.ex_gw_port)
ri.internal_ports.remove(p)
if ex_gw_port and not ri.ex_gw_port:
self._set_subnet_info(ex_gw_port)
self._external_gateway_added(ri, ex_gw_port)
elif not ex_gw_port and ri.ex_gw_port:
self._external_gateway_removed(ri, ri.ex_gw_port)
if ex_gw_port:
self._process_router_floating_ips(ri, ex_gw_port)
ri.ex_gw_port = ex_gw_port
self._routes_updated(ri)
except cfg_exceptions.DriverException as e:
with excutils.save_and_reraise_exception():
self.updated_routers.update(ri.router_id)
LOG.error(e)
def _process_router_floating_ips(self, ri, ex_gw_port):
"""Process a router's floating ips.
Compare current floatingips (in ri.floating_ips) with the router's
updated floating ips (in ri.router.floating_ips) and detect
flaoting_ips which were added or removed. Notify driver of
the change via `floating_ip_added()` or `floating_ip_removed()`.
:param ri: RouterInfo object of the router being processed.
:param ex_gw_port: Port dict of the external gateway port.
:return: None
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.DriverException
if the configuration operation fails.
"""
floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, [])
existing_floating_ip_ids = set(
[fip['id'] for fip in ri.floating_ips])
cur_floating_ip_ids = set([fip['id'] for fip in floating_ips])
id_to_fip_map = {}
for fip in floating_ips:
if fip['port_id']:
# store to see if floatingip was remapped
id_to_fip_map[fip['id']] = fip
if fip['id'] not in existing_floating_ip_ids:
ri.floating_ips.append(fip)
self._floating_ip_added(ri, ex_gw_port,
fip['floating_ip_address'],
fip['fixed_ip_address'])
floating_ip_ids_to_remove = (existing_floating_ip_ids -
cur_floating_ip_ids)
for fip in ri.floating_ips:
if fip['id'] in floating_ip_ids_to_remove:
ri.floating_ips.remove(fip)
self._floating_ip_removed(ri, ri.ex_gw_port,
fip['floating_ip_address'],
fip['fixed_ip_address'])
else:
# handle remapping of a floating IP
new_fip = id_to_fip_map[fip['id']]
new_fixed_ip = new_fip['fixed_ip_address']
existing_fixed_ip = fip['fixed_ip_address']
if (new_fixed_ip and existing_fixed_ip and
new_fixed_ip != existing_fixed_ip):
floating_ip = fip['floating_ip_address']
self._floating_ip_removed(ri, ri.ex_gw_port,
floating_ip,
existing_fixed_ip)
self._floating_ip_added(ri, ri.ex_gw_port,
floating_ip, new_fixed_ip)
ri.floating_ips.remove(fip)
ri.floating_ips.append(new_fip)
    def _router_added(self, router_id, router):
        """Operations when a router is added.
        Create a new RouterInfo object for this router and add it to the
        service helpers router_info dictionary. Then `router_added()` is
        called on the device driver.
        :param router_id: id of the router
        :param router: router dict
        :return: None
        """
        ri = RouterInfo(router_id, router)
        # Obtain/register the device driver for this router through the
        # driver manager before notifying it.
        driver = self._drivermgr.set_driver(router)
        driver.router_added(ri)
        self.router_info[router_id] = ri
    def _router_removed(self, router_id, deconfigure=True):
        """Operations when a router is removed.
        Get the RouterInfo object corresponding to the router in the service
        helpers's router_info dict. If deconfigure is set to True,
        remove this router's configuration from the hosting device.
        :param router_id: id of the router
        :param deconfigure: if True, the router's configuration is deleted from
        the hosting device.
        :return: None
        """
        ri = self.router_info.get(router_id)
        if ri is None:
            LOG.warning(_LW("Info for router %s was not found. "
                            "Skipping router removal"), router_id)
            return
        # Blank out gateway/interfaces/floating-ips on the dict so that
        # _process_router() below tears down everything configured.
        ri.router['gw_port'] = None
        ri.router[l3_constants.INTERFACE_KEY] = []
        ri.router[l3_constants.FLOATINGIP_KEY] = []
        try:
            if deconfigure:
                self._process_router(ri)
                driver = self._drivermgr.get_driver(router_id)
                driver.router_removed(ri, deconfigure)
                self._drivermgr.remove_driver(router_id)
            del self.router_info[router_id]
            self.removed_routers.discard(router_id)
        except cfg_exceptions.DriverException:
            # Retry the removal on a later cycle.
            LOG.warning(_LW("Router remove for router_id: %s was incomplete. "
                            "Adding the router to removed_routers list"), router_id)
            self.removed_routers.add(router_id)
            # remove this router from updated_routers if it is there. It might
            # end up there too if exception was thrown earlier inside
            # `_process_router()`
            self.updated_routers.discard(router_id)
    # Thin wrappers dispatching configuration changes to the device
    # driver of the router's hosting device.  NAT for internal networks
    # is only (de)configured when SNAT is enabled and a gateway exists.
    def _internal_network_added(self, ri, port, ex_gw_port):
        driver = self._drivermgr.get_driver(ri.id)
        driver.internal_network_added(ri, port)
        if ri.snat_enabled and ex_gw_port:
            driver.enable_internal_network_NAT(ri, port, ex_gw_port)
    def _internal_network_removed(self, ri, port, ex_gw_port):
        driver = self._drivermgr.get_driver(ri.id)
        driver.internal_network_removed(ri, port)
        if ri.snat_enabled and ex_gw_port:
            driver.disable_internal_network_NAT(ri, port, ex_gw_port)
    def _external_gateway_added(self, ri, ex_gw_port):
        driver = self._drivermgr.get_driver(ri.id)
        driver.external_gateway_added(ri, ex_gw_port)
        if ri.snat_enabled and ri.internal_ports:
            for port in ri.internal_ports:
                driver.enable_internal_network_NAT(ri, port, ex_gw_port)
    def _external_gateway_removed(self, ri, ex_gw_port):
        driver = self._drivermgr.get_driver(ri.id)
        # NAT is disabled before the gateway itself is removed.
        if ri.snat_enabled and ri.internal_ports:
            for port in ri.internal_ports:
                driver.disable_internal_network_NAT(ri, port, ex_gw_port)
        driver.external_gateway_removed(ri, ex_gw_port)
    def _floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
        driver = self._drivermgr.get_driver(ri.id)
        driver.floating_ip_added(ri, ex_gw_port, floating_ip, fixed_ip)
    def _floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
        driver = self._drivermgr.get_driver(ri.id)
        driver.floating_ip_removed(ri, ex_gw_port, floating_ip, fixed_ip)
def _routes_updated(self, ri):
"""Update the state of routes in the router.
Compares the current routes with the (configured) existing routes
and detect what was removed or added. Then configure the
logical router in the hosting device accordingly.
:param ri: RouterInfo corresponding to the router.
:return: None
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.DriverException
if the configuration operation fails.
"""
new_routes = ri.router['routes']
old_routes = ri.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
# remove replaced route from deleted route
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
driver = self._drivermgr.get_driver(ri.id)
driver.routes_updated(ri, 'replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
driver = self._drivermgr.get_driver(ri.id)
driver.routes_updated(ri, 'delete', route)
ri.routes = new_routes
    @staticmethod
    def _set_subnet_info(port):
        """Add an 'ip_cidr' entry ("<ip>/<prefixlen>") to the port dict.
        Uses the first fixed IP; additional IPs are logged and ignored.
        :raises: Exception if the port has no fixed IPs.
        """
        ips = port['fixed_ips']
        if not ips:
            raise Exception(_("Router port %s has no IP address") % port['id'])
        if len(ips) > 1:
            LOG.error(_LE("Ignoring multiple IPs on router port %s"),
                      port['id'])
        # Prefix length comes from the port's subnet CIDR.
        prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
        port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
| |
# -*- coding: utf-8 -*-
""" Sahana Eden Assets Model
@copyright: 2009-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Public API of this module
# NOTE(review): asset_rheader is defined below but deliberately not exported;
# asset_AssetRepresent is defined later in the file (outside this chunk).
__all__ = ("AssetModel",
           #"AssetHRModel",
           #"AssetTeamModel",
           "AssetTelephoneModel",
           #"asset_rheader",
           "asset_types",
           "asset_log_status",
           "asset_controller",
           "asset_AssetRepresent",
           )

import json

from gluon import *
from gluon.storage import Storage

from ..s3 import *
from s3layouts import S3PopupLink

# Dependency list for translate-module
depends = ["supply"]

# Asset types: determine which extra component tabs are offered in the UI
ASSET_TYPE_VEHICLE = 1   # => Extra Tab(s) for Registration Documents, Fuel Efficiency
#ASSET_TYPE_RADIO = 2    # => Extra Tab(s) for Radio Channels/Frequencies
ASSET_TYPE_TELEPHONE = 3 # => Extra Tab(s) for Contact Details & Airtime Billing
ASSET_TYPE_OTHER = 4     # => No extra Tabs

# To pass to global scope
asset_types = {"VEHICLE" : ASSET_TYPE_VEHICLE,
               #"RADIO" : ASSET_TYPE_RADIO,
               "TELEPHONE" : ASSET_TYPE_TELEPHONE,
               "OTHER" : ASSET_TYPE_OTHER,
               }

# Asset log status codes (values >= ASSET_LOG_DONATED mean "disposed")
ASSET_LOG_SET_BASE = 1
ASSET_LOG_ASSIGN = 2
ASSET_LOG_RETURN = 3
ASSET_LOG_CHECK = 4
ASSET_LOG_REPAIR = 5
ASSET_LOG_DONATED = 32
ASSET_LOG_LOST = 33
ASSET_LOG_STOLEN = 34
ASSET_LOG_DESTROY = 35

# To pass to global scope
asset_log_status = {"SET_BASE" : ASSET_LOG_SET_BASE,
                    "ASSIGN" : ASSET_LOG_ASSIGN,
                    "RETURN" : ASSET_LOG_RETURN,
                    "CHECK" : ASSET_LOG_CHECK,
                    "REPAIR" : ASSET_LOG_REPAIR,
                    "DONATED" : ASSET_LOG_DONATED,
                    "LOST" : ASSET_LOG_LOST,
                    "STOLEN" : ASSET_LOG_STOLEN,
                    "DESTROY" : ASSET_LOG_DESTROY,
                    }
# =============================================================================
class AssetModel(S3Model):
    """
    Asset Management

    Defines the asset register (asset_asset), ad-hoc kit components
    (asset_item) and the asset movement/status log (asset_log).
    """

    names = ("asset_asset",
             "asset_item",
             "asset_log",
             "asset_asset_id",
             )

    def model(self):
        """
        Define the asset tables and return names to global scope

        NOTE(review): "asset_represent" is returned below but not listed
        in names - presumably it is only used when the model has already
        been loaded; verify before relying on lazy access.
        """

        T = current.T
        db = current.db
        auth = current.auth

        user = auth.user
        LOGGED_IN = auth.is_logged_in()
        org_site_types = auth.org_site_types

        s3 = current.response.s3

        item_id = self.supply_item_id
        item_entity_id = self.supply_item_entity_id
        location_id = self.gis_location_id
        organisation_id = self.org_organisation_id
        person_id = self.pr_person_id

        NONE = current.messages["NONE"]

        settings = current.deployment_settings
        org_site_label = settings.get_org_site_label()
        #radios = settings.get_asset_radios()
        telephones = settings.get_asset_telephones()
        vehicles = settings.has_module("vehicle")
        # Whether the "type" field is exposed at all
        types = telephones or vehicles

        # Shortcuts
        add_components = self.add_components
        configure = self.configure
        crud_strings = s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link

        is_float_represent = IS_FLOAT_AMOUNT.represent
        float_represent = lambda v: is_float_represent(v, precision=2)

        # ---------------------------------------------------------------------
        # Assets
        #
        asset_type_opts = {ASSET_TYPE_OTHER : T("Other"),
                           }
        #if radios:
        #    asset_type_opts[ASSET_TYPE_RADIO] = T("Radio")
        if telephones:
            asset_type_opts[ASSET_TYPE_TELEPHONE] = T("Telephone")
        if vehicles:
            asset_type_opts[ASSET_TYPE_VEHICLE] = T("Vehicle")

        asset_condition_opts = {1: T("Good Condition"),
                                2: T("Minor Damage"),
                                3: T("Major Damage"),
                                4: T("Un-Repairable"),
                                5: T("Needs Maintenance"),
                                }

        # @ToDo: make this lookup Lazy (also in event.py)
        ctable = self.supply_item_category
        asset_categories = db(ctable.can_be_asset == True).select(ctable.id)
        asset_category_ids = [row.id for row in asset_categories]

        supply_item_represent = self.supply_item_represent

        tablename = "asset_asset"
        define_table(tablename,
                     # Instances
                     super_link("track_id", "sit_trackable"),
                     super_link("doc_id", "doc_entity"),
                     item_entity_id(),
                     Field("number",
                           label = T("Asset Number"),
                           ),
                     Field("type", "integer",
                           # @ToDo: We could set this automatically based on Item Category
                           default = ASSET_TYPE_OTHER,
                           label = T("Type"),
                           represent = S3Represent(options = asset_type_opts),
                           requires = IS_IN_SET(asset_type_opts),
                           readable = types,
                           writable = types,
                           ),
                     item_id(represent = supply_item_represent,
                             requires = IS_ONE_OF(db, "supply_item.id",
                                                  supply_item_represent,
                                                  filterby = "item_category_id",
                                                  filter_opts = asset_category_ids,
                                                  sort = True,
                                                  ),
                             script = None, # No Item Pack Filter
                             widget = None,
                             ),
                     Field("kit", "boolean",
                           default = False,
                           label = T("Kit?"),
                           represent = lambda opt: T("Yes") if opt else NONE,
                           # @ToDo: deployment_setting
                           readable = False,
                           writable = False,
                           ),
                     organisation_id(default = user.organisation_id if LOGGED_IN else None,
                                     requires = self.org_organisation_requires(updateable = True,
                                                                               #required = True,
                                                                               ),
                                     required = True,
                                     # Filter the Site dropdown to Sites of the selected Org
                                     script = \
'''$.filterOptionsS3({
 'trigger':'organisation_id',
 'target':'site_id',
 'lookupResource':'site',
 'lookupPrefix':'org',
 'lookupField':'site_id',
 'lookupURL':S3.Ap.concat('/org/sites_for_org.json/')
})''',
                                     ),
                     # This is a component, so needs to be a super_link
                     # - can't override field name, ondelete or requires
                     super_link("site_id", "org_site",
                                default = user.site_id if LOGGED_IN else None,
                                empty = False,
                                instance_types = org_site_types,
                                label = org_site_label,
                                ondelete = "RESTRICT",
                                readable = True,
                                writable = True,
                                represent = self.org_site_represent,
                                # Comment these to use a Dropdown & not an Autocomplete
                                #widget = S3SiteAutocompleteWidget(),
                                #comment = DIV(_class = "tooltip",
                                #              _title = "%s|%s" % (T("Warehouse"),
                                #                                  messages.AUTOCOMPLETE_HELP,
                                #                                  ),
                                #              ),
                                ),
                     Field("sn",
                           label = T("Serial Number"),
                           ),
                     organisation_id("supply_org_id",
                                     label = T("Supplier/Donor"),
                                     ondelete = "SET NULL",
                                     ),
                     s3_date("purchase_date",
                             label = T("Purchase Date"),
                             ),
                     Field("purchase_price", "double",
                           #default = 0.00,
                           label = T("Purchase Price"),
                           represent = float_represent,
                           ),
                     s3_currency("purchase_currency"),
                     # Base Location, which should always be a Site & set via Log
                     location_id(readable = False,
                                 writable = False,
                                 ),
                     # Populated onaccept of the log to make a component tab
                     person_id("assigned_to_id",
                               readable = False,
                               writable = False,
                               comment = self.pr_person_comment(child = "assigned_to_id"),
                               ),
                     # Populated onaccept of the log for reporting/filtering
                     Field("cond", "integer",
                           label = T("Condition"),
                           represent = S3Represent(options = asset_condition_opts),
                           #readable = False,
                           writable = False,
                           ),
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD strings
        ADD_ASSET = T("Create Asset")
        crud_strings[tablename] = Storage(
            label_create = ADD_ASSET,
            title_display = T("Asset Details"),
            title_list = T("Assets"),
            title_update = T("Edit Asset"),
            title_upload = T("Import Assets"),
            label_list_button = T("List Assets"),
            label_delete_button = T("Delete Asset"),
            msg_record_created = T("Asset added"),
            msg_record_modified = T("Asset updated"),
            msg_record_deleted = T("Asset deleted"),
            msg_list_empty = T("No Assets currently registered"),
            )

        asset_represent = asset_AssetRepresent(show_link = True)

        # Reusable Field
        asset_id = S3ReusableField("asset_id", "reference %s" % tablename,
                                   label = T("Asset"),
                                   ondelete = "CASCADE",
                                   represent = asset_represent,
                                   requires = IS_EMPTY_OR(
                                                IS_ONE_OF(db, "asset_asset.id",
                                                          asset_represent,
                                                          sort = True,
                                                          )),
                                   sortby = "number",
                                   comment = S3PopupLink(c = "asset",
                                                         f = "asset",
                                                         label = ADD_ASSET,
                                                         title = T("Asset"),
                                                         ),
                                   )

        # Which levels of Hierarchy are we using?
        levels = current.gis.get_relevant_hierarchy_levels()

        list_fields = ["item_id$item_category_id",
                       "item_id",
                       "number",
                       #"type",
                       #"purchase_date",
                       (T("Assigned To"), "assigned_to_id"),
                       "organisation_id",
                       "site_id",
                       ]

        report_fields = ["number",
                         (T("Category"), "item_id$item_category_id"),
                         (T("Item"), "item_id"),
                         "organisation_id",
                         "site_id",
                         "cond",
                         ]

        text_fields = ["number",
                       "item_id$name",
                       #"item_id$category_id$name",
                       "comments",
                       ]

        # Add the location hierarchy levels to all 3 field sets
        for level in levels:
            lfield = "location_id$%s" % level
            report_fields.append(lfield)
            text_fields.append(lfield)
            list_fields.append(lfield)

        list_fields.extend(("cond",
                            "comments",
                            ))

        if settings.get_org_branches():
            org_filter = S3HierarchyFilter("organisation_id",
                                           # Can be unhidden in customise_xx_resource if there is a need to use a default_filter
                                           hidden = True,
                                           leafonly = False,
                                           )
        else:
            org_filter = S3OptionsFilter("organisation_id",
                                         search = True,
                                         header = "",
                                         # Can be unhidden in customise_xx_resource if there is a need to use a default_filter
                                         hidden = True,
                                         )

        filter_widgets = [
            S3TextFilter(text_fields,
                         label = T("Search"),
                         comment = T("You can search by asset number, item description or comments. You may use % as wildcard. Press 'Search' without input to list all assets."),
                         #_class = "filter-search",
                         ),
            S3OptionsFilter("item_id$item_category_id",
                            ),
            org_filter,
            S3LocationFilter("location_id",
                             levels = levels,
                             hidden = True,
                             ),
            S3OptionsFilter("cond",
                            hidden = True,
                            ),
            ]

        report_options = Storage(
            rows = report_fields,
            cols = report_fields,
            fact = [(T("Number of items"), "count(number)")],
            defaults=Storage(cols = "location_id$%s" % levels[0], # Highest-level of hierarchy
                             fact = "count(number)",
                             rows = "item_id$item_category_id",
                             totals = True,
                             )
            )

        # Default summary
        summary = [{"name": "addform",
                    "common": True,
                    "widgets": [{"method": "create"}],
                    },
                   {"name": "table",
                    "label": "Table",
                    "widgets": [{"method": "datatable"}]
                    },
                   {"name": "report",
                    "label": "Report",
                    "widgets": [{"method": "report",
                                 "ajax_init": True}]
                    },
                   {"name": "map",
                    "label": "Map",
                    "widgets": [{"method": "map",
                                 "ajax_init": True}],
                    },
                   ]

        # Resource Configuration
        configure(tablename,
                  context = {"incident": "incident.id",
                             "location": "location_id",
                             "organisation": "organisation_id",
                             },
                  # Open Tabs after creation
                  create_next = URL(c="asset", f="asset",
                                    args = ["[id]"],
                                    ),
                  deduplicate = S3Duplicate(primary = ("number",
                                                       ),
                                            secondary = ("site_id",
                                                         "organisation_id",
                                                         ),
                                            ignore_case = False,
                                            ),
                  filter_widgets = filter_widgets,
                  list_fields = list_fields,
                  mark_required = ("organisation_id",),
                  onaccept = self.asset_onaccept,
                  realm_components = ("log", "presence"),
                  report_options = report_options,
                  summary = summary,
                  super_entity = ("doc_entity", "sit_trackable", "supply_item_entity"),
                  update_realm = True,
                  )

        # Components
        add_components(tablename,
                       asset_group = "asset_id",
                       asset_item = "asset_id",
                       asset_log = "asset_id",
                       asset_human_resource = "asset_id",
                       asset_telephone = "asset_id",
                       asset_telephone_usage = "asset_id",
                       event_incident = {"link": "event_asset",
                                         "joinby": "asset_id",
                                         "key": "incident_id",
                                         "actuate": "hide",
                                         },
                       hrm_human_resource = {"link": "asset_human_resource",
                                             "joinby": "asset_id",
                                             "key": "human_resource_id",
                                             "actuate": "hide",
                                             },
                       vehicle_gps = "asset_id",
                       vehicle_vehicle = {"joinby": "asset_id",
                                          "multiple": False,
                                          },
                       )

        # =====================================================================
        # Asset Items
        # - to allow building ad-hoc Kits
        #
        tablename = "asset_item"
        define_table(tablename,
                     item_entity_id(),
                     asset_id(ondelete = "CASCADE"),
                     item_id(represent = supply_item_represent,
                             requires = IS_ONE_OF(db, "supply_item.id",
                                                  supply_item_represent,
                                                  filterby = "item_category_id",
                                                  filter_opts = asset_category_ids,
                                                  sort = True,
                                                  ),
                             script = None, # No Item Pack Filter
                             widget = None,
                             ),
                     Field("quantity", "integer", notnull=True,
                           default = 1,
                           label = T("Quantity"),
                           requires = IS_INT_IN_RANGE(1, None),
                           ),
                     Field("sn",
                           label = T("Serial Number"),
                           ),
                     organisation_id("supply_org_id",
                                     label = T("Supplier/Donor"),
                                     ondelete = "SET NULL",
                                     ),
                     s3_date("purchase_date",
                             label = T("Purchase Date"),
                             ),
                     Field("purchase_price", "double",
                           #default = 0.00,
                           represent = float_represent,
                           ),
                     s3_currency("purchase_currency"),
                     # Base Location, which should always be a Site & set via Log
                     location_id(readable = False,
                                 writable = False,
                                 ),
                     s3_comments(comment = None),
                     *s3_meta_fields())

        # =====================================================================
        # Asset Log
        #
        asset_log_status_opts = {ASSET_LOG_SET_BASE : T("Base %(facility)s Set") % {"facility": org_site_label},
                                 ASSET_LOG_ASSIGN : T("Assigned"),
                                 ASSET_LOG_RETURN : T("Returned"),
                                 ASSET_LOG_CHECK : T("Checked"),
                                 ASSET_LOG_REPAIR : T("Repaired"),
                                 ASSET_LOG_DONATED : T("Donated"),
                                 ASSET_LOG_LOST : T("Lost"),
                                 ASSET_LOG_STOLEN : T("Stolen"),
                                 ASSET_LOG_DESTROY : T("Destroyed"),
                                 }

        if auth.permission.format == "html":
            # T isn't JSON serializable
            site_types = {key: s3_str(org_site_types[key])
                          for key in org_site_types}
            site_types = json.dumps(site_types)
            script = '''
$.filterOptionsS3({
 'trigger':'organisation_id',
 'target':'site_id',
 'lookupPrefix':'org',
 'lookupResource':'site',
 'lookupField':'site_id',
 'fncRepresent': function(record,PrepResult){
  var InstanceTypeNice=%(instance_type_nice)s
  return record.name+" ("+InstanceTypeNice[record.instance_type]+")"
 }})''' % {"instance_type_nice": site_types}
        else:
            script = None

        tablename = "asset_log"
        define_table(tablename,
                     asset_id(),
                     Field("status", "integer",
                           label = T("Status"),
                           represent = S3Represent(options = asset_log_status_opts),
                           requires = IS_IN_SET(asset_log_status_opts),
                           ),
                     s3_datetime(default = "now",
                                 empty = False,
                                 represent = "date",
                                 ),
                     s3_datetime("date_until",
                                 label = T("Date Until"),
                                 represent = "date",
                                 ),
                     person_id(label = T("Assigned To")),
                     Field("check_in_to_person", "boolean",
                           label = T("Track with this Person?"),
                           comment = DIV(_class = "tooltip",
                                         _title = "%s|%s" % (T("Track with this Person?"),
                                                             T("If selected, then this Asset's Location will be updated whenever the Person's Location is updated."),
                                                             ),
                                         ),
                           readable = False,
                           writable = False,
                           ),
                     # The Organisation to whom the loan is made
                     organisation_id(readable = False,
                                     widget = None,
                                     writable = False,
                                     ),
                     # This is a component, so needs to be a super_link
                     # - can't override field name, ondelete or requires
                     super_link("site_id", "org_site",
                                #default = user.site_id if LOGGED_IN else None,
                                empty = False,
                                label = org_site_label,
                                #filterby = "site_id",
                                #filter_opts = auth.permitted_facilities(redirect_on_error=False),
                                instance_types = org_site_types,
                                not_filterby = "obsolete",
                                not_filter_opts = (True,),
                                represent = self.org_site_represent,
                                readable = True,
                                writable = True,
                                script = script,
                                updateable = True,
                                #widget = S3SiteAutocompleteWidget(),
                                ),
                     self.org_site_layout_id(# This has the URL adjusted for the right site_id by s3.asset_log.js
                                             comment = S3PopupLink(c = "org",
                                                                   f = "site",
                                                                   args = ["[id]", "layout", "create"],
                                                                   vars = {"prefix": "asset",
                                                                           "parent": "log",
                                                                           "child": "layout_id",
                                                                           },
                                                                   label = T("Create Location"),
                                                                   _id = "asset_log_layout_id-create-btn",
                                                                   ),
                                             ),
                     Field("cancel", "boolean",
                           default = False,
                           label = T("Cancel Log Entry"),
                           represent = s3_yes_no_represent,
                           comment = DIV(_class = "tooltip",
                                         _title = "%s|%s" % (T("Cancel Log Entry"),
                                                             T("'Cancel' will indicate an asset log entry did not occur"),
                                                             ),
                                         ),
                           ),
                     Field("cond", "integer", # condition is a MySQL reserved word
                           label = T("Condition"),
                           represent = S3Represent(options = asset_condition_opts),
                           requires = IS_IN_SET(asset_condition_opts,
                                                zero = "%s..." % T("Please select")),
                           ),
                     person_id("by_person_id",
                               default = auth.s3_logged_in_person(), # This can either be the Asset controller if signed-out from the store
                               label = T("Assigned By"),             # or the previous owner if passed on directly (e.g. to successor in their post)
                               comment = self.pr_person_comment(child = "by_person_id"),
                               ),
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD strings
        crud_strings[tablename] = Storage(
            label_create = T("New Entry in Asset Log"),
            title_display = T("Asset Log Details"),
            title_list = T("Asset Log"),
            title_update = T("Edit Asset Log Entry"),
            label_list_button = T("Asset Log"),
            label_delete_button = T("Delete Asset Log Entry"),
            msg_record_created = T("Entry added to Asset Log"),
            msg_record_modified = T("Asset Log Entry updated"),
            msg_record_deleted = T("Asset Log Entry deleted"),
            msg_list_empty = T("Asset Log Empty"),
            )

        # Resource configuration
        configure(tablename,
                  listadd = False,
                  list_fields = ["date",
                                 "status",
                                 "date_until",
                                 "organisation_id",
                                 "site_id",
                                 "layout_id",
                                 "person_id",
                                 #"location_id",
                                 "cancel",
                                 "cond",
                                 "comments",
                                 ],
                  onaccept = self.asset_log_onaccept,
                  orderby = "asset_log.date desc",
                  )

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return {"asset_asset_id": asset_id,
                "asset_represent": asset_represent,
                }

    # -------------------------------------------------------------------------
    @staticmethod
    def defaults():
        """ Return safe defaults for names in case the model is disabled """

        return {"asset_asset_id": S3ReusableField.dummy("asset_id"),
                }

    # -------------------------------------------------------------------------
    @staticmethod
    def asset_onaccept(form):
        """
        After DB I/O of an Asset:
            - set the Base Location (and add a log entry) when the Site
              has changed
            - for Kits: propagate the location to component items and
              clear per-asset purchase fields
            - for non-Kits: delete any component items

        @param form: the FORM
        """

        if current.response.s3.bulk:
            # Import or Sync
            return

        db = current.db
        atable = db.asset_asset
        form_vars_get = form.vars.get
        asset_id = form_vars_get("id")
        kit = form_vars_get("kit")
        organisation_id = form_vars_get("organisation_id")
        site_id = form_vars_get("site_id")
        if not organisation_id or not site_id:
            # Component Tab: load record to read
            asset = db(atable.id == asset_id).select(atable.organisation_id,
                                                     atable.site_id,
                                                     limitby = (0, 1),
                                                     ).first()
            organisation_id = asset.organisation_id
            site_id = asset.site_id

        record = form.record
        if record:
            old_site_id = record.site_id
        else:
            old_site_id = None
        if site_id and \
           site_id != old_site_id:
            # Set the Base Location
            stable = db.org_site
            site = db(stable.site_id == site_id).select(stable.location_id,
                                                        limitby = (0, 1),
                                                        ).first()
            location_id = site.location_id
            tracker = S3Tracker()
            asset_tracker = tracker(atable, asset_id)
            asset_tracker.set_base_location(location_id)
            if kit:
                # Also populate location_id field in component items
                aitable = db.asset_item
                db(aitable.asset_id == asset_id).update(location_id = location_id)

            # Add a log entry for this
            ltable = db.asset_log
            ltable.insert(asset_id = asset_id,
                          status = ASSET_LOG_SET_BASE,
                          organisation_id = organisation_id,
                          site_id = site_id,
                          cond = 1,
                          )

        if kit:
            # Empty any inappropriate fields
            # BUGFIX: the field is defined as "supply_org_id" (not
            # "supplier_org_id"), so the previous name raised on update
            db(atable.id == asset_id).update(supply_org_id = None,
                                             purchase_date = None,
                                             purchase_price = None,
                                             purchase_currency = None,
                                             )
        else:
            # Delete any component items
            aitable = db.asset_item
            ids = db(aitable.asset_id == asset_id).select(aitable.id).as_list()
            if ids:
                resource = current.s3db.resource("asset_item",
                                                 id = ids,
                                                 )
                resource.delete()

    # -------------------------------------------------------------------------
    @staticmethod
    def asset_log_onaccept(form):
        """
        After DB I/O of a Log Entry:
            - update the Asset's (base) location, assignee and condition
              when this entry is the current (latest) one

        @param form: the FORM
        """

        # Custom methods to allow form customization for specific cases
        # Original method passed from asset_log_prep()
        method = current.response.s3.asset_log_method
        if method == "setbase":
            status = ASSET_LOG_SET_BASE
        elif method in ("assignperson", "assignsite", "assignorg"):
            status = ASSET_LOG_ASSIGN
        else:
            status = None

        if not status:
            if not current.response.s3.asset_import:
                # e.g. Record merger or Sync
                return
            # Import
            db = current.db
            form_vars = form.vars
            asset_id = form_vars.asset_id
            status = int(form_vars.status)
            new = True
        else:
            # Interactive
            form_vars = form.vars
            status = int(form_vars.status or status)

            db = current.db
            ltable = db.asset_log
            row = db(ltable.id == form_vars.id).select(ltable.asset_id,
                                                       limitby = (0, 1),
                                                       ).first()
            try:
                asset_id = row.asset_id
            except AttributeError:
                # Log record not found (row is None)
                return

            current_log = asset_get_current_log(asset_id)
            log_time = current_log.date
            current_time = form_vars.get("date").replace(tzinfo = None)
            new = log_time <= current_time

        if new:
            # This is a current assignment
            atable = db.asset_asset
            aitable = db.asset_item
            tracker = S3Tracker()
            asset_tracker = tracker(atable, asset_id)

            if status == ASSET_LOG_SET_BASE:
                # Set Base Location
                site_id = form_vars.get("site_id")
                stable = db.org_site
                site = db(stable.site_id == site_id).select(stable.location_id,
                                                            limitby = (0, 1),
                                                            ).first()
                location_id = site.location_id
                asset_tracker.set_base_location(location_id)
                # Also do component items
                db(aitable.asset_id == asset_id).update(location_id = location_id)

            elif status == ASSET_LOG_ASSIGN:
                if method == "assignsite":
                    asset_tracker.check_in(db.org_site, form_vars.site_id,
                                           timestmp = current.request.utcnow)
                    # Also do component items
                    locations = asset_tracker.get_location(_fields = [db.gis_location.id])
                    try:
                        db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
                    except (IndexError, TypeError):
                        # No location known for the tracker
                        pass

                elif method == "assignorg":
                    site_id = form_vars.get("site_id")
                    if site_id:
                        asset_tracker.check_in(db.org_site, site_id,
                                               timestmp = current.request.utcnow)
                        # Also do component items
                        locations = asset_tracker.get_location(_fields = [db.gis_location.id])
                        try:
                            db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
                        except (IndexError, TypeError):
                            # No location known for the tracker
                            pass
                    else:
                        # We can no longer track location
                        asset_tracker.check_out()

                else:
                    # method == "assignperson"
                    if form_vars.check_in_to_person:
                        asset_tracker.check_in(db.pr_person, form_vars.person_id,
                                               timestmp = current.request.utcnow)
                        # Also do component items
                        # @ToDo: Have these move when the person moves
                        locations = asset_tracker.get_location(_fields = [db.gis_location.id])
                        try:
                            db(aitable.asset_id == asset_id).update(location_id = locations[0].id)
                        except (IndexError, TypeError):
                            # No location known for the tracker
                            pass
                    else:
                        location_id = asset_tracker.set_location(form_vars.person_id,
                                                                 timestmp = current.request.utcnow)
                        # Also do component items
                        db(aitable.asset_id == asset_id).update(location_id = location_id)
                    # Update main record for component
                    db(atable.id == asset_id).update(assigned_to_id = form_vars.person_id)

            elif status == ASSET_LOG_RETURN:
                # Set location to base location
                # NOTE(review): passing the tracker itself presumably resolves
                # to the asset's own base location - confirm in S3Trackable
                location_id = asset_tracker.set_location(asset_tracker,
                                                         timestmp = current.request.utcnow)
                # Also do component items
                db(aitable.asset_id == asset_id).update(location_id = location_id)

            # Update condition in main record
            db(atable.id == asset_id).update(cond = form_vars.cond)
# =============================================================================
#class AssetHRModel(S3Model):
# """
# Optionally link Assets to Human Resources
# - useful for staffing a vehicle
# """
# names = ("asset_human_resource",)
# def model(self):
# #T = current.T
# # ---------------------------------------------------------------------
# # Assets <> Human Resources
# #
# tablename = "asset_human_resource"
# self.define_table(tablename,
# self.asset_asset_id(empty = False),
# self.hrm_human_resource_id(empty = False,
# ondelete = "CASCADE",
# ),
# #s3_comments(),
# *s3_meta_fields())
# # ---------------------------------------------------------------------
# # Pass names back to global scope (s3.*)
# #
# return {}
# =============================================================================
#class AssetTeamModel(S3Model):
# """
# Optionally link Assets to Teams
# """
# names = ("asset_group",)
# def model(self):
# #T = current.T
# # ---------------------------------------------------------------------
# # Assets <> Groups
# #
# tablename = "asset_group"
# self.define_table(tablename,
# self.asset_asset_id(empty = False),
# self.pr_group_id(comment = None,
# empty = False,
# ),
# #s3_comments(),
# *s3_meta_fields())
# # ---------------------------------------------------------------------
# # Pass names back to global scope (s3.*)
# #
# return {}
# =============================================================================
class AssetTelephoneModel(S3Model):
    """
    Extend the Asset Module for Telephones:
        Usage Costs
    """

    names = ("asset_telephone",
             "asset_telephone_usage",
             )

    def model(self):
        """ Define asset_telephone and asset_telephone_usage tables """

        T = current.T

        # ---------------------------------------------------------------------
        # Asset Telephones
        # - per-asset contract details (airtime provider)
        #
        tablename = "asset_telephone"
        self.define_table(tablename,
                          self.asset_asset_id(empty = False),
                          # @ToDo: Filter to Suppliers
                          self.org_organisation_id(label = T("Airtime Provider")),
                          # We'll need something more complex here as there may be a per-month cost with bundled units
                          #Field("unit_cost", "double",
                          #      label = T("Unit Cost"),
                          #      ),
                          s3_comments(),
                          *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Telephone Usage Costs
        # - one record per billing period
        #
        # @ToDo: Virtual Fields for Month/Year for Reporting
        #
        tablename = "asset_telephone_usage"
        self.define_table(tablename,
                          self.asset_asset_id(empty = False),
                          s3_date(label = T("Start Date")),
                          # @ToDo: Validation to ensure not before Start Date
                          s3_date("end_date",
                                  label = T("End Date"),
                                  start_field = "asset_telephone_usage_date",
                                  default_interval = 1,
                                  ),
                          Field("units_used", "double", # 'usage' is a reserved word in MySQL
                                label = T("Usage"),
                                ),
                          # mins, Mb (for BGANs)
                          #Field("unit",
                          #      label = T("Usage"),
                          #      ),
                          # @ToDo: Calculate this from asset_telephone fields
                          #Field("cost", "double",
                          #      label = T("Cost"),
                          #      ),
                          #s3_currency(),
                          s3_comments(),
                          *s3_meta_fields())

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return {}
# =============================================================================
def asset_get_current_log(asset_id):
    """
    Get the current (most recent, non-cancelled) log entry for an asset

    @param asset_id: the asset record ID
    @return: a Storage with the date, person_id, cond, status,
             organisation_id and site_id of the latest log entry,
             or an empty Storage if there is none
    """

    ltable = current.s3db.asset_log
    query = (ltable.asset_id == asset_id) & \
            (ltable.cancel == False)
    # The log with the maximum time is the current one
    entry = current.db(query).select(ltable.id,
                                     ltable.status,
                                     ltable.date,
                                     ltable.cond,
                                     ltable.person_id,
                                     ltable.organisation_id,
                                     ltable.site_id,
                                     #ltable.location_id,
                                     limitby = (0, 1),
                                     orderby = ~ltable.date,
                                     ).first()
    if not entry:
        return Storage()
    return Storage(date = entry.date,
                   person_id = entry.person_id,
                   cond = int(entry.cond or 0),
                   status = int(entry.status or 0),
                   organisation_id = entry.organisation_id,
                   site_id = entry.site_id,
                   #location_id = entry.location_id
                   )
# =============================================================================
def asset_log_prep(r):
    """
    Called by Controller

    Prepares the asset_log form for the requested custom method
    (setbase / assignperson / assignsite / assignorg / return),
    toggling field visibility, defaults and validators accordingly.

    @param r: the S3Request
    """

    T = current.T
    db = current.db
    table = db.asset_log

    # Custom methods to allow form customization for specific cases
    method = r.method
    # Pass method via response.s3.asset_log_method to asset_log_onaccept()
    current.response.s3.asset_log_method = method

    if method == "update":
        # Can only Cancel entry: lock down every other field
        for f in table.fields:
            if f == "cancel":
                continue
        else:
                table[f].writable = False
        return
    elif method == "read":
        return

    # This causes an error with the dataTables paginate
    # if used only in r.interactive & not also r.representation=="aadata"
    table.cancel.readable = table.cancel.writable = False

    current_log = asset_get_current_log(r.id)

    # Map the custom method to a log status & force a create form
    if method == "setbase":
        status = ASSET_LOG_SET_BASE
        r.method = "create"
    elif method in ("assignperson", "assignsite", "assignorg"):
        status = ASSET_LOG_ASSIGN
        r.method = "create"
    elif method == "return":
        status = ASSET_LOG_RETURN
        r.method = "create"
    else:
        # Generic "Update Status" create form
        status = 0

    if status:
        # Status is implied by the method: hide the selector
        field = table.status
        field.default = status
        field.readable = field.writable = False
    elif current_log:
        table.status.default = current_log.status

    crud_strings = current.response.s3.crud_strings.asset_log
    if status == ASSET_LOG_SET_BASE:
        crud_strings.msg_record_created = T("Base Facility/Site Set")
        table.by_person_id.label = T("Set By")
        table.date_until.readable = table.date_until.writable = False
        table.person_id.readable = table.person_id.writable = False
        table.organisation_id.readable = table.organisation_id.writable = True
        # Start Empty
        # NOTE(review): assumes the layout_id widget supports a .filter
        # attribute (hierarchy widget) - confirm
        table.layout_id.widget.filter = (FS("site_id") == 0)

    elif status == ASSET_LOG_RETURN:
        crud_strings.msg_record_created = T("Returned")
        table.date_until.readable = table.date_until.writable = False
        table.person_id.readable = table.person_id.writable = False
        table.by_person_id.default = current_log.person_id
        table.by_person_id.label = T("Returned By")
        #table.organisation_id.default = current_log.organisation_id
        #table.site_id.default = current_log.site_id
        table.site_id.readable = table.site_id.writable = False
        table.layout_id.readable = table.layout_id.writable = False

    elif status == ASSET_LOG_ASSIGN:
        if method == "assignperson":
            crud_strings.msg_record_created = T("Assigned to Person")
            # Make person_id mandatory (strip the IS_EMPTY_OR wrapper)
            table.person_id.requires = table.person_id.requires.other
            table.check_in_to_person.readable = table.check_in_to_person.writable = True
            table.site_id.requires = IS_EMPTY_OR(table.site_id.requires)
            # Start Empty
            table.layout_id.widget.filter = (FS("site_id") == 0)
        elif method == "assignsite":
            crud_strings.msg_record_created = T("Assigned to Facility/Site")
            # Start Empty
            table.layout_id.widget.filter = (FS("site_id") == 0)
        elif method == "assignorg":
            crud_strings.msg_record_created = T("Assigned to Organization")
            field = table.organisation_id
            field.readable = field.writable = True
            # Make organisation_id mandatory (strip the IS_EMPTY_OR wrapper)
            field.requires = field.requires.other
            table.site_id.requires = IS_EMPTY_OR(table.site_id.requires)
            # Start Empty
            table.layout_id.widget.filter = (FS("site_id") == 0)

    else:
        # Can Update Status &/or Condition
        crud_strings.msg_record_created = T("Status Updated")
        table.by_person_id.label = T("Updated By")
        table.date_until.readable = table.date_until.writable = False
        table.person_id.readable = table.person_id.writable = False
        table.site_id.readable = table.site_id.writable = False
        table.layout_id.readable = table.layout_id.writable = False
        field = table.status
        field.readable = field.writable = True
        field.requires = IS_IN_SET({ASSET_LOG_CHECK : T("Check"),
                                    ASSET_LOG_REPAIR : T("Repair"),
                                    ASSET_LOG_DONATED : T("Donated"),
                                    ASSET_LOG_LOST : T("Lost"),
                                    ASSET_LOG_STOLEN : T("Stolen"),
                                    ASSET_LOG_DESTROY : T("Destroyed"),
                                    })
# =============================================================================
def asset_rheader(r):
    """
    Resource Header for Assets

    @param r: the S3Request
    @return: the rheader DIV (also sets the rfooter action buttons),
             or None for non-HTML representations / missing records
    """

    if r.representation == "html":
        record = r.record
        if record:
            T = current.T
            s3db = current.s3db
            s3 = current.response.s3

            if record.type == ASSET_TYPE_TELEPHONE:
                tabs = [(T("Asset Details"), None, {"native": True}),
                        (T("Telephone Details"), "telephone"),
                        (T("Usage"), "telephone_usage"),
                        ]
            #elif record.type == s3.asset.ASSET_TYPE_RADIO:
            #    tabs.append((T("Radio Details"), "radio"))
            elif record.type == ASSET_TYPE_VEHICLE:
                STAFF = current.deployment_settings.get_hrm_staff_label()
                tabs = [(T("Asset Details"), None, {"native": True}),
                        (T("Vehicle Details"), "vehicle"),
                        (STAFF, "human_resource"),
                        (T("Assign %(staff)s") % {"staff": STAFF}, "assign"),
                        (T("Check-In"), "check-in"),
                        (T("Check-Out"), "check-out"),
                        (T("GPS Data"), "presence"),
                        ]
            else:
                # Generic Asset
                tabs = [(T("Edit Details"), None)]
            tabs += [(T("Log"), "log"),
                     (T("Documents"), "document"),
                     ]

            rheader_tabs = s3_rheader_tabs(r, tabs)

            asset_id = record.id
            current_log = asset_get_current_log(asset_id)
            # BUGFIX: status is None when the asset has no log entries;
            # default to 0 so the integer comparisons below cannot raise
            # a TypeError (Python 3)
            status = current_log.status or 0

            table = r.table
            ltable = s3db.asset_log
            rheader = DIV(TABLE(TR(TH("%s: " % table.number.label),
                                   record.number,
                                   TH("%s: " % table.item_id.label),
                                   table.item_id.represent(record.item_id)
                                   ),
                                TR(TH("%s: " % ltable.cond.label),
                                   ltable.cond.represent(current_log.cond),
                                   TH("%s: " % ltable.status.label),
                                   ltable.status.represent(status),
                                   ),
                                TR(TH("%s: " % ltable.person_id.label),
                                   ltable.person_id.represent(current_log.person_id),
                                   TH("%s: " % ltable.site_id.label),
                                   ltable.site_id.represent(current_log.site_id),
                                   ),
                                ),
                          rheader_tabs,
                          )

            # RFooter Buttons
            # @ToDo: Check permissions before displaying buttons
            if r.controller == "vehicle":
                func = "vehicle"
            else:
                func = "asset"
            asset_action_btns = [A(T("Set Base Facility/Site"),
                                   _href = URL(f=func,
                                               args = [asset_id, "log", "setbase"],
                                               ),
                                   _class = "action-btn",
                                   ),
                                 ]
            #if record.location_id:
            # A Base Site has been set
            # Return functionality removed  - as it doesn't set site_id & organisation_id in the logs
            if status == ASSET_LOG_ASSIGN:
                asset_action_btns.append(A(T("Return"),
                                           _href = URL(f=func,
                                                       args = [asset_id, "log", "return"],
                                                       ),
                                           _class = "action-btn",
                                           ))
            if status < ASSET_LOG_DONATED:
                # The Asset is available for assignment (not disposed)
                # @ToDo: deployment setting to prevent assigning assets before returning them
                asset_action_btns += [A(T("Assign to Person"),
                                        _href = URL(f=func,
                                                    args = [asset_id, "log", "assignperson"],
                                                    ),
                                        _class = "action-btn",
                                        ),
                                      A(T("Assign to Facility/Site"),
                                        _href = URL(f=func,
                                                    args = [asset_id, "log", "assignsite"],
                                                    ),
                                        _class = "action-btn",
                                        ),
                                      A(T("Assign to Organization"),
                                        _href = URL(f=func,
                                                    args = [asset_id, "log", "assignorg"],
                                                    ),
                                        _class = "action-btn",
                                        ),
                                      ]
            asset_action_btns.append(A(T("Update Status"),
                                       _href = URL(f=func,
                                                   args = [asset_id, "log", "create"],
                                                   ),
                                       _class = "action-btn",
                                       ))
            s3.rfooter = TAG[""](*asset_action_btns)
            return rheader
    return None
# =============================================================================
def asset_controller():
    """
        RESTful CRUD controller for Assets

        Wires up the pre-/post-processors and delegates to the generic
        REST controller with the asset rheader.
    """

    s3 = current.response.s3

    # Pre-process
    def prep(r):
        # Apply the Location Filter
        from .gis import gis_location_filter
        gis_location_filter(r)

        if r.component_name == "log":
            asset_log_prep(r)
            return True

        item_id = r.get_vars.get("item_id")
        if item_id:
            # e.g. coming from Incident Action Plan:
            # fix the item & hide the selector
            field = r.table.item_id
            field.default = item_id
            field.writable = False
            field.comment = False
        return True
    s3.prep = prep

    # Import pre-process
    def import_prep(data):
        # Flag that this is an Import (to distinguish from Sync)
        current.response.s3.asset_import = True
    s3.import_prep = import_prep

    # Post-processor
    def postp(r, output):
        if r.interactive and r.method != "import":
            if r.component_name == "log":
                script_name = "s3.asset_log.js"
            else:
                script_name = "s3.asset.js"
                S3CRUD.action_buttons(r, deletable=False)
            s3.scripts.append("/%s/static/scripts/S3/%s" % (r.application,
                                                            script_name))
        return output
    s3.postp = postp

    return current.rest_controller("asset", "asset",
                                   rheader = asset_rheader,
                                   )
# =============================================================================
class asset_AssetRepresent(S3Represent):
    """ Representation of Assets as "number (item[, brand])" """

    def __init__(self,
                 fields = ("number",), # unused
                 show_link = False,
                 translate = False,
                 multiple = False,
                 ):

        # Delegate to S3Represent with the asset_asset lookup table
        super(asset_AssetRepresent,
              self).__init__(lookup = "asset_asset",
                             fields = fields,
                             show_link = show_link,
                             translate = translate,
                             multiple = multiple,
                             )

    # -------------------------------------------------------------------------
    def lookup_rows(self, key, values, fields=None):
        """
            Custom lookup method for asset rows: joins the supply item
            and left-joins the brand. Parameters key and fields are not
            used, but are kept for API compatibility reasons.

            @param values: the asset_asset record IDs
        """

        db = current.db
        s3db = current.s3db
        table = s3db.asset_asset
        itable = db.supply_item
        btable = db.supply_brand

        qty = len(values)
        if qty == 1:
            query = (table.id == values[0])
            limitby = (0, 1)
        else:
            query = (table.id.belongs(values))
            limitby = (0, qty)
        # Inner join with the supply item (every asset has one)
        query &= (itable.id == table.item_id)

        rows = db(query).select(table.id,
                                table.number,
                                table.type,
                                itable.name,
                                btable.name,
                                # Brand is optional => left join
                                left = btable.on(itable.brand_id == btable.id),
                                limitby = limitby,
                                )
        self.queries += 1
        return rows

    # -------------------------------------------------------------------------
    def represent_row(self, row):
        """
            Represent a single Row

            @param row: the asset_asset Row (with the item & brand left-joined)
            @return: "number (item)" or "number (item, brand)"
        """

        # Custom Row (with the item & brand left-joined)
        number = row["asset_asset.number"]
        item = row["supply_item.name"]
        brand = row.get("supply_brand.name", None)
        if not number:
            return self.default
        represent = "%s (%s" % (number, item)
        if brand:
            represent = "%s, %s)" % (represent, brand)
        else:
            represent = "%s)" % represent
        return s3_str(represent)

    # -------------------------------------------------------------------------
    def link(self, k, v, row=None):
        """
            Represent a (key, value) as hypertext link.

            Vehicles (type 1) link into the vehicle controller, all other
            assets use the standard linkto URL with [id] substituted.

            @param k: the key (site_id)
            @param v: the representation of the key
            @param row: the row with this key
        """

        if row:
            atype = row.get("asset_asset.type", None)
            if atype == 1:
                return A(v, _href=URL(c="vehicle", f="vehicle",
                                      args = [k],
                                      # remove the .aaData extension in paginated views
                                      extension = ""
                                      ))
        k = s3_unicode(k)
        return A(v, _href=self.linkto.replace("[id]", k) \
                                     .replace("%5Bid%5D", k))
# END =========================================================================
| |
#!/usr/bin/env python
from __future__ import division
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import warnings
import django
from django import contrib
from django.utils._os import upath
from django.utils import six
# Dotted path prefix for contrib test modules and the on-disk locations
# of the contrib package and this runtests directory.
CONTRIB_MODULE_PATH = 'django.contrib'
TEST_TEMPLATE_DIR = 'templates'
CONTRIB_DIR = os.path.dirname(upath(contrib.__file__))
RUNTESTS_DIR = os.path.abspath(os.path.dirname(upath(__file__)))

# Scratch directory for the test run; exported so spawned test processes
# share it (removed again in teardown()).
TEMP_DIR = tempfile.mkdtemp(prefix='django_')
os.environ['DJANGO_TEST_TEMP_DIR'] = TEMP_DIR

# Directories under the discovery roots that are NOT test apps.
SUBDIRS_TO_SKIP = [
    'coverage_html',
    'data',
    'requirements',
    'templates',
    'test_discovery_sample',
    'test_discovery_sample2',
    'test_runner_deprecation_app',
    'test_runner_invalid_app',
]

# Apps installed for every test run, before any per-label test apps.
ALWAYS_INSTALLED_APPS = [
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sites',
    'django.contrib.flatpages',
    'django.contrib.redirects',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.comments',
    'django.contrib.admin.apps.SimpleAdminConfig',
    'django.contrib.admindocs',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'staticfiles_tests',
    'staticfiles_tests.apps.test',
    'staticfiles_tests.apps.no_label',
    'servers.another_app',
]
def get_test_modules():
    """
    Discover test apps under the runtests directory, django.contrib and
    (when a spatial database is available) the GIS test directory.

    Returns a list of (modpath, module_name) tuples where modpath is the
    dotted package prefix (or None for top-level test apps).
    """
    from django.contrib.gis.tests.utils import HAS_SPATIAL_DB
    modules = []
    discovery_paths = [
        (None, RUNTESTS_DIR),
        (CONTRIB_MODULE_PATH, CONTRIB_DIR)
    ]
    if HAS_SPATIAL_DB:
        discovery_paths.append(
            ('django.contrib.gis.tests', os.path.join(CONTRIB_DIR, 'gis', 'tests'))
        )

    for modpath, dirpath in discovery_paths:
        for f in os.listdir(dirpath):
            if ('.' in f or
                    # Python 3 byte code dirs (PEP 3147)
                    f == '__pycache__' or
                    f.startswith('sql') or
                    os.path.basename(f) in SUBDIRS_TO_SKIP or
                    # BUG FIX: test the full path; the bare listdir name
                    # would be resolved relative to the current working
                    # directory, so plain files in dirpath were not skipped
                    # unless the CWD happened to be dirpath.
                    os.path.isfile(os.path.join(dirpath, f))):
                continue
            modules.append((modpath, f))
    return modules
def get_installed():
    """Return the dotted names of all currently installed app configs."""
    from django.apps import apps
    names = []
    for app_config in apps.get_app_configs():
        names.append(app_config.name)
    return names
def setup(verbosity, test_labels):
    """
    Prepare settings and installed apps for a test run.

    Redirects a handful of settings, silences known deprecation warnings,
    initialises Django and installs every test app matched by test_labels
    (or all of them when no labels are given).

    Returns a dict of the original setting values for teardown() to restore.
    """
    from django.apps import apps, AppConfig
    from django.conf import settings
    from django.test import TransactionTestCase, TestCase

    print("Testing against Django installed in '%s'" % os.path.dirname(django.__file__))

    # Force declaring available_apps in TransactionTestCase for faster tests.
    def no_available_apps(self):
        raise Exception("Please define available_apps in TransactionTestCase "
                        "and its subclasses.")
    TransactionTestCase.available_apps = property(no_available_apps)
    TestCase.available_apps = None

    # Snapshot of settings to be restored in teardown().
    state = {
        'INSTALLED_APPS': settings.INSTALLED_APPS,
        'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""),
        'TEMPLATE_DIRS': settings.TEMPLATE_DIRS,
        'LANGUAGE_CODE': settings.LANGUAGE_CODE,
        'STATIC_URL': settings.STATIC_URL,
        'STATIC_ROOT': settings.STATIC_ROOT,
    }

    # Redirect some settings for the duration of these tests.
    settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS
    settings.ROOT_URLCONF = 'urls'
    settings.STATIC_URL = '/static/'
    settings.STATIC_ROOT = os.path.join(TEMP_DIR, 'static')
    settings.TEMPLATE_DIRS = (os.path.join(RUNTESTS_DIR, TEST_TEMPLATE_DIR),)
    settings.LANGUAGE_CODE = 'en'
    settings.SITE_ID = 1

    if verbosity > 0:
        # Ensure any warnings captured to logging are piped through a verbose
        # logging handler. If any -W options were passed explicitly on command
        # line, warnings are not captured, and this has no effect.
        logger = logging.getLogger('py.warnings')
        handler = logging.StreamHandler()
        logger.addHandler(handler)

    # Silence known deprecation noise from django.contrib.comments.
    warnings.filterwarnings(
        'ignore',
        'django.contrib.comments is deprecated and will be removed before Django 1.8.',
        DeprecationWarning
    )
    warnings.filterwarnings(
        'ignore',
        'Model class django.contrib.comments.models.* Django 1.9.',
        PendingDeprecationWarning
    )

    # Load all the ALWAYS_INSTALLED_APPS.
    django.setup()

    # Load all the test model apps.
    test_modules = get_test_modules()

    # Reduce given test labels to just the app module path
    test_labels_set = set()
    for label in test_labels:
        bits = label.split('.')
        if bits[:2] == ['django', 'contrib']:
            # Keep django.contrib.<app>
            bits = bits[:3]
        else:
            bits = bits[:1]
        test_labels_set.add('.'.join(bits))

    for modpath, module_name in test_modules:
        if modpath:
            module_label = '.'.join([modpath, module_name])
        else:
            module_label = module_name
        # if the module (or an ancestor) was named on the command line, or
        # no modules were named (i.e., run all), import
        # this module and add it to INSTALLED_APPS.
        if not test_labels:
            module_found_in_labels = True
        else:
            module_found_in_labels = any(
                # exact match or ancestor match
                module_label == label or module_label.startswith(label + '.')
                for label in test_labels_set)

        # Recomputed each iteration because INSTALLED_APPS grows below.
        installed_app_names = set(get_installed())
        if module_found_in_labels and module_label not in installed_app_names:
            if verbosity >= 2:
                print("Importing application %s" % module_name)
            # HACK.
            settings.INSTALLED_APPS.append(module_label)
            app_config = AppConfig.create(module_label)
            apps.app_configs[app_config.label] = app_config
            app_config.import_models(apps.all_models[app_config.label])
            apps.clear_cache()

    return state
def teardown(state):
    """Remove the temp dir and restore the settings snapshot from setup()."""
    from django.conf import settings

    try:
        # Pass unicode so that temp trees containing non-ASCII filenames
        # are removed successfully on Windows. (We're assuming the temp
        # dir name itself does not contain non-ASCII characters.)
        shutil.rmtree(six.text_type(TEMP_DIR))
    except OSError:
        print('Failed to remove temp directory: %s' % TEMP_DIR)

    # Put back every setting that setup() redirected.
    for key in state:
        setattr(settings, key, state[key])
def django_tests(verbosity, interactive, failfast, test_labels):
    """Run the given test labels (or everything installed); return failures."""
    from django.conf import settings

    state = setup(verbosity, test_labels)

    # Run the test suite, including the extra validation tests.
    from django.test.utils import get_runner
    if not hasattr(settings, 'TEST_RUNNER'):
        settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
    runner_class = get_runner(settings)
    runner = runner_class(verbosity=verbosity,
                          interactive=interactive,
                          failfast=failfast)

    labels = test_labels or get_installed()

    # Catch warnings thrown in test DB setup -- remove in Django 1.9
    with warnings.catch_warnings():
        warnings.filterwarnings(
            'ignore',
            "Custom SQL location '<app_label>/models/sql' is deprecated, "
            "use '<app_label>/sql' instead.",
            PendingDeprecationWarning
        )
        failures = runner.run_tests(labels, extra_tests=[])

    teardown(state)
    return failures
def bisect_tests(bisection_label, options, test_labels):
    """
    Binary-search the test suite for a test that breaks bisection_label.

    Repeatedly splits the label list in half, runs each half (plus the
    bisection label) in a subprocess, and keeps whichever half fails,
    until a single offending label remains.
    """
    state = setup(int(options.verbosity), test_labels)

    test_labels = test_labels or get_installed()

    print('***** Bisecting test suite: %s' % ' '.join(test_labels))

    # Make sure the bisection point isn't in the test list
    # Also remove tests that need to be run in specific combinations
    for label in [bisection_label, 'model_inheritance_same_model_name']:
        try:
            test_labels.remove(label)
        except ValueError:
            pass

    # Base command line for the subprocess runs; relevant flags forwarded.
    subprocess_args = [
        sys.executable, upath(__file__), '--settings=%s' % options.settings]
    if options.failfast:
        subprocess_args.append('--failfast')
    if options.verbosity:
        subprocess_args.append('--verbosity=%s' % options.verbosity)
    if not options.interactive:
        subprocess_args.append('--noinput')

    iteration = 1
    while len(test_labels) > 1:
        midpoint = len(test_labels) // 2
        # Each half is run together with the bisection label appended.
        test_labels_a = test_labels[:midpoint] + [bisection_label]
        test_labels_b = test_labels[midpoint:] + [bisection_label]
        print('***** Pass %da: Running the first half of the test suite' % iteration)
        print('***** Test labels: %s' % ' '.join(test_labels_a))
        failures_a = subprocess.call(subprocess_args + test_labels_a)

        print('***** Pass %db: Running the second half of the test suite' % iteration)
        print('***** Test labels: %s' % ' '.join(test_labels_b))
        print('')
        failures_b = subprocess.call(subprocess_args + test_labels_b)

        if failures_a and not failures_b:
            print("***** Problem found in first half. Bisecting again...")
            iteration = iteration + 1
            # Drop the appended bisection label before recursing.
            test_labels = test_labels_a[:-1]
        elif failures_b and not failures_a:
            print("***** Problem found in second half. Bisecting again...")
            iteration = iteration + 1
            test_labels = test_labels_b[:-1]
        elif failures_a and failures_b:
            print("***** Multiple sources of failure found")
            break
        else:
            print("***** No source of failure found... try pair execution (--pair)")
            break

    if len(test_labels) == 1:
        print("***** Source of error: %s" % test_labels[0])
    teardown(state)
def paired_tests(paired_test, options, test_labels):
    """
    Run every test label together with paired_test, one pair per
    subprocess, to find a label that breaks when combined with it.
    """
    state = setup(int(options.verbosity), test_labels)

    if not test_labels:
        test_labels = get_installed()

    print('***** Trying paired execution')

    # The constant member of the pair must not also appear in the list,
    # nor may tests that only work in specific combinations.
    for label in (paired_test, 'model_inheritance_same_model_name'):
        try:
            test_labels.remove(label)
        except ValueError:
            pass

    # Base command line for the subprocess runs; relevant flags forwarded.
    subprocess_args = [sys.executable, upath(__file__),
                       '--settings=%s' % options.settings]
    if options.failfast:
        subprocess_args.append('--failfast')
    if options.verbosity:
        subprocess_args.append('--verbosity=%s' % options.verbosity)
    if not options.interactive:
        subprocess_args.append('--noinput')

    total = len(test_labels)
    for i, label in enumerate(test_labels):
        print('***** %d of %d: Check test pairing with %s' % (
            i + 1, total, label))
        if subprocess.call(subprocess_args + [label, paired_test]):
            # Early exit (without teardown) as in the original flow.
            print('***** Found problem pair with %s' % label)
            return

    print('***** No problem pair found')
    teardown(state)
if __name__ == "__main__":
from optparse import OptionParser
usage = "%prog [options] [module module module ...]"
parser = OptionParser(usage=usage)
parser.add_option(
'-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2', '3'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all '
'output')
parser.add_option(
'--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_option(
'--failfast', action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first failed '
'test.')
parser.add_option(
'--settings',
help='Python path to settings module, e.g. "myproject.settings". If '
'this isn\'t provided, the DJANGO_SETTINGS_MODULE environment '
'variable will be used.')
parser.add_option(
'--bisect', action='store', dest='bisect', default=None,
help='Bisect the test suite to discover a test that causes a test '
'failure when combined with the named test.')
parser.add_option(
'--pair', action='store', dest='pair', default=None,
help='Run the test suite in pairs with the named test to find problem '
'pairs.')
parser.add_option(
'--liveserver', action='store', dest='liveserver', default=None,
help='Overrides the default address where the live server (used with '
'LiveServerTestCase) is expected to run from. The default value '
'is localhost:8081.')
parser.add_option(
'--selenium', action='store_true', dest='selenium',
default=False,
help='Run the Selenium tests as well (if Selenium is installed)')
options, args = parser.parse_args()
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
else:
if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = 'test_sqlite'
options.settings = os.environ['DJANGO_SETTINGS_MODULE']
if options.liveserver is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options.liveserver
if options.selenium:
os.environ['DJANGO_SELENIUM_TESTS'] = '1'
if options.bisect:
bisect_tests(options.bisect, options, args)
elif options.pair:
paired_tests(options.pair, options, args)
else:
failures = django_tests(int(options.verbosity), options.interactive,
options.failfast, args)
if failures:
sys.exit(bool(failures))
| |
import re
from datetime import tzinfo, timedelta, datetime
#########################################################
# source: https://docs.python.org/2/library/datetime.html
# Example tzinfo classes:
# A class capturing the platform's idea of local time.
import time as _time
# Zero offset, shared by the tzinfo implementations below.
ZERO = timedelta(0)
class FixedOffset(tzinfo):
    """Fixed offset in minutes east from UTC."""

    def __init__(self, offset, name):
        # Store the offset once as a timedelta - it never changes.
        self.__delta = timedelta(minutes=offset)
        self.__label = name

    def utcoffset(self, dt):
        return self.__delta

    def dst(self, dt):
        # A fixed-offset zone has no daylight saving transitions.
        return ZERO

    def tzname(self, dt):
        return self.__label
# Offsets of the platform's local standard and DST time from UTC.
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
    DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
    DSTOFFSET = STDOFFSET

# Extra offset applied while DST is in effect (ZERO when the zone has no DST).
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
    """tzinfo capturing the platform's idea of local time (via `time`)."""

    def utcoffset(self, dt):
        return DSTOFFSET if self._isdst(dt) else STDOFFSET

    def dst(self, dt):
        return DSTDIFF if self._isdst(dt) else ZERO

    def tzname(self, dt):
        # time.tzname is a (standard, dst) pair - index it with the DST flag.
        return _time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # Round-trip the naive datetime through mktime/localtime to let the
        # platform decide whether DST applies at that instant.
        timetuple = (dt.year, dt.month, dt.day,
                     dt.hour, dt.minute, dt.second,
                     dt.weekday(), 0, 0)
        local = _time.localtime(_time.mktime(timetuple))
        return local.tm_isdst > 0
# Singleton instance representing the platform's local time zone.
Local = LocalTimezone()
# end of code from python docs
#########################################################

# it would be nice if we could use datetime.strptime
# but timezone parsing (%z) is not working on Python 2.*
# http://stackoverflow.com/questions/20194496/iso-to-datetime-object-z-is-a-bad-directive
#
# so the 2 options are:
# - use dateutil (cons: external dependency)
# - implement just what is needed (cons: errors?)
# (placeholder, regexp) pairs used by _compile_parser to expand readable
# templates like '{YEAR}-{MONTH}' into real regular expressions.
# NOTE: replacements are applied in order, so the 'DIGIT' shorthand used by
# the earlier patterns is only expanded by the final pair.
_NAMED_REGEXPS = (
    ('{YEAR}',     '(?P<year>DIGIT{4})'),
    ('{MONTH}',    '(?P<month>DIGIT{2})'),
    ('{DAY}',      '(?P<day>DIGIT{2})'),
    ('{HOUR}',     '(?P<hour>DIGIT{2})'),
    ('{MINUTE}',   '(?P<minute>DIGIT{2})'),
    ('{SECOND}',   '(?P<second>DIGIT{2})'),
    ('{MICROSEC}', '(?P<microsec>DIGIT{6})'),
    ('{TIMEZONE}', '(?P<tzsign>[-+])(?P<tzhour>DIGIT{2})(?P<tzmin>DIGIT{2})'),
    ('DIGIT', '[0-9]')
)
def _compile_parser(template):
    """
    Compile a placeholder template (see _NAMED_REGEXPS) into a parser
    function mapping a string to an aware datetime, or None on no match.
    """
    # Expand the placeholders in declaration order (DIGIT last).
    for name, regexp in _NAMED_REGEXPS:
        template = template.replace(name, regexp)
    # Anchor at the end so the whole input must match.
    match = re.compile(template + '$').match
    def convert(timeish):
        parts = match(timeish)
        if parts:
            values = parts.groupdict()
            # v for value
            def v(key, default):
                # Groups absent from this template fall back to the default;
                # every template contains {YEAR}, so year never defaults.
                return int(values.get(key, default))
            # Offset in minutes east of UTC; defaults to +0000 when the
            # template has no {TIMEZONE} part.
            tzoffset = (
                (-1 if values.get('tzsign', '+') == '-' else 1)
                *
                (v('tzhour', 0) * 60 + v('tzmin', 0)))
            return datetime(
                v('year', 0),
                v('month', 1),
                v('day', 1),
                v('hour', 0),
                v('minute', 0),
                v('second', 0),
                v('microsec', 0),
                FixedOffset(tzoffset, 'TZ' + str(tzoffset)))
        # Implicitly returns None when the input does not match.
    return convert
# The strict "basic" timestamp format produced by timestamp() below.
_DEFAULT_FULL_TIMESTAMP = '{YEAR}{MONTH}{DAY}T{HOUR}{MINUTE}{SECOND}{MICROSEC}{TIMEZONE}'
_parse_default_timestamp = _compile_parser(_DEFAULT_FULL_TIMESTAMP)

# Parsers tried in order by parse_iso8601(); the strict full timestamp
# first, then progressively shorter iso-8601 variants.
_ISO8601_PARSERS = [
    _parse_default_timestamp
] + [
    _compile_parser(template) for template in (
        '{YEAR}',
        '{YEAR}{MONTH}',
        '{YEAR}-{MONTH}',
        '{YEAR}{MONTH}{DAY}',
        '{YEAR}-{MONTH}-{DAY}',
        '{YEAR}{MONTH}{DAY}T{HOUR}{MINUTE}{SECOND}{TIMEZONE}',
        '{YEAR}-{MONTH}-{DAY}T{HOUR}:{MINUTE}:{SECOND}{TIMEZONE}',
        '{YEAR}-{MONTH}-{DAY}T{HOUR}:{MINUTE}:{SECOND}.{MICROSEC}{TIMEZONE}',
    )]
def parse_iso8601(timeish):
    """
    Parse some iso-8601 date/time formats to a datetime with timezone.

    Tries each known format in order and returns the first match.
    """
    candidates = (parse(timeish) for parse in _ISO8601_PARSERS)
    for candidate in candidates:
        if candidate is not None:
            return candidate
    raise ValueError('Time is not in a recognised iso-8601 format', timeish)
# Unit abbreviation -> timedelta keyword argument.
# NOTE: 'y' and 'm' are listed but unreachable - _DELTA only accepts
# 'wdHMS' - which is just as well, since timedelta() has no
# years/months keywords and would raise TypeError for them.
_TIME_UNITS = {
    'y': 'years',
    'm': 'months',
    'w': 'weeks',
    'd': 'days',
    # expected to be less used:
    'H': 'hours',
    'M': 'minutes',
    'S': 'seconds',
}

# One signed amount + unit, and the full anchored repetition of it.
_DELTA = r'([+-]?\d+)([{units}])'.format(units='wdHMS')
_DELTAS = '(?:{})*$'.format(_DELTA)
def parse_timedelta(delta_str):
    """
    Parse a time-delta of the format {AMOUNT}{UNIT}[{AMOUNT}{UNIT}[..]]

    E.g. '2w4d' for 2 weeks and 4 days
    """
    if not re.match(_DELTAS, delta_str):
        raise ValueError(
            'Invalid delta format: expecting {AMOUNT}{UNIT}* where UNIT is one of w,d,H,M,S',
            delta_str)
    # Sum up every amount/unit pair into a single timedelta.
    total = timedelta()
    for amount, unit_abbrev in re.findall(_DELTA, delta_str):
        total = total + timedelta(**{_TIME_UNITS[unit_abbrev]: int(amount)})
    return total
def timestamp():
    """
    A string representation of this moment.

    With millisecond resolution and time zone so that
    - users recognise the time they made it,
      even if they live in non-trivial time zones (think: +0800)
    - when parsed back, can be compared with others
      even from different time zones
    """
    now = datetime.now(Local)
    return now.strftime('%Y%m%dT%H%M%S%f%z')
# a not so forgiving parser
def time_from_timestamp(timestamp_str):
    """
    Parse a datetime from a timestamp string - strict!

    Only the full, basic timestamp format is accepted.
    """
    result = _parse_default_timestamp(timestamp_str)
    if result is not None:
        return result
    raise ValueError(
        'Not a full, basic timestamp (%s)' % _DEFAULT_FULL_TIMESTAMP,
        timestamp_str)
# The earliest time, beads could be created (actually it could be 10+ years later)
EPOCH_STR = '20000101T000000000000+0000'

# Import-time sanity check of the strict parser (aware datetimes compare
# by UTC instant, so the tz name difference does not matter).
# NOTE(review): assert statements are stripped under `python -O`.
assert time_from_timestamp(EPOCH_STR) == datetime(2000, 1, 1, 0, 0, 0, 0, FixedOffset(0, 'epoch'))
def time_from_user(timeish):
    """
    Parse a datetime from user entered string - multiple formats

    Allows informal differences from current time.
    """
    try:
        return parse_iso8601(timeish)
    except ValueError:
        # fall back to interpreting it as a time-delta added to `now`
        try:
            return datetime.now(Local) + parse_timedelta(timeish)
        except ValueError:
            raise ValueError(
                'Can not interpret string either as time or as delta', timeish)
| |
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 64
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
window=("2013-03-18", None),
train_buildings=[1, 2, 4],
validation_buildings=[5],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
standardise_targets=True,
independently_center_inputs=True,
ignore_incomplete=True,
offset_probability=0.5,
ignore_offset_activations=True
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
1000: 1e-3,
5000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name, target_appliance, seq_length):
    """
    Build a conv -> dense autoencoder-style network for one appliance.

    @param name: experiment name (used for logging and output files)
    @param target_appliance: appliance label(s) for the data source
    @param seq_length: length of each input/output sequence
    @return: a configured (untrained) Net
    """
    # NOTE(review): `source` is published as a module global so other
    # tooling in this codebase can reach it - confirm before removing.
    global source
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        target_appliance=target_appliance,
        logger=logging.getLogger(name),
        seq_length=seq_length
    ))
    source = SameLocation(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    NUM_FILTERS = 8
    # Encoder: 1D conv over time, then dense layers narrowing to a
    # 32-unit bottleneck; decoder mirrors this back out and finishes
    # with a full-mode deconvolution to restore seq_length samples.
    net_dict_copy['layers_config'] = [
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            'label': 'conv0',
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': NUM_FILTERS,
            'filter_length': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'valid'
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # back to (batch, time, features)
        },
        {
            'label': 'dense0',
            'type': DenseLayer,
            # 'valid' conv with filter_length 4 shortens time by 3
            'num_units': (seq_length - 3) * NUM_FILTERS,
            'nonlinearity': rectify
        },
        {
            'label': 'dense1',
            'type': DenseLayer,
            'num_units': seq_length,
            'nonlinearity': rectify
        },
        {
            'label': 'dense2',
            'type': DenseLayer,
            'num_units': 32,
            'nonlinearity': rectify
        },
        {
            'label': 'dense3',
            'type': DenseLayer,
            'num_units': seq_length,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': (seq_length - 3) * NUM_FILTERS,
            'nonlinearity': rectify
        },
        {
            'type': ReshapeLayer,
            'shape': (N_SEQ_PER_BATCH, seq_length - 3, NUM_FILTERS)
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            # 'full' deconvolution restores the 3 samples lost by conv0
            'type': DeConv1DLayer,
            'num_output_channels': 1,
            'filter_length': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'full'
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # back to (batch, time, features)
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """
    Run the configured experiments (currently only the first one, 'a')
    and keep training until interrupted.
    """
    # (code letter, appliance label(s), sequence length)
    APPLIANCES = [
        ('a', ['fridge freezer', 'fridge', 'freezer'], 800),
        ('b', "'coffee maker'", 512),
        ('c', "'dish washer'", 2000),
        ('d', "'hair dryer'", 256),
        ('e', "'kettle'", 256),
        ('f', "'oven'", 2000),
        ('g', "'toaster'", 256),
        ('h', "'light'", 2000),
        ('i', "'washer dryer'", 2000)
    ]
    # NOTE(review): the [:1] slice runs only experiment 'a', which is why
    # the hard-coded 'a' passed to init_experiment below is currently
    # harmless - it would be wrong if the slice were widened.
    for experiment, appliance, seq_length in APPLIANCES[:1]:
        full_exp_name = NAME + experiment
        func_call = init_experiment(PATH, 'a', full_exp_name)
        # Splice the extra arguments into the generated call string.
        func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
        logger = logging.getLogger(full_exp_name)
        try:
            # HACK: init_experiment returns a call string which is eval'd
            # here; only safe because the string is built locally.
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            logger.exception("Exception")
            # raise
        else:
            # Free the (large) source and net before the next experiment.
            del net.source
            del net
            gc.collect()
        finally:
            logging.shutdown()


if __name__ == "__main__":
    main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e468.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| |
# -*- coding: utf-8 -*-
"""Product models."""
import functools
from datetime import date
from flask import session
from flask_login import current_user, user_logged_in
from fulfil_client.model import (Date, FloatType, ModelType, MoneyType,
One2ManyType, StringType)
from flask_babel import format_number
from cached_property import cached_property
from shop.fulfilio import Model, ShopQuery
from shop.globals import current_channel, current_context
def require_cart_with_sale(function):
    """Decorator for Cart methods that need an underlying sale order.

    Sanitises the cart first, then lazily creates and attaches a sale
    if the cart does not have one yet.
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        cart = args[0]
        cart.sanitise()
        if not cart.sale:
            new_sale = Cart.create_sale()
            # Drop the cached property so the next access re-reads it
            del cart.sale
            cart._values['sale'] = new_sale.id
            cart.save()
        return function(*args, **kwargs)
    return wrapper
class SaleLine(Model):
    """A single line (product or shipping charge) on a sale order."""

    __model_name__ = 'sale.line'

    _eager_fields = set(['sale.currency.code'])

    product = ModelType("product.product", cache=True)
    quantity = FloatType()
    unit = ModelType("product.uom", cache=True)
    unit_price = MoneyType('currency_code')
    amount = MoneyType('currency_code')
    description = StringType()
    delivery_address = ModelType('party.address')
    delivery_date = Date()
    gift_message = StringType()
    shipment_cost = MoneyType('currency_code')

    @property
    def currency_code(self):
        # Eagerly loaded from the parent sale's currency.
        return self._values.get('sale.currency.code')

    def update_shipping_address(self, address_id):
        """Point this line at a different delivery address and persist."""
        self.delivery_address = address_id
        self.save()

    def update_delivery_date(self, delivery_date):
        """Set the requested delivery date and persist."""
        self.delivery_date = delivery_date
        self.save()

    def update_gift_message(self, gift_message):
        """Attach a gift message to this line and persist."""
        self.gift_message = gift_message
        self.save()

    def serialize(self):
        """Return a JSON-serialisable dict describing this line.

        Money fields are formatted for the current locale; all
        product-derived fields degrade to None when the line has no
        product (e.g. a pure shipping line).
        """
        current_locale = current_context.get('language') or 'en_US'
        data = {
            'id': self.id,
            'product_id': self.product and self.product.id,
            'product': self.product and self.product.name or None,
            'product_identifier': self.product and self.product.listing and \
                self.product.listing.product_identifier,
            'quantity': format_number(self.quantity),
            'gift_message': self.gift_message or None,
            'unit': self.unit.symbol,
            'unit_price': self.unit_price.format(current_locale),
            'amount': self.amount.format(current_locale),
            'url': self.product and self.product.listing and \
                self.product.listing.get_absolute_url(),
            # BUG FIX: guard like every other product-derived field above;
            # the unguarded attribute access raised AttributeError for
            # product-less (shipping) lines.
            'image': self.product and self.product.available_image,
            'delivery_address': None,
            'is_shipping_line': True if self.shipment_cost else False
        }
        if self.delivery_address:
            data['delivery_address'] = self.delivery_address._values
        return data
class Sale(Model):
    """A sale order, with money fields in the sale's own currency."""

    __model_name__ = 'sale.sale'

    _eager_fields = set(['currency.code', 'lines'])

    number = StringType()
    party = ModelType("party.party")
    shipment_address = ModelType("party.address")
    invoice_address = ModelType("party.address")
    total_amount = MoneyType('currency_code')
    tax_amount = MoneyType('currency_code')
    untaxed_amount = MoneyType('currency_code')
    total_shipment_cost = MoneyType('currency_code')
    invoices = One2ManyType("account.invoice")
    sale_date = Date()
    state = StringType()
    currency = StringType()
    promo_code = StringType()

    #: This access code will be cross checked if the user is guest for a match
    #: to optionally display the order to an user who has not authenticated
    #: as yet
    guest_access_code = StringType()

    @classmethod
    def get_shop_query(cls):
        """Return a ShopQuery bound to this model."""
        return ShopQuery(cls.rpc, cls)

    @property
    def currency_code(self):
        # Eagerly loaded currency code of this sale.
        return self._values.get('currency.code')

    @cached_property
    def lines(self):
        """All sale lines belonging to this sale."""
        return SaleLine.query.filter_by_domain([
            ('sale', '=', self.id),
        ]).all()

    @cached_property
    def items_total(self):
        "Item total without tax and shipping"
        return self.untaxed_amount - self.total_shipment_cost

    def add_product(self, product_id, quantity, delivery_date, address_id):
        """Add a product to this sale, or update the quantity on an
        existing line matching product, delivery date and address.

        NOTE: an existing line's quantity is *set* to the given value,
        not incremented.
        """
        # check if SaleLine already exists
        sale_line = SaleLine.query.filter_by_domain([
            ('product', '=', product_id),
            ('sale', '=', self.id),
            ('delivery_date', '=', delivery_date),
            ('delivery_address', '=', address_id)
        ]).first()
        if sale_line:
            sale_line.quantity = quantity
            sale_line.save()
        else:
            # Mimic the client: build the line, let the server compute
            # dependent fields via on_change_product, then create it.
            line_data = {
                'sale': self.id,
                'product': product_id,
                'quantity': quantity,
                '_parent_sale.shipment_address': self.shipment_address and
                    self.shipment_address.id,
                '_parent_sale.channel': current_channel.id,
                '_parent_sale.party': current_channel.anonymous_customer.id,
                '_parent_sale.currency': current_channel.currency,
                'warehouse': current_channel.warehouse,
                'delivery_address': address_id,
                'delivery_date': delivery_date,
            }
            line_data.update(SaleLine.rpc.on_change_product(line_data))
            if line_data.get('taxes'):
                line_data['taxes'] = [('add', line_data.get('taxes'))]
            # BUG FIX: dict.iteritems() is Python-2-only; items() behaves
            # identically here and also works on Python 3.
            sale_line = SaleLine(**{
                k: v for k, v in line_data.items()
                if '.' not in k
            }).save()
        return sale_line

    def prepare_for_payment(self):
        """Makes cart sale ready for payment.

        Helpful when you want to make sure shipping is properly applied
        before payment.
        """
        pass

    def apply_promo_code(self, promo_code):
        """Write the promo code, reset to draft and re-apply promotions."""
        Sale.rpc.write([self.id], {'promo_code': promo_code})
        Sale.rpc.draft([self.id])
        Sale.rpc.apply_promotion([self.id])
class Cart(Model):
    """A nereid shopping cart, owned by a session (guest) or a user."""

    __model_name__ = 'nereid.cart'

    # Eagerly load the linked sale's id.
    _eager_fields = {'sale', }

    sessionid = StringType()
    user = ModelType("nereid.user")
@cached_property
def sale(self):
if self._values.get('sale'):
return Sale.get_by_id(self._values['sale'])
@staticmethod
@user_logged_in.connect
def login_event_handler(sender, user):
"""This method is triggered when a login event occurs.
When a user logs in, all items in his guest cart should be added to his
logged in or registered cart. If there is no such cart, it should be
created.
"""
Cart._login_event_handler(sender, user)
@classmethod
def _login_event_handler(cls, sender, user):
guest_cart = Cart.find_cart(None)
if guest_cart and guest_cart.sale and guest_cart.sale.lines:
# Active cart is user's cart
user_cart = Cart.get_active()
# Transfer lines from guest cart to user cart
for line in guest_cart.sale.lines:
user_cart.add_product(line.product.id, line.quantity)
# Clear the old cart
guest_cart.clear()
def confirm(self):
"Move order to confirmation state"
sale = self.sale
sale.sale_date = date.today()
sale.save()
Sale.rpc.quote([sale.id])
Sale.rpc.confirm([sale.id])
self.clear()
@property
def size(self):
# TODO: Assuming every item has same unit
if self.is_empty:
return 0
return sum(
map(
lambda l: l.quantity,
filter(lambda l: not l.shipment_cost, self.sale.lines)
)
)
@property
def is_empty(self):
if not self.sale:
return True
if len(self.sale.lines) == 0:
return True
return False
@classmethod
def get_active(cls):
"""
Get active cart for either a user or a guest
Or create one if none found
"""
domain = [
('sessionid', '=', session.sid)
]
if not current_user.is_anonymous:
domain = [
('user', '=', current_user.id)
]
cart = Cart.query.filter_by_domain(domain).first()
if not cart:
if current_user.is_anonymous:
cart = Cart(sessionid=session.sid).save()
else:
cart = Cart(user=current_user.id).save()
return cart
@classmethod
def find_cart(cls, user_id=None):
"""
Return the cart for the user if one exists. The user is None a guest
cart for the session is found.
:param user: ID of the user
:return: Active record of cart or None
"""
domain = [
('user', '=', user_id)
]
if not user_id:
domain.append(('sessionid', '=', session.sid))
cart = Cart.query.filter_by_domain(domain).first()
return cart
@classmethod
def create_sale(cls):
if current_user.is_anonymous:
party = current_channel.anonymous_customer
else:
party = current_user.party
sale_data = {
"party": party.id,
"invoice_address": None,
"shipment_address": None,
"company": current_channel.company,
"currency": current_channel.currency,
"is_cart": True,
"channel": current_channel.id,
}
sale_data.update(Sale.rpc.on_change_channel(sale_data))
sale = Sale(**{
k: v for k, v in sale_data.iteritems()
if '.' not in k
}).save()
return sale
@require_cart_with_sale
def add_product(
self, product_id, quantity, delivery_date=None, address_id=None
):
self.refresh()
self.sale.add_product(product_id, quantity, delivery_date, address_id)
def remove_sale_line(self, line_id):
self.refresh()
SaleLine.rpc.delete([line_id])
def clear(self):
Cart.rpc.write([self.id], {'sale': None})
@require_cart_with_sale
def update_shipping_address(self, line_id, address_id):
line = SaleLine.get_by_id(line_id)
line.update_shipping_address(address_id)
@require_cart_with_sale
def update_delivery_date(self, line_id, delivery_date):
line = SaleLine.get_by_id(line_id)
line.update_delivery_date(delivery_date)
def update_gift_message(self, line_id, gift_message):
line = SaleLine.get_by_id(line_id)
line.update_gift_message(gift_message)
def serialize(self):
if not self.sale:
return {
'empty': True,
}
current_locale = current_context.get('language') or 'en_US'
data = {
'empty': self.is_empty,
'size': self.size,
'has_shipping': bool(self.sale.total_shipment_cost),
'total_amount': self.sale.total_amount.format(current_locale),
'tax_amount': self.sale.tax_amount.format(current_locale),
'untaxed_amount': self.sale.untaxed_amount.format(current_locale),
'untaxed_without_shipping': (
self.sale.untaxed_amount - self.sale.total_shipment_cost
).format(current_locale),
'total_shipment_cost': self.sale.total_shipment_cost.format(
current_locale),
'shipment_address': None,
}
data['lines'] = [
line.serialize() for line in self.sale.lines
]
if self.sale.shipment_address:
data['shipment_address'] = self.sale.shipment_address._values
return data
def sanitise(self):
"""This method verifies that the cart is valid
"""
if not self.sale:
return
if self.sale.state != 'draft':
self.sale = None
self.save()
@require_cart_with_sale
def apply_promo_code(self, promo_code):
self.sale.apply_promo_code(promo_code)
| |
from django.contrib import messages
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from helpers.models import Helpers
from offer.models import OfferCategory
from tours.models import Category
from .forms import RelatedLinkForm
from .models import RelatedLink
def get_lang(request):
    """Return the language code Django negotiated for this request."""
    return request.LANGUAGE_CODE
def get_company():
    """Return the site company name from the Helpers singleton (id=1)."""
    helpers = Helpers.objects.get(id=1)
    return helpers.company_name
def related_links_list(request):
    """Render the paginated (6 per page) list of related links.

    A non-empty ``q`` GET parameter short-circuits to the site search view.
    """
    query = request.GET.get('q')
    if query:
        return redirect(reverse('search') + '?q=' + query)
    # Fetch the Helpers singleton once instead of issuing one query per field.
    helpers = Helpers.objects.get(id=1)
    footer = {
        'pt': helpers.about_footer_PT,
        'en': helpers.about_footer_EN,
        'de': helpers.about_footer_DE,
    }
    queryset_list = RelatedLink.objects.all()
    lang = get_lang(request)
    breadcrumbs = [
        {'url': '/', 'name': _('Home')},
        {'url': '#', 'name': _('Related links'), 'active': True}
    ]
    paginator = Paginator(queryset_list, 6)
    page_request_var = 'page'
    page = request.GET.get(page_request_var)
    try:
        queryset = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        queryset = paginator.page(1)
    except EmptyPage:
        # Page number out of range: serve the last available page.
        queryset = paginator.page(paginator.num_pages)
    context = {
        'footer': {
            # Fall back to English when the active language has no footer text
            # (previously a KeyError / 500 for any language other than pt/en/de).
            'about': footer.get(lang, footer['en']),
            'icon': helpers.footer_icon
        },
        'nav': {
            'tour_categories_list': Category.objects.all(),
            'offer_categories_list': OfferCategory.objects.all(),
        },
        'company': helpers.company_name,
        'title': _('Related links'),
        'object_list': queryset,
        'breadcrumbs': breadcrumbs,
        'page_request_var': page_request_var,
        'value': _('Add'),
    }
    return render(request, 'partials/related-links.html', context)
def related_links_detail(request, pk=None):
    """Render the detail page of a single related link, localised per request.

    Raises Http404 when the link does not exist (consistent with the update
    view, instead of an unhandled ``DoesNotExist`` / 500).
    """
    query = request.GET.get('q')
    if query:
        return redirect(reverse('search') + '?q=' + query)
    lang = get_lang(request)
    # Fetch the Helpers singleton once instead of issuing one query per field.
    helpers = Helpers.objects.get(id=1)
    footer = {
        'pt': helpers.about_footer_PT,
        'en': helpers.about_footer_EN,
        'de': helpers.about_footer_DE,
    }
    related_link = get_object_or_404(RelatedLink, pk=pk)
    title = {
        'pt': related_link.title_PT,
        'en': related_link.title_EN,
        'de': related_link.title_DE,
    }
    description = {
        'pt': related_link.description_PT,
        'en': related_link.description_EN,
        'de': related_link.description_DE,
    }
    # Fall back to English for languages without a translation.
    title_text = title.get(lang, title['en'])
    description_text = description.get(lang, description['en'])
    breadcrumbs = [
        {'url': '/', 'name': _('Home'), 'active': False},
        {'url': '/related-links', 'name': _('Related links'), 'active': False},
        {'url': '#', 'name': title_text, 'active': True}]
    context = {
        'footer': {
            'about': footer.get(lang, footer['en']),
            'icon': helpers.footer_icon
        },
        'nav': {
            'tour_categories_list': Category.objects.all(),
            'offer_categories_list': OfferCategory.objects.all(),
        },
        'company': helpers.company_name,
        'breadcrumbs': breadcrumbs,
        'title': title_text,
        'object': {
            'id': related_link.id,
            'keywords_SEO': related_link.keywords_SEO,
            'description_SEO': related_link.description_SEO,
            'title': title_text,
            'description': description_text,
            'img': related_link.img,
        },
    }
    return render(request, 'templates/_related_links_details.html', context)
def related_links_create(request):
    """Create a related link; only staff members who are also superusers may.

    Unauthorised users are redirected to the signup page before any other
    database work is done.
    """
    query = request.GET.get('q')
    if query:
        return redirect(reverse('search') + '?q=' + query)
    # Guard clause first: previously the footer queries ran even for users
    # about to be redirected away.
    if not request.user.is_staff or not request.user.is_superuser:
        return redirect('accounts:signup')
    lang = get_lang(request)
    # Fetch the Helpers singleton once instead of issuing one query per field.
    helpers = Helpers.objects.get(id=1)
    footer = {
        'pt': helpers.about_footer_PT,
        'en': helpers.about_footer_EN,
        'de': helpers.about_footer_DE,
    }
    form = RelatedLinkForm(request.POST or None, request.FILES or None)
    breadcrumbs = [{'url': '/', 'name': _('Home'), 'active': False},
                   {'url': '/related-links', 'name': _('Related links'),
                    'active': False},
                   {'url': '#', 'name': _('Create Offer'), 'active': True}]
    if form.is_valid():
        instance = form.save(commit=False)
        # Record who created the link.
        instance.user = request.user
        instance.save()
        messages.success(request, 'Link Created')
        return redirect('related_links:list')
    context = {
        'footer': {
            # Fall back to English when the active language has no footer text.
            'about': footer.get(lang, footer['en']),
            'icon': helpers.footer_icon
        },
        'nav': {
            'tour_categories_list': Category.objects.all(),
            'offer_categories_list': OfferCategory.objects.all(),
        },
        'company': helpers.company_name,
        'title': _('Create Offer'),
        'breadcrumbs': breadcrumbs,
        'value': _('Add'),
        'form': form
    }
    return render(request, 'templates/_form.html', context)
def related_links_update(request, pk=None):
    """Edit an existing related link; staff + superuser only.

    Raises Http404 when the link does not exist.
    """
    query = request.GET.get('q')
    if query:
        return redirect(reverse('search') + '?q=' + query)
    # Guard clause first: previously the footer queries ran even for users
    # about to be redirected away.
    if not request.user.is_staff or not request.user.is_superuser:
        return redirect('accounts:signup')
    lang = get_lang(request)
    # Fetch the Helpers singleton once instead of issuing one query per field.
    helpers = Helpers.objects.get(id=1)
    footer = {
        'pt': helpers.about_footer_PT,
        'en': helpers.about_footer_EN,
        'de': helpers.about_footer_DE,
    }
    related_link = get_object_or_404(RelatedLink, pk=pk)
    title = {
        'pt': related_link.title_PT,
        'en': related_link.title_EN,
        'de': related_link.title_DE,
    }
    # Fall back to English for languages without a translation (previously a
    # KeyError / 500 for any language other than pt/en/de).
    title_text = title.get(lang, title['en'])
    breadcrumbs = [{'url': '/', 'name': _('Home')},
                   {'url': '/related-links', 'name': _('Related links')},
                   {'url': '#', 'name': _('Edit') + ' ' + title_text, 'active': True}]
    form = RelatedLinkForm(request.POST or None, request.FILES or None, instance=related_link)
    if form.is_valid():
        related_link = form.save(commit=False)
        related_link.save()
        messages.success(request, _('Link saved'))
        return redirect('related_links:list')
    context = {
        'footer': {
            'about': footer.get(lang, footer['en']),
            'icon': helpers.footer_icon
        },
        'nav': {
            'tour_categories_list': Category.objects.all(),
            'offer_categories_list': OfferCategory.objects.all(),
        },
        'company': helpers.company_name,
        'title': _('Edit') + ' ' + title_text,
        'breadcrumbs': breadcrumbs,
        'instance': related_link,
        'form': form,
        'value': _('Add'),
    }
    return render(request, 'templates/_form.html', context)
def related_links_delete(request, pk=None):
    """Delete a related link (staff + superuser only), then show the list."""
    search_term = request.GET.get('q')
    if search_term:
        return redirect(reverse('search') + '?q=' + search_term)
    # Only users who are both staff and superuser may delete.
    is_privileged = request.user.is_staff and request.user.is_superuser
    if not is_privileged:
        return redirect('accounts:signup')
    link = get_object_or_404(RelatedLink, pk=pk)
    link.delete()
    messages.success(request, 'Link deleted')
    return redirect('related_links:list')
| |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import sys
import signal
import psutil
from oslo_config import cfg
import st2tests.config
from st2common.util import concurrency
from st2common.models.db import db_setup
from st2reactor.container.process_container import PROCESS_EXIT_TIMEOUT
from st2common.util.green.shell import run_command
from st2common.bootstrap.sensorsregistrar import register_sensors
from st2tests.base import IntegrationTestCase
__all__ = ["SensorContainerTestCase"]
# Directory containing this test module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Test configuration used to start st2 services.
ST2_CONFIG_PATH = os.path.join(BASE_DIR, "../../../conf/st2.tests.conf")
ST2_CONFIG_PATH = os.path.abspath(ST2_CONFIG_PATH)
# Launch the container with the same interpreter the tests run under.
PYTHON_BINARY = sys.executable
BINARY = os.path.join(BASE_DIR, "../../../st2reactor/bin/st2sensorcontainer")
BINARY = os.path.abspath(BINARY)
PACKS_BASE_PATH = os.path.abspath(os.path.join(BASE_DIR, "../../../contrib"))
# Default command line: run the container pinned to the sample polling sensor.
DEFAULT_CMD = [
    PYTHON_BINARY,
    BINARY,
    "--config-file",
    ST2_CONFIG_PATH,
    "--sensor-ref=examples.SamplePollingSensor",
]
class SensorContainerTestCase(IntegrationTestCase):
    """
    Integration tests for the sensor container process lifecycle.

    Note: For those tests MongoDB must be running, virtualenv must exist for
    examples pack and sensors from the example pack must be registered.
    """

    print_stdout_stderr_on_teardown = True

    @classmethod
    def setUpClass(cls):
        super(SensorContainerTestCase, cls).setUpClass()

        st2tests.config.parse_args()

        username = (
            cfg.CONF.database.username
            if hasattr(cfg.CONF.database, "username")
            else None
        )
        password = (
            cfg.CONF.database.password
            if hasattr(cfg.CONF.database, "password")
            else None
        )
        cls.db_connection = db_setup(
            cfg.CONF.database.db_name,
            cfg.CONF.database.host,
            cfg.CONF.database.port,
            username=username,
            password=password,
            ensure_indexes=False,
        )

        # NOTE: We need to perform this patching because test fixtures are located outside of the
        # packs base paths directory. This will never happen outside the context of test fixtures.
        cfg.CONF.content.packs_base_paths = PACKS_BASE_PATH

        # Register sensors
        register_sensors(packs_base_paths=[PACKS_BASE_PATH], use_pack_cache=False)

        # Create virtualenv for examples pack
        virtualenv_path = "/tmp/virtualenvs/examples"

        run_command(cmd=["rm", "-rf", virtualenv_path])

        cmd = [
            "virtualenv",
            "--system-site-packages",
            "--python",
            PYTHON_BINARY,
            virtualenv_path,
        ]
        run_command(cmd=cmd)

    def test_child_processes_are_killed_on_sigint(self):
        process = self._start_sensor_container()

        # Give it some time to start up
        concurrency.sleep(7)

        # Assert process has started and is running
        self.assertProcessIsRunning(process=process)

        # Verify container process and children sensor / wrapper processes are running
        pp = psutil.Process(process.pid)
        children_pp = pp.children()
        self.assertEqual(pp.cmdline()[1:], DEFAULT_CMD[1:])
        self.assertEqual(len(children_pp), 1)

        # Send SIGINT
        process.send_signal(signal.SIGINT)

        # SIGINT causes graceful shutdown so give it some time to gracefuly shut down the sensor
        # child processes
        concurrency.sleep(PROCESS_EXIT_TIMEOUT + 1)

        # Verify parent and children processes have exited
        self.assertProcessExited(proc=pp)
        self.assertProcessExited(proc=children_pp[0])

        self.remove_process(process=process)

    def test_child_processes_are_killed_on_sigterm(self):
        process = self._start_sensor_container()

        # Give it some time to start up
        concurrency.sleep(5)

        # Verify container process and children sensor / wrapper processes are running
        pp = psutil.Process(process.pid)
        children_pp = pp.children()
        self.assertEqual(pp.cmdline()[1:], DEFAULT_CMD[1:])
        self.assertEqual(len(children_pp), 1)

        # Send SIGTERM
        process.send_signal(signal.SIGTERM)

        # SIGTERM causes graceful shutdown so give it some time to gracefuly shut down the sensor
        # child processes
        concurrency.sleep(PROCESS_EXIT_TIMEOUT + 8)

        # Verify parent and children processes have exited
        self.assertProcessExited(proc=pp)
        self.assertProcessExited(proc=children_pp[0])

        self.remove_process(process=process)

    def test_child_processes_are_killed_on_sigkill(self):
        process = self._start_sensor_container()

        # Give it some time to start up
        concurrency.sleep(5)

        # Verify container process and children sensor / wrapper processes are running
        pp = psutil.Process(process.pid)
        children_pp = pp.children()
        self.assertEqual(pp.cmdline()[1:], DEFAULT_CMD[1:])
        self.assertEqual(len(children_pp), 1)

        # Send SIGKILL
        process.send_signal(signal.SIGKILL)

        # Note: On SIGKILL processes should be killed instantly
        concurrency.sleep(1)

        # Verify parent and children processes have exited
        self.assertProcessExited(proc=pp)
        self.assertProcessExited(proc=children_pp[0])

        self.remove_process(process=process)

    def test_single_sensor_mode(self):
        # 1. --sensor-ref not provided
        cmd = [
            PYTHON_BINARY,
            BINARY,
            "--config-file",
            ST2_CONFIG_PATH,
            "--single-sensor-mode",
        ]

        process = self._start_sensor_container(cmd=cmd)
        pp = psutil.Process(process.pid)

        # Give it some time to start up
        concurrency.sleep(5)

        stdout = process.stdout.read()
        self.assertTrue(
            (
                b"--sensor-ref argument must be provided when running in single sensor "
                b"mode"
            )
            in stdout
        )
        self.assertProcessExited(proc=pp)
        self.remove_process(process=process)

        # 2. sensor ref provided
        # BUG FIX: launch via PYTHON_BINARY like DEFAULT_CMD and the first
        # command above (the interpreter was previously omitted here).
        cmd = [
            PYTHON_BINARY,
            BINARY,
            "--config-file",
            ST2_CONFIG_PATH,
            "--single-sensor-mode",
            "--sensor-ref=examples.SampleSensorExit",
        ]

        process = self._start_sensor_container(cmd=cmd)
        pp = psutil.Process(process.pid)

        # Give it some time to start up
        concurrency.sleep(1)

        # Container should exit and not respawn a sensor in single sensor mode
        stdout = process.stdout.read()

        # BUG FIX: these three checks previously asserted on non-empty bytes
        # literals (always truthy, so they could never fail); assert that the
        # messages actually appear in the captured output.
        self.assertTrue(
            b"Process for sensor examples.SampleSensorExit has exited with code 110"
            in stdout
        )
        self.assertTrue(
            b"Not respawning a sensor since running in single sensor mode" in stdout
        )
        self.assertTrue(b"Process container quit with exit_code 110." in stdout)

        concurrency.sleep(2)
        self.assertProcessExited(proc=pp)
        self.remove_process(process=process)

    def _start_sensor_container(self, cmd=DEFAULT_CMD):
        """Spawn the sensor container in its own session and track it."""
        subprocess = concurrency.get_subprocess_module()

        print("Using command: %s" % (" ".join(cmd)))

        process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            shell=False,
            # New session so signals target the container, not the test runner.
            preexec_fn=os.setsid,
        )
        self.add_process(process=process)
        return process
| |
"""
sentry.tagstore.snuba.backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2018 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import functools
from collections import defaultdict
from datetime import timedelta
from dateutil.parser import parse as parse_datetime
from django.utils import timezone
import six
from sentry.tagstore import TagKeyStatus
from sentry.tagstore.base import TagStorage
from sentry.tagstore.exceptions import (
GroupTagKeyNotFound,
GroupTagValueNotFound,
TagKeyNotFound,
TagValueNotFound,
)
from sentry.tagstore.types import TagKey, TagValue, GroupTagKey, GroupTagValue
from sentry.utils import snuba
from sentry.utils.dates import to_timestamp
# Event column used for first_seen / last_seen aggregations.
SEEN_COLUMN = 'timestamp'

# Snuba returns datetimes as ISO strings; these keys are parsed back into
# datetime objects by fix_tag_value_data().
tag_value_data_transformers = {
    'first_seen': parse_datetime,
    'last_seen': parse_datetime,
}
def fix_tag_value_data(data):
    """Normalise raw snuba aggregate values in place and return the dict.

    Any key listed in ``tag_value_data_transformers`` (the seen-timestamp
    strings) is run through its transformer.
    """
    for field in tag_value_data_transformers:
        if field in data:
            data[field] = tag_value_data_transformers[field](data[field])
    return data
class SnubaTagStorage(TagStorage):
    """Read-only TagStorage implementation backed by live Snuba queries.

    All lookups aggregate event data within the window returned by
    ``get_time_range``; nothing is written by this backend.
    """

    def get_time_range(self, days=90):
        """
        Returns the default (start, end) time range for querying snuba.
        """
        # TODO this should use the per-project retention figure to limit
        # the query to looking at only the retention window for the project.
        end = timezone.now()
        return (end - timedelta(days=days), end)

    def __get_tag_key(self, project_id, group_id, environment_id, key):
        # Shared lookup; group_id=None yields a TagKey, otherwise GroupTagKey.
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
        }
        if group_id is not None:
            filters['issue'] = [group_id]
        conditions = [[tag, '!=', '']]
        aggregations = [['uniq', tag, 'unique_values']]
        result = snuba.query(start, end, [], conditions, filters, aggregations,
                             referrer='tagstore.__get_tag_key')
        if result == 0:
            # No distinct values observed within the window.
            raise TagKeyNotFound if group_id is None else GroupTagKeyNotFound
        else:
            data = {
                'key': key,
                'values_seen': result,
            }
            if group_id is None:
                return TagKey(**data)
            else:
                return GroupTagKey(group_id=group_id, **data)

    def __get_tag_keys(self, project_id, group_id, environment_id, limit=1000):
        # Top tag keys by number of distinct values seen.
        start, end = self.get_time_range()
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
        }
        if group_id is not None:
            filters['issue'] = [group_id]
        aggregations = [['uniq', 'tags_value', 'values_seen']]
        result = snuba.query(start, end, ['tags_key'], [], filters,
                             aggregations, limit=limit, orderby='-values_seen',
                             referrer='tagstore.__get_tag_keys')
        if group_id is None:
            ctor = TagKey
        else:
            ctor = functools.partial(GroupTagKey, group_id=group_id)
        # Drop keys with a zero/falsy values_seen count.
        return set([ctor(key=key, values_seen=values_seen)
                    for key, values_seen in six.iteritems(result) if values_seen])

    def __get_tag_value(self, project_id, group_id, environment_id, key, value):
        # Shared lookup; group_id=None yields a TagValue, else GroupTagValue.
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
        }
        if group_id is not None:
            filters['issue'] = [group_id]
        conditions = [[tag, '=', value]]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        data = snuba.query(start, end, [], conditions, filters, aggregations)
        if not data['times_seen'] > 0:
            raise TagValueNotFound if group_id is None else GroupTagValueNotFound
        else:
            data.update({
                'key': key,
                'value': value,
            })
            if group_id is None:
                return TagValue(**fix_tag_value_data(data))
            else:
                return GroupTagValue(group_id=group_id, **fix_tag_value_data(data))

    def __get_tag_values(self, project_id, group_id, environment_id, key):
        # All non-empty values of a tag key, with seen counts and timestamps.
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
        }
        if group_id is not None:
            filters['issue'] = [group_id]
        conditions = [[tag, '!=', '']]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        result = snuba.query(start, end, [tag], conditions, filters, aggregations,
                             referrer='tagstore.__get_tag_values')
        if group_id is None:
            ctor = TagValue
        else:
            ctor = functools.partial(GroupTagValue, group_id=group_id)
        return set([ctor(key=key, value=value, **fix_tag_value_data(data))
                    for value, data in result.items()])

    def get_tag_key(self, project_id, environment_id, key, status=TagKeyStatus.VISIBLE):
        # Snuba has no notion of hidden keys, so only VISIBLE is supported.
        assert status is TagKeyStatus.VISIBLE
        return self.__get_tag_key(project_id, None, environment_id, key)

    def get_tag_keys(self, project_id, environment_id, status=TagKeyStatus.VISIBLE):
        # Snuba has no notion of hidden keys, so only VISIBLE is supported.
        assert status is TagKeyStatus.VISIBLE
        return self.__get_tag_keys(project_id, None, environment_id)

    def get_tag_value(self, project_id, environment_id, key, value):
        """Project-level TagValue lookup (raises TagValueNotFound)."""
        return self.__get_tag_value(project_id, None, environment_id, key, value)

    def get_tag_values(self, project_id, environment_id, key):
        """Project-level set of TagValues for *key*."""
        return self.__get_tag_values(project_id, None, environment_id, key)

    def get_group_tag_key(self, project_id, group_id, environment_id, key):
        """Group-level GroupTagKey lookup (raises GroupTagKeyNotFound)."""
        return self.__get_tag_key(project_id, group_id, environment_id, key)

    def get_group_tag_keys(self, project_id, group_id, environment_id, limit=None):
        """Group-level set of GroupTagKeys, optionally limited."""
        return self.__get_tag_keys(project_id, group_id, environment_id, limit=limit)

    def get_group_tag_value(self, project_id, group_id, environment_id, key, value):
        """Group-level GroupTagValue lookup (raises GroupTagValueNotFound)."""
        return self.__get_tag_value(project_id, group_id, environment_id, key, value)

    def get_group_tag_values(self, project_id, group_id, environment_id, key):
        """Group-level set of GroupTagValues for *key*."""
        return self.__get_tag_values(project_id, group_id, environment_id, key)

    def get_group_list_tag_value(self, project_id, group_id_list, environment_id, key, value):
        """Map each group id to its GroupTagValue for (key, value)."""
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
            'issue': group_id_list,
        }
        conditions = [
            [tag, '=', value]
        ]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        result = snuba.query(start, end, ['issue'], conditions, filters, aggregations,
                             referrer='tagstore.get_group_list_tag_value')
        return {
            issue: GroupTagValue(
                group_id=issue,
                key=key,
                value=value,
                **fix_tag_value_data(data)
            ) for issue, data in six.iteritems(result)
        }

    def get_group_tag_value_count(self, project_id, group_id, environment_id, key):
        """Count events on the group where *key* has a non-empty value."""
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
            'issue': [group_id],
        }
        conditions = [[tag, '!=', '']]
        aggregations = [['count()', '', 'count']]
        return snuba.query(start, end, [], conditions, filters, aggregations,
                           referrer='tagstore.get_group_tag_value_count')

    def get_top_group_tag_values(self, project_id, group_id, environment_id, key, limit=3):
        """Most frequent GroupTagValues of *key* on the group."""
        start, end = self.get_time_range()
        tag = 'tags[{}]'.format(key)
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
            'issue': [group_id],
        }
        conditions = [[tag, '!=', '']]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        result = snuba.query(start, end, [tag], conditions, filters,
                             aggregations, limit=limit, orderby='-times_seen',
                             referrer='tagstore.get_top_group_tag_values')
        return [
            GroupTagValue(
                group_id=group_id,
                key=key,
                value=value,
                **fix_tag_value_data(data)
            ) for value, data in six.iteritems(result)
        ]

    def __get_release(self, project_id, group_id, first=True):
        # Earliest (first=True) or latest release tag value seen.
        start, end = self.get_time_range()
        filters = {
            'project_id': [project_id],
        }
        conditions = [['tags[sentry:release]', 'IS NOT NULL', None]]
        if group_id is not None:
            filters['issue'] = [group_id]
        aggregations = [['min' if first else 'max', SEEN_COLUMN, 'seen']]
        orderby = 'seen' if first else '-seen'
        result = snuba.query(start, end, ['tags[sentry:release]'], conditions, filters,
                             aggregations, limit=1, orderby=orderby,
                             referrer='tagstore.__get_release')
        if not result:
            return None
        else:
            # NOTE(review): relies on Python 2 dict.keys() returning a list;
            # not py3-safe as written.
            return result.keys()[0]

    def get_first_release(self, project_id, group_id):
        """Earliest release version seen for the group (or None)."""
        return self.__get_release(project_id, group_id, True)

    def get_last_release(self, project_id, group_id):
        """Latest release version seen for the group (or None)."""
        return self.__get_release(project_id, group_id, False)

    def get_release_tags(self, project_ids, environment_id, versions):
        """TagValues for the given release version strings across projects."""
        start, end = self.get_time_range()
        filters = {
            'project_id': project_ids,
            'environment': [environment_id],
        }
        # NB we add release as a condition rather than a filter because
        # this method is already dealing with version strings rather than
        # release ids which would need to be translated by the snuba util.
        tag = 'sentry:release'
        col = 'tags[{}]'.format(tag)
        conditions = [[col, 'IN', versions]]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        result = snuba.query(start, end, ['project_id', col],
                             conditions, filters, aggregations,
                             referrer='tagstore.get_release_tags')
        values = []
        # Result is nested: project_id -> release value -> aggregate data.
        for project_data in six.itervalues(result):
            for value, data in six.iteritems(project_data):
                values.append(
                    TagValue(
                        key=tag,
                        value=value,
                        **fix_tag_value_data(data)
                    )
                )
        return set(values)

    def get_group_ids_for_users(self, project_ids, event_users, limit=100):
        """Group ids recently seen for any of the given EventUsers."""
        start, end = self.get_time_range()
        filters = {
            'project_id': project_ids,
        }
        conditions = [
            ['tags[sentry:user]', 'IN', filter(None, [eu.tag_value for eu in event_users])],
        ]
        aggregations = [['max', SEEN_COLUMN, 'last_seen']]
        result = snuba.query(start, end, ['issue'], conditions, filters,
                             aggregations, limit=limit, orderby='-last_seen',
                             referrer='tagstore.get_group_ids_for_users')
        return set(result.keys())

    def get_group_tag_values_for_users(self, event_users, limit=100):
        """GroupTagValues of 'sentry:user' matching the given EventUsers."""
        start, end = self.get_time_range()
        filters = {
            'project_id': [eu.project_id for eu in event_users]
        }
        conditions = [
            ['tags[sentry:user]', 'IN', filter(None, [eu.tag_value for eu in event_users])]
        ]
        aggregations = [
            ['count()', '', 'times_seen'],
            ['min', SEEN_COLUMN, 'first_seen'],
            ['max', SEEN_COLUMN, 'last_seen'],
        ]
        result = snuba.query(start, end, ['issue', 'user_id'], conditions, filters,
                             aggregations, orderby='-last_seen', limit=limit,
                             referrer='tagstore.get_group_tag_values_for_users')
        values = []
        # Result is nested: issue -> user value -> aggregate data.
        for issue, users in six.iteritems(result):
            for name, data in six.iteritems(users):
                values.append(
                    GroupTagValue(
                        group_id=issue,
                        key='sentry:user',
                        value=name,
                        **fix_tag_value_data(data)
                    )
                )
        return values

    def get_groups_user_counts(self, project_id, group_ids, environment_id):
        """Map group id -> count of distinct users; missing groups yield 0."""
        start, end = self.get_time_range()
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
            'issue': group_ids,
        }
        aggregations = [['uniq', 'tags[sentry:user]', 'count']]
        result = snuba.query(start, end, ['issue'], None, filters, aggregations,
                             referrer='tagstore.get_groups_user_counts')
        return defaultdict(int, {k: v for k, v in result.items() if v})

    def get_tag_value_paginator(self, project_id, environment_id, key, query=None,
                                order_by='-last_seen'):
        """Paginator over TagValues of *key*, optionally LIKE-filtered by *query*."""
        from sentry.api.paginator import SequencePaginator

        # Only descending last_seen ordering is supported by this backend.
        if not order_by == '-last_seen':
            raise ValueError("Unsupported order_by: %s" % order_by)

        conditions = []
        if query:
            conditions.append(['tags_value', 'LIKE', '%{}%'.format(query)])

        start, end = self.get_time_range()
        results = snuba.query(
            start=start,
            end=end,
            groupby=['tags_value'],
            filter_keys={
                'project_id': [project_id],
                'environment': [environment_id],
                'tags_key': [key],
            },
            aggregations=[
                ['count()', '', 'times_seen'],
                ['min', 'timestamp', 'first_seen'],
                ['max', 'timestamp', 'last_seen'],
            ],
            conditions=conditions,
            orderby=order_by,
            # TODO: This means they can't actually paginate all TagValues.
            limit=1000,
        )

        tag_values = [
            TagValue(
                key=key,
                value=value,
                **fix_tag_value_data(data)
            ) for value, data in six.iteritems(results)
        ]

        # Score each value by its millisecond timestamp for cursor pagination.
        desc = order_by.startswith('-')
        score_field = order_by.lstrip('-')
        return SequencePaginator(
            [(int(to_timestamp(getattr(tv, score_field)) * 1000), tv) for tv in tag_values],
            reverse=desc
        )

    def get_group_tag_value_iter(self, project_id, group_id, environment_id, key, callbacks=()):
        """List GroupTagValues of *key* for the group, running *callbacks* on it."""
        start, end = self.get_time_range()
        results = snuba.query(
            start=start,
            end=end,
            groupby=['tags_value'],
            filter_keys={
                'project_id': [project_id],
                'environment': [environment_id],
                'tags_key': [key],
                'issue': [group_id],
            },
            aggregations=[
                ['count()', '', 'times_seen'],
                ['min', 'timestamp', 'first_seen'],
                ['max', 'timestamp', 'last_seen'],
            ],
            orderby='-first_seen',  # Closest thing to pre-existing `-id` order
            # TODO: This means they can't actually iterate all GroupTagValues.
            limit=1000,
        )

        group_tag_values = [
            GroupTagValue(
                group_id=group_id,
                key=key,
                value=value,
                **fix_tag_value_data(data)
            ) for value, data in six.iteritems(results)
        ]

        for cb in callbacks:
            cb(group_tag_values)

        return group_tag_values

    def get_group_tag_value_paginator(self, project_id, group_id, environment_id, key,
                                      order_by='-id'):
        """Paginator over GroupTagValues; '-id' is emulated via '-first_seen'."""
        from sentry.api.paginator import SequencePaginator

        if order_by in ('-last_seen', '-first_seen'):
            pass
        elif order_by == '-id':
            # Snuba has no unique id per GroupTagValue so we'll substitute `-first_seen`
            order_by = '-first_seen'
        else:
            raise ValueError("Unsupported order_by: %s" % order_by)

        group_tag_values = self.get_group_tag_value_iter(
            project_id, group_id, environment_id, key
        )

        # Score each value by its millisecond timestamp for cursor pagination.
        desc = order_by.startswith('-')
        score_field = order_by.lstrip('-')
        return SequencePaginator(
            [(int(to_timestamp(getattr(gtv, score_field)) * 1000), gtv) for gtv in group_tag_values],
            reverse=desc
        )

    def get_group_tag_value_qs(self, project_id, group_id, environment_id, key, value=None):
        # This method is not implemented because it is only used by the Django
        # search backend.
        raise NotImplementedError

    def get_event_tag_qs(self, project_id, environment_id, key, value):
        # This method is not implemented because it is only used by the Django
        # search backend.
        raise NotImplementedError

    def get_group_event_filter(self, project_id, group_id, environment_id, tags):
        """Return an `event_id__in` filter for events matching *tags*, or None."""
        start, end = self.get_time_range()
        filters = {
            'project_id': [project_id],
            'environment': [environment_id],
            'issue': [group_id],
        }
        # NOTE(review): the tag conditions are wrapped in a single nested
        # list; verify this yields AND (not OR) semantics in the snuba util.
        conditions = [[['tags[{}]'.format(k), '=', v] for (k, v) in tags.items()]]

        result = snuba.raw_query(start, end, selected_columns=['event_id'],
                                 conditions=conditions, orderby='-timestamp', filter_keys=filters,
                                 limit=1000, referrer='tagstore.get_group_event_filter')

        event_id_set = set(row['event_id'] for row in result['data'])

        if not event_id_set:
            return None

        return {'event_id__in': event_id_set}

    def get_group_ids_for_search_filter(
            self, project_id, environment_id, tags, candidates=None, limit=1000):
        # This method is not implemented since the `group.id` column doesn't
        # exist in Snuba. This logic is implemented in the search backend
        # instead.
        raise NotImplementedError
| |
from __future__ import unicode_literals
from datetime import date
from django.contrib.auth import models, management
from django.contrib.auth.management import create_permissions
from django.contrib.auth.management.commands import changepassword
from django.contrib.auth.models import User
from django.contrib.auth.tests import CustomUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from django.utils.six import StringIO
@skipIfCustomUser
class GetDefaultUsernameTestCase(TestCase):
    """Tests for ``management.get_default_username``.

    ``management.get_system_username`` is monkeypatched per test and restored
    in tearDown, so the tests never depend on the real OS account name.
    """

    def setUp(self):
        # Save the real implementation so tearDown can restore it.
        self.old_get_system_username = management.get_system_username

    def tearDown(self):
        management.get_system_username = self.old_get_system_username

    def test_actual_implementation(self):
        # The unpatched helper must return text (unicode under Python 2).
        self.assertIsInstance(management.get_system_username(), six.text_type)

    def test_simple(self):
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), 'joe')

    def test_existing(self):
        # When a user with the system username already exists, the suggested
        # default is empty -- unless the database check is skipped.
        models.User.objects.create(username='joe')
        management.get_system_username = lambda: 'joe'
        self.assertEqual(management.get_default_username(), '')
        self.assertEqual(
            management.get_default_username(check_db=False), 'joe')

    def test_i18n(self):
        # 'Julia' with accented 'u': non-ASCII system usernames are
        # normalized to an ASCII suggestion.
        management.get_system_username = lambda: 'J\xfalia'
        self.assertEqual(management.get_default_username(), 'julia')
@skipIfCustomUser
class ChangepasswordManagementCommandTestCase(TestCase):
    """Tests for the ``changepassword`` management command."""

    def setUp(self):
        self.user = models.User.objects.create_user(username='joe', password='qwerty')
        self.stdout = StringIO()
        self.stderr = StringIO()

    def tearDown(self):
        self.stdout.close()
        self.stderr.close()

    def test_that_changepassword_command_changes_joes_password(self):
        "Executing the changepassword management command should change joe's password"
        self.assertTrue(self.user.check_password('qwerty'))
        command = changepassword.Command()
        # Bypass the interactive getpass prompt with a fixed password.
        command._get_pass = lambda *args: 'not qwerty'

        command.execute("joe", stdout=self.stdout)
        command_output = self.stdout.getvalue().strip()

        self.assertEqual(command_output, "Changing password for user 'joe'\nPassword changed successfully for user 'joe'")
        self.assertTrue(models.User.objects.get(username="joe").check_password("not qwerty"))

    def test_that_max_tries_exits_1(self):
        """
        A CommandError should be thrown by handle() if the user enters in
        mismatched passwords three times.
        """
        command = changepassword.Command()
        # Returning the (truthy, varying) args tuple guarantees password and
        # confirmation never compare equal, exhausting the retry budget.
        command._get_pass = lambda *args: args or 'foo'

        with self.assertRaises(CommandError):
            command.execute("joe", stdout=self.stdout, stderr=self.stderr)
@skipIfCustomUser
class CreatesuperuserManagementCommandTestCase(TestCase):
    """Tests for the ``createsuperuser`` management command."""

    def test_createsuperuser(self):
        "Check the operation of the createsuperuser management command"
        # We can use the management command to create a superuser
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe",
            email="joe@somewhere.org",
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = User.objects.get(username="joe")
        self.assertEqual(u.email, 'joe@somewhere.org')

        # created password should be unusable
        self.assertFalse(u.has_usable_password())

    def test_verbosity_zero(self):
        # We can suppress output on the management command
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe2",
            email="joe2@somewhere.org",
            verbosity=0,
            stdout=new_io
        )
        command_output = new_io.getvalue().strip()
        # verbosity=0 must print nothing at all
        self.assertEqual(command_output, '')
        u = User.objects.get(username="joe2")
        self.assertEqual(u.email, 'joe2@somewhere.org')
        self.assertFalse(u.has_usable_password())

    def test_email_in_username(self):
        # Usernames that look like email addresses are accepted verbatim.
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            username="joe+admin@somewhere.org",
            email="joe@somewhere.org",
            stdout=new_io
        )
        u = User._default_manager.get(username="joe+admin@somewhere.org")
        self.assertEqual(u.email, 'joe@somewhere.org')
        self.assertFalse(u.has_usable_password())

    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_swappable_user(self):
        "A superuser can be created when a custom User model is in use"
        # We can use the management command to create a superuser
        # We skip validation because the temporary substitution of the
        # swappable User model messes with validation.
        new_io = StringIO()
        call_command("createsuperuser",
            interactive=False,
            email="joe@somewhere.org",
            date_of_birth="1976-04-01",
            stdout=new_io,
            skip_validation=True
        )
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')
        u = CustomUser._default_manager.get(email="joe@somewhere.org")
        self.assertEqual(u.date_of_birth, date(1976, 4, 1))

        # created password should be unusable
        self.assertFalse(u.has_usable_password())

    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_swappable_user_missing_required_field(self):
        "A Custom superuser won't be created when a required field isn't provided"
        # We can use the management command to create a superuser
        # We skip validation because the temporary substitution of the
        # swappable User model messes with validation.
        new_io = StringIO()
        with self.assertRaises(CommandError):
            call_command("createsuperuser",
                interactive=False,
                username="joe@somewhere.org",
                stdout=new_io,
                stderr=new_io,
                skip_validation=True
            )
        # The failed command must not leave a half-created user behind.
        self.assertEqual(CustomUser._default_manager.count(), 0)
class PermissionDuplicationTestCase(TestCase):
    """Checks error reporting for clashing/duplicated ``Meta.permissions``."""

    def setUp(self):
        # Save a copy of the real permissions list; the test mutates the
        # class Meta in place.
        self._original_permissions = models.Permission._meta.permissions[:]

    def tearDown(self):
        models.Permission._meta.permissions = self._original_permissions

    def test_duplicated_permissions(self):
        """
        Test that we show proper error message if we are trying to create
        duplicate permissions.
        """
        # check duplicated default permission
        models.Permission._meta.permissions = [
            ('change_permission', 'Can edit permission (duplicate)')]
        six.assertRaisesRegex(self, CommandError,
            "The permission codename 'change_permission' clashes with a "
            "builtin permission for model 'auth.Permission'.",
            create_permissions, models, [], verbosity=0)

        # check duplicated custom permissions
        models.Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
            ('other_one', 'Some other permission'),
            ('my_custom_permission', 'Some permission with duplicate permission code'),
        ]
        six.assertRaisesRegex(self, CommandError,
            "The permission codename 'my_custom_permission' is duplicated for model "
            "'auth.Permission'.",
            create_permissions, models, [], verbosity=0)

        # should not raise anything
        models.Permission._meta.permissions = [
            ('my_custom_permission', 'Some permission'),
            ('other_one', 'Some other permission'),
        ]
        create_permissions(models, [], verbosity=0)
| |
#!/usr/bin/env python3
# coding=utf-8
"""
Multiple helpers for triggering the bugs more easily. All Triggers should be subclasses of RawTrigger
"""
__author__ = "Benjamin Schubert, benjamin.schubert@epfl.ch"
from abc import ABCMeta, abstractmethod
from contextlib import suppress
import queue
import multiprocessing
import logging
import os
import re
import subprocess
import resource
from threading import Thread
import time
from lib.helper import launch_and_log
from lib.trigger.benchmark import BenchmarkWithHelper, ApacheBenchmark, RawBenchmark, BaseBenchmark
from lib.trigger.helper import BaseHelper, UrlFetcherHelper
from lib.parsers.configuration import get_trigger_conf
class RawTrigger(metaclass=ABCMeta):
    """
    The base trigger for the bugs. All bug triggers should inherit it
    """
    def __init__(self):
        """
        Fetches the configuration of the program and stores it in conf.
        """
        # NOTE: names with leading *and* trailing double underscores are not
        # name-mangled, so subclasses (e.g. TriggerWithHelper) assign
        # self.__cmd__ directly.
        self.__cmd__ = None
        self.__returned_information__ = None
        self.conf = get_trigger_conf(self.program)

    @property  # pragma nocover
    @abstractmethod
    def program(self) -> str:
        """ The program to run """

    @property  # pragma nocover
    @abstractmethod
    def benchmark(self) -> RawBenchmark:
        """ The Benchmarking class to use for this class """

    @abstractmethod
    def run(self) -> int:
        """
        The main function used to make the program trigger the bug. Should return the value of self.check_success()
        :return: 0|1|None on success|failure|unexpected error
        """

    @abstractmethod
    def check_success(self, *args, **kwargs) -> int:
        """
        Checks whether the run was successful or not
        :param args: other arguments
        :param kwargs: other keyword arguments
        :return: 0|1|None on success|failure|unexpected result
        """

    @property
    def cmd(self) -> str:
        """
        The command to execute in a subprocess
        """
        return self.__cmd__

    @cmd.setter
    def cmd(self, cmd: str) -> None:
        """
        cmd setter
        :param cmd: the new command
        """
        self.__cmd__ = cmd

    @property
    def returned_information(self) -> list:
        """
        Used to store results for the runs. This is by default None, and analysis plugins (such as benchmark) can add
        things to it
        """
        return self.__returned_information__

    @returned_information.setter
    def returned_information(self, returned_information: object) -> None:
        """
        Sets the result to the given value
        :param returned_information: the object to set up as a result
        """
        self.__returned_information__ = returned_information

    @staticmethod
    def __preexec_fn__() -> None:
        """
        A helper for setting the RLIMIT object to infinity, to be able to return full coredumps
        """
        # Runs in the child between fork and exec, so the spawned program may
        # dump an unlimited-size core file.
        resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
# noinspection PyAbstractClass
class BaseTrigger(RawTrigger, metaclass=ABCMeta):
    """
    Base class for triggers that launch a single program and classify the
    result from its exit status.
    """
    def __init__(self):
        super().__init__()
        self.cmd = self.failure_cmd

    @property  # pragma nocover
    @abstractmethod
    def failure_cmd(self) -> str:
        """ Command line used for runs that should trigger the bug """

    @property  # pragma nocover
    @abstractmethod
    def success_cmd(self) -> str:
        """
        Command line used for runs expected to succeed. When the bug is not
        input dependent this may simply be the failure command:

            @property
            def success_cmd(self) -> str:
                return self.failure_cmd
        """

    @property  # pragma nocover
    @abstractmethod
    def expected_failure(self) -> int:
        """ Exit code the program returns when the bug fires """

    @property
    def benchmark(self) -> BaseBenchmark:
        """
        Benchmarking class associated with this trigger
        """
        return BaseBenchmark

    def check_success(self, error_code: int, *args, **kwargs) -> int:
        """
        Classify a run from the exit code of the triggered program.

        :param error_code: the error code returned by the trigger
        :param args: additional arguments
        :param kwargs: additional keyword arguments
        :return: 0|1|None on success|expected failure|unexpected failure
        """
        expected = self.expected_failure
        if error_code == expected:
            return 1
        if not error_code:
            return 0
        logging.verbose("Got error code {}, expected {}".format(error_code, expected))
        return None

    def run(self) -> int:
        """
        Launch self.cmd in a subprocess with unlimited core size, then
        classify the outcome through check_success().

        :return: 0|1|None on success|failure|unexpected result
        """
        logging.verbose(self.cmd)
        try:
            # noinspection PyTypeChecker
            launch_and_log(self.cmd, shell=True, preexec_fn=self.__preexec_fn__)
        except subprocess.CalledProcessError as exc:
            return self.check_success(error_code=exc.returncode)
        return self.check_success(error_code=0)
# noinspection PyAbstractClass
class TriggerWithHelper(RawTrigger, metaclass=ABCMeta):
    """
    A trigger using a helper thread for spawning process. Especially useful when triggering bug on servers with clients
    """
    class Server(Thread):
        """
        Thread to launch the subprocess for the server, in case some server don't go in background
        """
        def __init__(self, command: str):
            super().__init__()
            self.__output__ = None
            self.command = command

        def run(self):
            """
            Launches the command and waits for the output
            """
            # A non-zero exit is expected when the server is killed later;
            # do not let it surface as an error from this thread.
            with suppress(subprocess.CalledProcessError):
                launch_and_log(self.command.split(" "), preexec_fn=TriggerWithHelper.__preexec_fn__)

    def __init__(self):
        super().__init__()
        # Seed the command with the server start command; plugins may
        # rewrite self.__cmd__ later through the `cmd` setter.
        self.__cmd__ = self.start_cmd

    @property  # pragma nocover
    @abstractmethod
    def helper(self) -> BaseHelper:
        """ The helper to use """

    @property  # pragma nocover
    @abstractmethod
    def delay(self) -> int:
        """ The delay to wait between each program call, to let the server time to set up correctly """

    @property  # pragma nocover
    @abstractmethod
    def helper_commands(self) -> list:
        """ A list of commands to call in the helpers """

    @property  # pragma nocover
    @abstractmethod
    def start_cmd(self) -> str:
        """ A function that will be executed by subprocess to start the server """

    @property  # pragma nocover
    @abstractmethod
    def stop_cmd(self) -> str:
        """ A function that will be executed by subprocess. This should stop the server """

    @property
    def benchmark(self) -> BenchmarkWithHelper:
        """
        Returns the benchmarking class to use with this class
        """
        return BenchmarkWithHelper

    @property
    def timeout(self) -> int:  # pylint: disable=no-self-use
        """
        Timeout in seconds to wait before killing the thread. Will be done once per helper command
        """
        # None means join() waits forever.
        return None

    @property
    def named_helper_args(self) -> dict:  # pylint: disable=no-self-use
        """
        Additional arguments to pass to the helper
        """
        return {}

    def run(self) -> int:
        """
        Main function. Calls every other one in order to make the bug trigger
        :return: 0|1|None on success|failure|unexpected event
        """
        try:
            logging.verbose(self.cmd)
            proc_start = self.Server(self.cmd)  # this is not a typo. Using cmd is REQUIRED for the sake of plugins
            proc_start.start()
            # Give the server time to finish starting up.
            time.sleep(self.delay)
            triggers = []
            results_queue = multiprocessing.Queue()  # pylint: disable=no-member
            for command in self.helper_commands:
                # noinspection PyCallingNonCallable
                triggers.append(self.helper(command, results=results_queue, **self.named_helper_args))
            for thread in triggers:
                thread.start()
            for thread in triggers:
                thread.join(self.timeout)
            # Helpers appear to be multiprocessing.Process instances
            # (terminate() does not exist on threading.Thread) -- TODO confirm.
            for thread in triggers:
                thread.terminate()
        finally:
            # Always try to stop the server, even when a helper blew up.
            with suppress(subprocess.CalledProcessError):
                launch_and_log(self.stop_cmd.split(" "))
        # NOTE(review): if self.Server(...) raised before `triggers` was
        # bound, the loop below would hit a NameError after the finally
        # block -- confirm this path cannot occur.
        results = []
        for _ in triggers:
            with suppress(queue.Empty):
                results.append(results_queue.get_nowait())
        time.sleep(self.delay)
        return self.check_success(results=results)
# noinspection PyAbstractClass
class ApacheTrigger(TriggerWithHelper):
    """
    A trigger specifically designed for apache
    """
    @property  # pragma nocover
    @abstractmethod
    def error_pattern(self) -> str:
        """ The error pattern to search in error_log """

    @property
    def benchmark(self) -> ApacheBenchmark:
        """
        Returns the correct benchmark for Apache programs (Using Apache Bench utility)
        """
        return ApacheBenchmark

    @property
    def start_cmd(self) -> str:
        """
        The start command for apache
        """
        return "{}/bin/httpd -k start".format(self.conf.getdir("install_directory"))

    @property
    def stop_cmd(self) -> str:
        """
        The stop command for apache
        """
        return "{}/bin/httpd -k stop".format(self.conf.getdir("install_directory"))

    def clean_logs(self) -> None:
        """
        Cleans the log files before running an experiment.

        Each removal is suppressed independently: the previous single
        suppress() around both calls meant a missing error_log aborted the
        block and left a stale access_log behind.
        """
        log_dir = self.conf.getdir("install_directory")
        for log_name in ("logs/error_log", "logs/access_log"):
            with suppress(FileNotFoundError):
                os.remove(os.path.join(log_dir, log_name))

    @property
    def env(self) -> dict:  # pylint: disable=no-self-use
        """
        The environment variables to use when running process
        """
        _env_ = os.environ.copy()
        _env_["APACHE_RUN_USER"] = _env_["USER"]
        _env_["APACHE_RUN_GROUP"] = _env_["USER"]
        return _env_

    @property
    def delay(self) -> int:
        """
        A delay of 2 should be sufficient for all apache's instance
        """
        return 2

    @property
    def named_helper_args(self) -> dict:
        """
        Adds the listening port and the number of iteration to do to the arguments passed to the helper
        """
        return {
            "port": self.conf["listening_port"],
            "iterations": 1000
        }

    @property
    def helper(self) -> UrlFetcherHelper:
        """
        By default, triggering apache's bug is done with the UrlFetcherHelper
        """
        return UrlFetcherHelper

    def check_success(self, *args, **kwargs) -> int:
        """
        Checks in the error log for the string we expect
        :param args: other arguments
        :param kwargs: other keyword arguments
        :return: 0|1|None on success|Failure|unexpected error
        """
        # Compute the log path once instead of three times.
        error_log_path = os.path.join(self.conf.getdir("install_directory"), "logs/error_log")
        if not os.path.exists(error_log_path):
            return 0

        with open(error_log_path) as error_log:
            for line in error_log:
                if re.match(self.error_pattern, line):
                    logging.debug(line)
                    logging.debug("Found the pattern in apache's error log")
                    return 1

        logging.debug("No error pattern in apache's error log")
        return 0

    def run(self) -> int:
        """
        clean apache's log before calling it's parent run function and returning its value
        :return 0|1|None on success|failure|unexpected failure
        """
        self.clean_logs()
        return super().run()
| |
'''
Copyright 2015 Planet Labs, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import numpy
import logging
from radiometric_normalization import gimage
def generate(image_paths, output_path,
             method='mean_with_uniform_weight',
             image_nodata=None):
    '''Synthesizes a time stack image set into a single reference image.

    All images in time stack must:
        - contain the same number of bands
        - have the same band order
        - have the same size
        - have the same nodata value
        - have the same geographic metadata

    The output file is only supposed to be used internally so options to change
    the nodata value and the datatype are not exposed.
        - The output image data type is uint16
        - The output nodata values are indicated by a 0 in the alpha band

    Input:
        image_paths (list of str): A list of paths for input time stack images
        output_path (str): A path to write the file to
        method (str): Time stack analysis method; only
            'mean_with_uniform_weight' is currently available
        image_nodata (int): [Optional] Manually provide a no data value

    Raises:
        NotImplementedError: if `method` is not a supported analysis method
    '''
    output_datatype = numpy.uint16
    if method == 'mean_with_uniform_weight':
        output_gimage = mean_with_uniform_weight(
            image_paths, output_datatype, image_nodata)
    else:
        # The two adjacent string literals previously joined without a space,
        # yielding "...'mean_with_uniform_weight'method is implemented".
        raise NotImplementedError("Only 'mean_with_uniform_weight' "
                                  "method is implemented")
    gimage.save(output_gimage, output_path, compress=False)
def _sum_masked_array_list(sum_masked_arrays,
frequency_arrays,
new_masked_arrays):
''' Calculates the sum of two lists of masked arrays
Input:
sum_masked_arrays (list of masked arrays): A list of masked
arrays (one for each band)
frequency_arrays (list of arrays of ints): Keeping track of
how many times each pixel is summed (one for each band)
new_masked_arrays (list of masked arrays): A list of masked
arrays (one for each band)
Output:
sum_masked_array (masked array): A list of masked
arrays (one for each band)
'''
no_bands = len(sum_masked_arrays)
for band_index in xrange(no_bands):
sum_masked_arrays[band_index] = \
numpy.ma.sum([sum_masked_arrays[band_index],
new_masked_arrays[band_index]],
axis=0)
frequency_arrays[band_index] = frequency_arrays[band_index] + \
numpy.logical_not(new_masked_arrays[band_index].mask).astype('int')
return sum_masked_arrays, frequency_arrays
def _masked_arrays_from_gimg(input_gimg, working_datatype):
'''A gimage as input and outputs a masked array of output_datatype.
Input:
input_gimg (a gimage): A gimage to convert
working_datatype (numpy datatype): The datatype in use for
the calculations
Output:
all_bands (list of numpy arrays): A list of each band of the gimage
as a masked array
'''
no_bands = len(input_gimg.bands)
all_bands_masked_array_list = []
for band_index in xrange(no_bands):
one_band_masked_array = \
numpy.ma.masked_array(
input_gimg.bands[band_index].astype(working_datatype),
input_gimg.alpha == 0)
all_bands_masked_array_list.append(one_band_masked_array)
return all_bands_masked_array_list
def _uniform_weight_alpha(sum_masked_arrays, output_datatype):
'''Calculates the cumulative mask of a list of masked array
Input:
sum_masked_arrays (list of numpy masked arrays): The list of
masked arrays to find the cumulative mask of, each element
represents one band.
(sums_masked_array.mask has a 1 for a no data pixel and
a 0 otherwise)
output_datatype (numpy datatype): The output datatype
Output:
output_alpha (numpy uint16 array): The output mask
(0 for a no data pixel, uint16 max value otherwise)
'''
output_alpha = numpy.ones(sum_masked_arrays[0].shape)
for band_sum_masked_array in sum_masked_arrays:
output_alpha[numpy.nonzero(band_sum_masked_array.mask == 1)] = 0
output_alpha = output_alpha.astype(output_datatype) * \
numpy.iinfo(output_datatype).max
return output_alpha
def _mean_from_sum(sum_masked_arrays,
frequency_arrays,
output_datatype):
''' Calculates the mean from the summation of all the images
Input:
sum_masked_arrays (list of numpy masked arrays): The list of
masked arrays to find the mean of, each element of the list
represents one band.
(sums_masked_array.mask has a 1 for a no data pixel and
a 0 otherwise)
frequency_arrays (numpy array of ints): The number of times each
pixel has been summed
output_datatype (numpy data type): The output datatype
Output:
output_mean(list of numpy arrays): A list of the means of the
images. Each element of the list represents one band.
'''
no_bands = len(sum_masked_arrays)
output_mean = []
for band_index in xrange(no_bands):
output_band = numpy.zeros(frequency_arrays[band_index].shape)
good_indices = numpy.nonzero(frequency_arrays[band_index] != 0)
output_band[good_indices] = \
sum_masked_arrays[band_index].data[good_indices] / \
frequency_arrays[band_index][good_indices]
output_mean.append(output_band.astype(output_datatype))
return output_mean
def mean_with_uniform_weight(image_paths, output_datatype, image_nodata):
    ''' Calculates the reference image as the mean of each band with uniform
    weighting (zero for nodata pixels, 2 ** 16 - 1 for valid pixels)

    The input are a set of uint16 geotiffs, the output is a uint16 geotiff but
    inbetween we use numpy double masked arrays so that we can safely take the
    summation of all the values without reaching the maximum value.

    This function is written so that it should only load two gimages into
    memory at any one time (to save memory when analysing lists of > 100
    images)

    Input:
        image_paths (list of strings): A list of image paths for each image
        output_datatype (numpy datatype): Data type for the output image
        image_nodata (int): [Optional] Manually provided nodata value,
            applied uniformly to every image in the stack

    Output:
        output_gimage (gimage): The mean for each band and the weighting in a
            gimage data format
    '''
    logging.info('Time stack analysis is using: Mean with uniform weight.')
    working_datatype = numpy.double

    no_images = len(image_paths)
    first_gimg = gimage.load(image_paths[0], image_nodata)
    sum_masked_arrays = _masked_arrays_from_gimg(first_gimg,
                                                 working_datatype)
    no_bands = len(sum_masked_arrays)
    # `range` instead of `xrange` for Python 2/3 compatibility.
    frequency_arrays = \
        [numpy.logical_not(sum_masked_arrays[band_index].mask).astype('int')
         for band_index in range(no_bands)]

    for image_index in range(1, no_images):
        # BUGFIX: the nodata override was previously applied only to the
        # first image; pass it to every load so all masks are consistent.
        new_gimg = gimage.load(image_paths[image_index], image_nodata)
        gimage.check_comparable([first_gimg, new_gimg], check_metadata=True)
        new_masked_arrays = _masked_arrays_from_gimg(new_gimg,
                                                     working_datatype)
        sum_masked_arrays, frequency_arrays = _sum_masked_array_list(
            sum_masked_arrays, frequency_arrays, new_masked_arrays)

    output_alpha = _uniform_weight_alpha(sum_masked_arrays, output_datatype)
    output_bands = _mean_from_sum(sum_masked_arrays, frequency_arrays,
                                  output_datatype)
    output_gimage = gimage.GImage(output_bands, output_alpha,
                                  first_gimg.metadata)

    return output_gimage
| |
#!/usr/bin/env python
"""Module to manage Planet4 markings."""
import argparse
import itertools
import logging
import math
from math import cos, degrees, pi, radians, sin
import matplotlib.lines as lines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.patches import Ellipse
from numpy import arctan2
from numpy import linalg as LA
from shapely import affinity
from shapely import geometry as geom
from . import io
from .exceptions import NoFilesFoundError
LOGGER = logging.getLogger(__name__)
IMG_X_SIZE = 840
IMG_Y_SIZE = 648
IMG_SHAPE = (IMG_Y_SIZE, IMG_X_SIZE)
GOLD_MEMBERS = ['michaelaye', 'mschwamb', 'Portyankina'] # 'CJ-DPI']
GOLD_PLOT_COLORS = list('bmg')
def example_p4id():
    """Return a sample TileID ('APF00002rq') backed by the default database."""
    image_id = 'APF00002rq'
    database = io.DBManager()
    markings = database.get_image_id_markings(image_id)
    return TileID(image_id, data=markings)
def calc_fig_size(width):
    """Return a (width, height) tuple matching the subframe aspect ratio."""
    aspect = IMG_X_SIZE / IMG_Y_SIZE
    height = width / aspect
    return (width, height)
def gold_legend(ax):
    """Attach a small legend mapping marker colors to the gold members."""
    handles = [plt.Line2D(range(10), range(10), marker='o', color=color)
               for color in 'cmyg']
    ax.legend(handles, GOLD_MEMBERS, numpoints=2,
              bbox_to_anchor=(1., 1.), fontsize=7)
def set_upper_left_corner(ul_x, ul_y):
    """Move the current figure window to (ul_x, ul_y).

    Only works with PyQT this way!
    """
    manager = plt.get_current_fig_manager()
    # keep the current window size, change only the position
    _, _, width, height = manager.window.geometry().getRect()
    manager.window.setGeometry(ul_x, ul_y, width, height)
def diffangle(v1, v2, rads=True):
    """Return the angle between vectors `v1` and `v2`.

    Radians by default; degrees when `rads` is False.
    """
    angle = np.arctan2(LA.norm(np.cross(v1, v2)), np.dot(v1, v2))
    if rads:
        return angle
    return degrees(angle)
def set_subframe_size(ax):
    """Clip the axis view to the Planet 4 subframe (y axis inverted)."""
    # equivalent to set_xlim(0, IMG_X_SIZE) + set_ylim(IMG_Y_SIZE, 0)
    ax.axis((0, IMG_X_SIZE, IMG_Y_SIZE, 0))
class TileID(object):
    """Manage Planet 4 Image ids, getting data, plot stuff etc.

    At init this class will get the data for the given `imgid` using either the latest
    found database file or the optionally provided one.

    Parameters
    ----------
    imgid : str
        Planet4 image_id
    scope : {'planet4', 'hirise'}
        Coordinate scope handed through to the created marking objects.
    dbname : str, optional
        Filepath to database name. The marking data for `imgid` will be extracted.
        Default: Latest one.
    data : pd.DataFrame, optional
        If the data was already extracted before init, it can be provided here.
    """

    def __init__(self, imgid, scope='planet4', dbname=None, data=None):
        self.imgid = io.check_and_pad_id(imgid)
        self._data = data  # lazily loaded by `data` when None
        self.scope = scope
        self.dbname = dbname

    @property
    def data(self):
        """pd.DataFrame : markings for this image id, loaded lazily."""
        if self._data is not None:
            return self._data
        try:
            db = io.DBManager(self.dbname)
            self._data = db.get_image_id_markings(self.imgid)
            return self._data
        except NoFilesFoundError:
            print("Cannot find PlanetFour database.")
            return None

    @property
    def image_name(self):
        """str : name of the HiRISE image this tile belongs to."""
        return self.data.image_name.iloc[0]

    @property
    def tile_coords(self):
        """Return the unique (x_tile, y_tile) coordinates of this tile."""
        return self.data[['x_tile', 'y_tile']].drop_duplicates().values[0]

    @property
    def blotchmask(self):
        """pd.Series : boolean mask selecting blotch markings."""
        return self.data.marking == 'blotch'

    @property
    def fanmask(self):
        """pd.Series : boolean mask selecting fan markings."""
        return self.data.marking == 'fan'

    @property
    def n_marked_classifications(self):
        """int : number of distinct classifications with a fan or blotch."""
        return self.data[self.blotchmask | self.fanmask].classification_id.nunique()

    @property
    def subframe(self):
        "np.array : Get tile url and return image tile using io function."
        url = self.data.iloc[0].image_url
        return io.get_subframe(url)

    def filter_data(self, kind, user_name=None, without_users=None):
        """Filter and return data for kind, user, or without_users.

        Parameters
        ----------
        kind : {'fan', 'blotch'}
            Marking
        user_name : str
            Filter data further for `user_name`
        without_users : list(strings)
            Only return data that is not in list of user_names (useful for non-gold data)
        """
        mask = self.data.marking == kind
        if user_name is not None:
            mask = (mask) & (self.data.user_name == user_name)
        if without_users is not None:
            mask = (mask) & (~self.data.user_name.isin(without_users))
        return self.data[mask]

    def get_fans(self, user_name=None, without_users=None):
        """Return data for fan markings."""
        return self.filter_data('fan', user_name, without_users)

    def get_blotches(self, user_name=None, without_users=None):
        """Return data for blotch markings."""
        return self.filter_data('blotch', user_name, without_users)

    def show_subframe(self, ax=None, aspect='auto'):
        """Display the image tile, optionally into a provided axis."""
        if ax is None:
            # the figure handle was unused; keep only the axis
            _, ax = plt.subplots(figsize=calc_fig_size(8))
        ax.imshow(self.subframe, origin='upper', aspect=aspect)
        ax.set_axis_off()

    def plot_objects(self, objects, n=None, img=True, ax=None,
                     user_color=None, user_colors=None, wind_pointer=False):
        """Plotting either fans or blotches with p4 subframe background.

        Parameters
        ----------
        objects : iterable
            Fan/Blotch marking objects to draw.
        n : int, optional
            Stop after plotting `n` objects.
        img : bool
            Plot the subframe as background image first.
        ax : matplotlib.axes.Axes, optional
        user_color : color, optional
            One fixed color for all objects (overrides `user_colors`).
        user_colors : iterable of colors, optional
            Per-object colors; defaults to a cycled seaborn palette.
        wind_pointer : bool
            Also draw a mean wind pointer for Fan objects.
        """
        LOGGER.debug("Entering markings.plot_objects")
        LOGGER.debug("Received %i objects to plot.", len(objects))
        if ax is None:
            _, ax = plt.subplots(figsize=calc_fig_size(8))
            LOGGER.debug("Created own axis.")
        if img:
            LOGGER.debug("Plotting background image.")
            self.show_subframe(ax)
        counter = 0
        if user_colors is None:
            colors = itertools.cycle(sns.color_palette('bright', 12))
        else:
            colors = user_colors
        for obj, color in zip(objects, colors):
            if user_color is not None:
                color = user_color
            obj.plot(color=color, ax=ax)
            if wind_pointer is True and isinstance(obj, Fan):
                obj.add_mean_wind_pointer(color=color, ax=ax)
            counter += 1
            if counter == n:
                break
        set_subframe_size(ax)
        ax.set_axis_off()

    def pop_kwargs(self, kwargs):
        """Pop marking-related options out of `kwargs` and return them."""
        with_center = kwargs.pop('with_center', False)
        user_name = kwargs.pop('user_name', None)
        without_users = kwargs.pop('without_users', None)
        lw = kwargs.pop('lw', 1)
        return with_center, user_name, without_users, lw

    def plot_markings(self, kind, data=None, **kwargs):
        """General plotting method for both fans and blotches.

        Also accepts any derived set of objects in the `data` parameter.

        Parameters:
        ----------
        kind : {'blotch','fan'}
            Switch between specialized plotting features
        **kwargs : {dict}
            Any keywords to hand down to other methods here and/or matplotlib
        data : {list, pd.core.frame.DataFrame}, optional
            Any marking objects related to this image_id (the default is None, which will plot the raw data.)
        """
        with_center, user_name, without_users, lw = self.pop_kwargs(kwargs)
        if data is None:
            data = self.filter_data(kind, user_name, without_users)
        markingClass = Blotch if kind == 'blotch' else Fan
        # isinstance instead of `type(...) ==`: also accepts DataFrame
        # subclasses and is the idiomatic type check.
        if isinstance(data, pd.DataFrame):
            data = [markingClass(i, self.scope, with_center=with_center, lw=lw)
                    for _, i in data.iterrows()]
        self.plot_objects(data, **kwargs)

    def plot_blotches(self, data=None, **kwargs):
        """Plot blotch markings; see `plot_markings` for options."""
        self.plot_markings('blotch', data=data, **kwargs)

    def plot_fans(self, data=None, **kwargs):
        """Plot fan markings; see `plot_markings` for options."""
        self.plot_markings('fan', data=data, **kwargs)

    def plot_all(self):
        """Show a 2x2 grid: raw subframes next to fan and blotch overlays."""
        fig, axes = plt.subplots(2, 2)
        axes = axes.ravel()
        for i in [0, 2]:
            self.show_subframe(ax=axes[i])
        self.plot_fans(ax=axes[1])
        self.plot_blotches(ax=axes[3])
        for ax in axes:
            ax.set_axis_off()
        fig.subplots_adjust(left=None, top=None, bottom=None, right=None,
                            wspace=1e-3, hspace=1e-3)
        plt.show()
class Blotch(Ellipse):
    """Blotch management class for P4.

    Parameters
    ----------
    data : object with blotch data attributes
        object should provide attributes [`x`, `y`, `radius_1`, `radius_2`, `angle`]
    scope : {'planet4', 'hirise'}
        string that decides between using x/y or image_x/image_y as center coords
    with_center : bool, optional
        If True, `plot` also marks the center point.
    kwargs : dict, optional
        Passed through to matplotlib.patches.Ellipse.

    Attributes
    ----------
    to_average : list
        List of cols to be averaged after clustering
    data : object with blotch data attributes, as provided by `data`
    center : tuple (inherited from matplotlib.Ellipse)
        Coordinates of center, i.e. self.x, self.y
    """
    to_average = 'x y image_x image_y angle radius_1 radius_2'.split()

    def __init__(self, data, scope='planet4', with_center=False, **kwargs):
        self.data = data
        self.scope = scope if scope is not None else 'planet4'
        self.with_center = with_center
        if scope not in ['hirise', 'planet4']:
            raise TypeError('Unknown scope: {}'.format(scope))
        try:
            self.x = data.x if scope == 'planet4' else data.image_x
            self.y = data.y if scope == 'planet4' else data.image_y
        except AttributeError:
            print("No x and y attributes in data:\n{}"
                  .format(data))
            # Re-raise the original exception; `raise AttributeError` would
            # have discarded the message and traceback.
            raise
        # default member number is 1. This is set to the cluster member
        # count inside clustering execution.
        self._n_members = 1
        # Ellipse takes full axis lengths, data stores semi-axes.
        super(Blotch, self).__init__((self.x, self.y),
                                     data.radius_1 * 2, data.radius_2 * 2,
                                     data.angle, alpha=0.65,
                                     fill=False, **kwargs)

    def is_equal(self, other):
        """Return True when all geometry attributes equal `other`'s.

        Bug fix: the original compared self.data.image_x against
        other.data.image_y.
        """
        return (self.data.x == other.data.x and
                self.data.y == other.data.y and
                self.data.image_x == other.data.image_x and
                self.data.image_y == other.data.image_y and
                self.data.radius_1 == other.data.radius_1 and
                self.data.radius_2 == other.data.radius_2 and
                self.data.angle == other.data.angle)

    def to_shapely(self):
        """Convert a markings.Blotch to shapely Ellipse.

        Code from https://gis.stackexchange.com/questions/243459/drawing-ellipse-with-shapely/243462
        """
        circ = geom.Point(self.center).buffer(1)
        ell = affinity.scale(circ, self.data.radius_1, self.data.radius_2)
        ellr = affinity.rotate(ell, self.data.angle)
        return ellr

    @property
    def area(self):
        """Ellipse area, pi * a * b."""
        return pi * self.data.radius_1 * self.data.radius_2

    @property
    def x1(self):
        """x-component of the first axis vector (angle stored in degrees)."""
        return math.cos(math.radians(self.angle)) * self.data.radius_1

    @property
    def y1(self):
        """y-component of the first axis vector.

        Bug fix: the angle is in degrees and must be converted like in
        `x1`; the original fed degrees straight into `math.sin`.
        """
        return math.sin(math.radians(self.angle)) * self.data.radius_1

    @property
    def p1(self):
        """End point of the first axis."""
        return np.array(self.center) + np.array([self.x1, self.y1])

    @property
    def p2(self):
        """Opposite end point of the first axis."""
        return np.array(self.center) - np.array([self.x1, self.y1])

    @property
    def x2(self):
        """x-component of the second (perpendicular) axis vector."""
        return math.cos(math.radians(self.angle + 90)) * self.data.radius_2

    @property
    def y2(self):
        """y-component of the second axis vector."""
        return math.sin(math.radians(self.angle + 90)) * self.data.radius_2

    @property
    def p3(self):
        """End point of the second axis."""
        return np.array(self.center) + np.array([self.x2, self.y2])

    @property
    def p4(self):
        """Opposite end point of the second axis."""
        return np.array(self.center) - np.array([self.x2, self.y2])

    @property
    def limit_points(self):
        """The four axis end points [p1, p2, p3, p4]."""
        return [self.p1, self.p2, self.p3, self.p4]

    def plot_center(self, ax, color='b'):
        """Scatter-plot the blotch center on `ax`."""
        ax.scatter(self.x, self.y, color=color,
                   s=20, marker='.')

    def plot_limit_points(self, ax, color='b'):
        """Scatter-plot the four axis end points on `ax`."""
        for x, y in self.limit_points:
            ax.scatter(x, y, color=color, s=20, marker='o')

    @property
    def n_members(self):
        """Number of cluster members this blotch represents."""
        return self._n_members

    @n_members.setter
    def n_members(self, value):
        self._n_members = value

    def plot(self, color=None, ax=None):
        """Add this ellipse patch to `ax` (creating one if needed)."""
        if ax is None:
            _, ax = plt.subplots()
        if color is not None:
            self.set_color(color)
        ax.add_patch(self)
        if self.with_center:
            self.plot_center(ax, color=color)

    def store(self, fpath=None):
        """Return self.data enriched with limit points; optionally write HDF.

        Parameters
        ----------
        fpath : pathlib.Path, optional
            When given, the result is also written to fpath with '.hdf' suffix.
        """
        out = self.data
        for p in range(1, 5):
            attr = 'p' + str(p)
            point = getattr(self, attr)
            out[attr + '_x'] = point[0]
            out[attr + '_y'] = point[1]
        if 'image_id' not in out.index:
            # NOTE(review): self.image_id is not set in __init__; presumably
            # it is attached externally before store() is called — confirm.
            out['image_id'] = self.image_id
        if fpath is not None:
            out.to_hdf(str(fpath.with_suffix('.hdf')), 'df')
        out['n_members'] = self.n_members
        return out

    def __str__(self):
        s = "markings.Blotch object. Input data:\n"
        s += self.data.__str__()
        s += '\n'
        s += "N_members: {}".format(self.n_members)
        return s

    def __repr__(self):
        return self.__str__()
class HiBlotch(Blotch):
    """Convenience subclass: a `Blotch` fixed to the 'hirise' coordinate scope."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, scope='hirise', **kwargs)
def rotate_vector(v, angle):
    """Rotate vector by angle given in degrees.

    Parameters
    ----------
    v : np.array
        Vector to be rotated
    angle : float
        Angle in degrees

    Returns
    -------
    np.array
        The rotated 2D vector.
    """
    theta = radians(angle)
    cos_t, sin_t = cos(theta), sin(theta)
    # Standard 2D counter-clockwise rotation matrix.
    rotation = np.array([[cos_t, -sin_t],
                         [sin_t, cos_t]])
    return rotation.dot(v)
class Fan(lines.Line2D):
    """Fan management class for P4.

    Parameters
    ----------
    data : object with fan data attributes
        object has to provide [`x`, `y`, `angle`, `spread`, `distance`]
    scope : {'planet4', 'hirise'}
        string that decides between using x/y or image_x/image_y as base coords
    with_center : bool, optional
        If True, `plot` also marks the fan's center point.
    kwargs : dictionary, optional

    Attributes
    ----------
    to_average : list
        List of columns to average after clustering
    data : object with fan data attributes
        as provided by `data`.
    base : tuple
        base coordinates `x` and `y`.
    inside_half : float
        half the fan's opening angle, i.e. `spread` divided by 2.0.
    armlength : float
        length of the fan arms.
    v1 : float[2]
        vector of first arm of fan.
    v2 : float[2]
        vector of second arm of fan.
    coords : float[3, 2]
        Set of coords to draw for MPL.Line2D object: arm1->base->arm2
    circle_base
    center
    radius
    midpoint
    base_to_midpoint_vec
    """
    to_average = 'x y image_x image_y angle spread distance'.split()
    def __init__(self, data, scope='planet4', with_center=False, **kwargs):
        self.data = data
        self.scope = scope if scope is not None else 'planet4'
        self.with_center = with_center
        if scope not in ['hirise', 'planet4']:
            raise TypeError('Unknown scope: {}'.format(scope))
        # first coordinate is the base of fan
        actual_x = 'x' if scope == 'planet4' else 'image_x'
        actual_y = 'y' if scope == 'planet4' else 'image_y'
        try:
            self.base = self.data.loc[[
                actual_x, actual_y]].values.astype('float')
        except KeyError:
            print("No x and y in the data:\n{}".format(data))
            # NOTE(review): a bare `raise` would preserve the original
            # exception message; `raise KeyError` discards it.
            raise KeyError
        # default n_members value (property)
        self._n_members = 1
        # angles (degrees, like data.angle — see radians() use below)
        self.inside_half = self.data.spread / 2.0
        alpha = self.data.angle - self.inside_half
        beta = self.data.angle + self.inside_half
        # length of arms
        self.armlength = self.get_arm_length()
        # first arm
        self.v1 = rotate_vector([self.armlength, 0], alpha)
        # second arm
        self.v2 = rotate_vector([self.armlength, 0], beta)
        # vector matrix, stows the 1D vectors row-wise
        self.coords = np.vstack((self.base + self.v1,
                                 self.base,
                                 self.base + self.v2))
        # init fan line, first column are the x-components of the row-vectors
        lines.Line2D.__init__(self, self.coords[:, 0], self.coords[:, 1],
                              alpha=0.65, color='white',
                              **kwargs)
    def is_equal(self, other):
        """Return True when all fan-defining attributes equal `other`'s."""
        if self.data.x == other.data.x and\
                self.data.y == other.data.y and\
                self.data.image_x == other.data.image_x and\
                self.data.image_y == other.data.image_y and\
                self.data.angle == other.data.angle and\
                self.data.spread == other.data.spread and\
                self.data.distance == other.data.distance:
            return True
        else:
            return False
    @property
    def n_members(self):
        """Number of cluster members this fan represents."""
        return self._n_members
    @n_members.setter
    def n_members(self, value):
        self._n_members = value
    def get_arm_length(self):
        """Arm length chosen so the fan's total extent matches data.distance."""
        half = radians(self.inside_half)
        return self.data.distance / (cos(half) + sin(half))
    @property
    def area(self):
        """Fan area: isosceles triangle plus the semi-circle cap."""
        tr_h = np.sqrt(self.armlength**2 - self.radius**2)
        tr_area = tr_h * self.radius
        half_circ_area = 0.5 * pi * self.radius**2
        return tr_area + half_circ_area
    @property
    def circle_base(self):
        "float[2] : Vector between end of first arm and second arm of fan."
        return self.v1 - self.v2
    @property
    def semi_circle_center(self):
        """float[2] : vector from base to mid-point between end of arms.

        This is used for the drawing of the semi-circle at the end of the
        two fan arms.
        """
        return self.base + self.v2 + 0.5 * self.circle_base
    @property
    def radius(self):
        "float : for the semi-circle wedge drawing at the end of fan."
        return 0.5 * LA.norm(self.circle_base)
    def add_semicircle(self, ax, color='b'):
        "Draw a semi-circle at end of fan arms using MPL.Wedge."
        # reverse order of arguments for arctan2 input requirements (y, x)
        theta1 = degrees(arctan2(*self.circle_base[::-1]))
        theta2 = theta1 + 180
        wedge = mpatches.Wedge(self.semi_circle_center, self.radius, theta1, theta2,
                               width=0.01 * self.radius, color=color, alpha=0.65)
        ax.add_patch(wedge)
    def add_mean_wind_pointer(self, ax, color='b', ls='-'):
        "Draw a thicker mean wind direction pointer for better visibility in plots."
        # pointer is 3x the arm length so it extends beyond the fan itself
        endpoint = rotate_vector([3 * self.armlength, 0], self.data.angle)
        coords = np.vstack((self.base,
                            self.base + endpoint))
        self.wind_pointer_end = coords[1]
        pointer = lines.Line2D(coords[:, 0], coords[:, 1],
                               alpha=0.65, linewidth=1, linestyle=ls)
        pointer.set_color(color)
        ax.add_line(pointer)
    def plot(self, color=None, ax=None):
        """Draw the fan (arms + semi-circle cap) on `ax`."""
        if ax is None:
            _, ax = plt.subplots()
            set_subframe_size(ax)
        if color is not None:
            self.set_color(color)
        ax.add_line(self)
        self.add_semicircle(ax, color=color)
        if self.with_center:
            self.plot_center(ax, color=color)
    @property
    def center(self):
        """Calculate vector to half total length.

        As total length, I define the armlength + the radius of the semi-circle
        at the end.
        """
        mid_point_vec = rotate_vector([0.5 * (self.armlength + self.radius),
                                       0],
                                      self.data.angle)
        return self.base + mid_point_vec
    def plot_center(self, ax, color='b'):
        """Scatter-plot the fan's center point on `ax`."""
        ax.scatter(self.center[0], self.center[1], color=color,
                   s=20, marker='.')
    @property
    def base_to_center_vec(self):
        """Two stacked points — base and center — for pointer drawing."""
        coords = np.vstack((self.base, self.center))
        return coords
    def add_midpoint_pointer(self, ax, color='b', ls='-'):
        """Draw a line from the fan base to its center point."""
        coords = self.base_to_center_vec
        pointer = lines.Line2D(coords[:, 0], coords[:, 1],
                               alpha=0.65, linewidth=3, linestyle=ls)
        pointer.set_color(color)
        ax.add_line(pointer)
    def __str__(self):
        out = "markings.Fan object. Input data:\n"
        out += 'base: {0}\narmlength: {1}\narm1: {2}\narm2: {3}'\
            .format(self.base, self.armlength, self.base + self.v1,
                    self.base + self.v2)
        return out
    def __repr__(self):
        return self.__str__()
    def store(self, fpath=None):
        """Return self.data enriched with arm end points; optionally write HDF."""
        out = self.data
        for i, arm in enumerate([self.v1, self.v2]):
            out['arm{}_x'.format(i + 1)] = (self.base + arm)[0]
            out['arm{}_y'.format(i + 1)] = (self.base + arm)[1]
        if 'image_id' not in out.index:
            # out['image_id'] = self.image_id
            raise AttributeError("Storage of image_id failed.")
        if fpath is not None:
            out.to_hdf(str(fpath.with_suffix('.hdf')), 'df')
        out['n_members'] = self.n_members
        return out
    def to_shapely(self):
        """Create a shapely half circle rotated by the fan's angle.

        Notes
        =====
        `Motivated by: <https://stackoverflow.com/a/30762727/680232>`_
        """
        # Define the arc (presumably ezdxf uses a similar convention)
        centerx, centery = self.semi_circle_center
        # make a semi-circle first that points to the x-axis, rotate later.
        start_angle = 270  # In degrees
        # number of elements for the semi-circle
        numsegments = 100
        # The coordinates of the arc
        theta = np.radians(np.linspace(start_angle, start_angle+180, numsegments))
        x = centerx + self.radius * np.cos(theta)
        y = centery + self.radius * np.sin(theta)
        arc = geom.LineString(np.column_stack([x, y]))
        rotated = affinity.rotate(arc, self.data.angle,
                                  origin=tuple(self.semi_circle_center))
        # Combine the two arm tips (coords reversed) with the arc samples.
        df = pd.DataFrame(np.vstack([self.coords[::-1][:2], np.array(rotated)]))
        return geom.Polygon(df.round(2).drop_duplicates().values)
class HiFan(Fan):
    """Convenience subclass: a `Fan` fixed to the 'hirise' coordinate scope.

    Bug fix: this class previously derived from `Blotch`, so constructing it
    from fan data built an ellipse instead of a fan. It now mirrors
    `HiBlotch` by subclassing the matching marking class.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, scope='hirise', **kwargs)
def calc_blotchiness(nfans, nblotches):
    """Return the blotchiness ratio, i.e. the fraction of blotch votes.

    Defined as nblotches / (nfans + nblotches); the complementary value
    (1 - blotchiness) is the fan-ness / fnotch value.
    """
    total_votes = nfans + nblotches
    return nblotches / total_votes
class Fnotch(object):
    """Manage Fnotch by providing a cut during output.

    Parameters
    ----------
    value : float
        Fnotch value (= 1 - blotchiness), as calculated in clustering.ClusterManager()
    fandata : pandas.Series
        data set containing all required for Fan object (see `Fan`)
    blotchdata : pandas.Series
        data set containing all required for Blotch object (see `Blotch`)
    """
    # NOTE(review): the docstring advertises a `value` parameter, but
    # `__init__` below only accepts (fan, blotch, scope) — the two are out
    # of sync; confirm which interface is intended.
    @classmethod
    def from_series(cls, series, scope):
        "Create Fnotch instance from series with fan_ and blotch_ indices."
        # Strip the 'fan_' / 'blotch_' prefixes to recover plain column names.
        fan = Fan(series.filter(regex='fan_').rename(lambda x: x[4:]),
                  scope=scope)
        blotch = Blotch(series.filter(regex='blotch_').rename(lambda x: x[7:]),
                        scope=scope)
        # NOTE(review): this passes four positional arguments, but __init__
        # takes only (fan, blotch, scope), so this call raises TypeError as
        # written; __init__ also expects pandas objects (it calls pd.concat
        # and .iloc), not Fan/Blotch instances. Needs reconciliation.
        return cls(series.fnotch_value, fan, blotch, scope)
    def __init__(self, fan, blotch, scope='planet4'):
        # `fan` / `blotch` are concatenated and indexed with .iloc below,
        # so they are expected to be pandas objects here.
        self.fan = fan
        self.blotch = blotch
        self.scope = scope
        self.data = pd.concat([fan, blotch], ignore_index=True)
        self.data.index = ['fan', 'blotch']
        blotchiness = calc_blotchiness(fan.iloc[0]['n_votes'],
                                       blotch.iloc[0]['n_votes'])
        # Nudge by 0.01 so equal vote counts resolve towards the fan row.
        self.data.loc['fan', 'vote_ratio'] = (1 - blotchiness) + 0.01
        self.data.loc['blotch', 'vote_ratio'] = blotchiness - 0.01
    def apply_cut(self, cut):
        """Return the right marking, depending on cut value.

        If the cut is at 0.8, the fnotch value has to be equal or better before
        we assign the fan to the Fnotch object. Otherwise we return a blotch.

        Parameters
        ----------
        cut : float
            Level where we separate fan from blotch

        Returns
        -------
        The pandas row(s) whose `vote_ratio` exceeds `cut`.
        """
        row = self.data[self.data.vote_ratio > cut]
        return row
        # Marking = getattr(markings, row.index[0].title())
        # return Marking(row)
    def __repr__(self):
        return self.data.__repr__()
    def store(self, fpath=None):
        # NOTE(review): self.fanstore, self.blotchstore and self.value are
        # never assigned anywhere in this class, so this method raises
        # AttributeError as written — confirm the intended attribute names
        # (possibly self.fan.store() / self.blotch.store()).
        out = pd.concat([self.fanstore, self.blotchstore])
        out['fnotch_value'] = self.value
        if fpath is not None:
            out.to_hdf(str(fpath.with_suffix('.hdf')), 'df')
        return out
class Container(object):
    """Base class providing alternate constructors from DataFrames and files."""

    @classmethod
    def from_df(cls, df, scope):
        """Build a container from the rows of `df`, one item per row."""
        return cls([row for _, row in df.iterrows()], scope)

    @classmethod
    def from_fname(cls, fname, scope='planet4'):
        """Build a container from a '.csv' or '.hdf' file path.

        Raises TypeError for any other file extension.
        """
        path = str(fname)
        if path.endswith('.hdf'):
            reader = pd.read_hdf
        elif path.endswith('.csv'):
            reader = pd.read_csv
        else:
            raise TypeError("Can only work with '.csv' or '.hdf' files.")
        return cls.from_df(reader(path), scope)
class FanContainer(Container):
    """Container building one `Fan` per item of `iterable`."""
    def __init__(self, iterable, scope='planet4'):
        self.content = [Fan(item, scope) for item in iterable]
class BlotchContainer(Container):
    """Container building one `Blotch` per item of `iterable`."""
    def __init__(self, iterable, scope='planet4'):
        self.content = [Blotch(item, scope) for item in iterable]
# class FnotchContainer(Container):
# pass
# def __init__(self, iterable, scope):
# super().__init__(iterable, Fnotch.from_series)
def main():
    """CLI entry point: plot all markings for a given zooniverse image id."""
    plt.switch_backend('Qt5Agg')
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('imgid',
                            help='zooniverse image id to plot')
    cli_args = arg_parser.parse_args()
    tile = TileID(cli_args.imgid)
    tile.plot_all()


if __name__ == '__main__':
    main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import tempfile
try:
import cStringIO as io
BytesIO = io.StringIO
except ImportError:
import io
BytesIO = io.BytesIO
import fixtures
import testscenarios
from pbr import packaging
from pbr.tests import base
class EmailTestCase(base.BaseTestCase):
    """Tests for packaging.canonicalize_emails."""

    def test_str_dict_replace(self):
        # Every mapping key found inside the string gets substituted.
        source = 'Johnnie T. Hozer'
        replacements = {'T.': 'The'}
        result = packaging.canonicalize_emails(source, replacements)
        self.assertEqual('Johnnie The Hozer', result)
class MailmapTestCase(base.BaseTestCase):
    """Tests for packaging.read_git_mailmap over the supported formats."""

    def setUp(self):
        super(MailmapTestCase, self).setUp()
        self.root_dir = self.useFixture(fixtures.TempDir()).path
        self.mailmap = os.path.join(self.root_dir, '.mailmap')

    def _check_mailmap_line(self, line):
        # Write a one-line .mailmap and verify the resulting mapping.
        with open(self.mailmap, 'w') as mm_fh:
            mm_fh.write(line)
        self.assertEqual({'<email@bar.com>': '<email@foo.com>'},
                         packaging.read_git_mailmap(self.root_dir))

    def test_mailmap_with_fullname(self):
        self._check_mailmap_line(
            "Foo Bar <email@foo.com> Foo Bar <email@bar.com>\n")

    def test_mailmap_with_firstname(self):
        self._check_mailmap_line("Foo <email@foo.com> Foo <email@bar.com>\n")

    def test_mailmap_with_noname(self):
        self._check_mailmap_line("<email@foo.com> <email@bar.com>\n")
class SkipFileWrites(base.BaseTestCase):
    """Scenario-driven checks that ChangeLog/AUTHORS writing can be skipped.

    Each scenario combines a setup.cfg option (skip_changelog/skip_authors)
    with the matching SKIP_* environment variable; the target file must NOT
    be written when either one is true.
    """
    scenarios = [
        ('changelog_option_true',
         dict(option_key='skip_changelog', option_value='True',
              env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None,
              pkg_func=packaging.write_git_changelog, filename='ChangeLog')),
        ('changelog_option_false',
         dict(option_key='skip_changelog', option_value='False',
              env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None,
              pkg_func=packaging.write_git_changelog, filename='ChangeLog')),
        ('changelog_env_true',
         dict(option_key='skip_changelog', option_value='False',
              env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True',
              pkg_func=packaging.write_git_changelog, filename='ChangeLog')),
        ('changelog_both_true',
         dict(option_key='skip_changelog', option_value='True',
              env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True',
              pkg_func=packaging.write_git_changelog, filename='ChangeLog')),
        ('authors_option_true',
         dict(option_key='skip_authors', option_value='True',
              env_key='SKIP_GENERATE_AUTHORS', env_value=None,
              pkg_func=packaging.generate_authors, filename='AUTHORS')),
        ('authors_option_false',
         dict(option_key='skip_authors', option_value='False',
              env_key='SKIP_GENERATE_AUTHORS', env_value=None,
              pkg_func=packaging.generate_authors, filename='AUTHORS')),
        ('authors_env_true',
         dict(option_key='skip_authors', option_value='False',
              env_key='SKIP_GENERATE_AUTHORS', env_value='True',
              pkg_func=packaging.generate_authors, filename='AUTHORS')),
        ('authors_both_true',
         dict(option_key='skip_authors', option_value='True',
              env_key='SKIP_GENERATE_AUTHORS', env_value='True',
              pkg_func=packaging.generate_authors, filename='AUTHORS')),
    ]
    def setUp(self):
        super(SkipFileWrites, self).setUp()
        self.temp_path = self.useFixture(fixtures.TempDir()).path
        self.root_dir = os.path.abspath(os.path.curdir)
        self.git_dir = os.path.join(self.root_dir, ".git")
        # These tests require real git metadata; skip outside a git checkout.
        if not os.path.exists(self.git_dir):
            self.skipTest("%s is missing; skipping git-related checks"
                          % self.git_dir)
            return
        self.filename = os.path.join(self.temp_path, self.filename)
        self.option_dict = dict()
        if self.option_key is not None:
            self.option_dict[self.option_key] = ('setup.cfg',
                                                 self.option_value)
        self.useFixture(
            fixtures.EnvironmentVariable(self.env_key, self.env_value))
    def test_skip(self):
        self.pkg_func(git_dir=self.git_dir,
                      dest_dir=self.temp_path,
                      option_dict=self.option_dict)
        # The file must be absent exactly when the option or env var is set.
        self.assertEqual(
            not os.path.exists(self.filename),
            (self.option_value.lower() in packaging.TRUE_VALUES
             or self.env_value is not None))
class GitLogsTest(base.BaseTestCase):
    """Tests for ChangeLog/AUTHORS generation against faked git output."""
    def setUp(self):
        super(GitLogsTest, self).setUp()
        self.temp_path = self.useFixture(fixtures.TempDir()).path
        self.root_dir = os.path.abspath(os.path.curdir)
        self.git_dir = os.path.join(self.root_dir, ".git")
        # Neutralize any skip flags inherited from the caller's environment.
        self.useFixture(
            fixtures.EnvironmentVariable('SKIP_GENERATE_AUTHORS'))
        self.useFixture(
            fixtures.EnvironmentVariable('SKIP_WRITE_GIT_CHANGELOG'))
    def test_write_git_changelog(self):
        # Pretend only .git and .mailmap exist on disk.
        exist_files = [os.path.join(self.root_dir, f)
                       for f in (".git", ".mailmap")]
        self.useFixture(fixtures.MonkeyPatch(
            "os.path.exists",
            lambda path: os.path.abspath(path) in exist_files))
        # Fake `git log` output containing a non-canonical author address.
        self.useFixture(fixtures.FakePopen(lambda _: {
            "stdout": BytesIO("Author: Foo Bar "
                              "<email@bar.com>\n".encode('utf-8'))
        }))
        def _fake_read_git_mailmap(*args):
            return {"email@bar.com": "email@foo.com"}
        self.useFixture(fixtures.MonkeyPatch("pbr.packaging.read_git_mailmap",
                                             _fake_read_git_mailmap))
        packaging.write_git_changelog(git_dir=self.git_dir,
                                      dest_dir=self.temp_path)
        # The mailmap must have rewritten the address in the ChangeLog.
        with open(os.path.join(self.temp_path, "ChangeLog"), "r") as ch_fh:
            self.assertTrue("email@foo.com" in ch_fh.read())
    def test_generate_authors(self):
        author_old = u"Foo Foo <email@foo.com>"
        author_new = u"Bar Bar <email@bar.com>"
        co_author = u"Foo Bar <foo@bar.com>"
        co_author_by = u"Co-authored-by: " + co_author
        # Map each expected shell command line to its canned output.
        git_log_cmd = (
            "git --git-dir=%s log --format=%%aN <%%aE>" % self.git_dir)
        git_co_log_cmd = ("git --git-dir=%s log" % self.git_dir)
        git_top_level = "git rev-parse --show-toplevel"
        cmd_map = {
            git_log_cmd: author_new,
            git_co_log_cmd: co_author_by,
            git_top_level: self.root_dir,
        }
        exist_files = [self.git_dir,
                       os.path.join(self.temp_path, "AUTHORS.in")]
        self.useFixture(fixtures.MonkeyPatch(
            "os.path.exists",
            lambda path: os.path.abspath(path) in exist_files))
        def _fake_run_shell_command(cmd, **kwargs):
            return cmd_map[" ".join(cmd)]
        self.useFixture(fixtures.MonkeyPatch(
            "pbr.packaging._run_shell_command",
            _fake_run_shell_command))
        # Pre-existing AUTHORS.in entries must survive into AUTHORS.
        with open(os.path.join(self.temp_path, "AUTHORS.in"), "w") as auth_fh:
            auth_fh.write("%s\n" % author_old)
        packaging.generate_authors(git_dir=self.git_dir,
                                   dest_dir=self.temp_path)
        with open(os.path.join(self.temp_path, "AUTHORS"), "r") as auth_fh:
            authors = auth_fh.read()
            self.assertTrue(author_old in authors)
            self.assertTrue(author_new in authors)
            self.assertTrue(co_author in authors)
class BuildSphinxTest(base.BaseTestCase):
    """Scenario tests for LocalBuildDoc's autodoc_index_modules option."""
    scenarios = [
        ('true_autodoc_caps',
         dict(has_opt=True, autodoc='True', has_autodoc=True)),
        ('true_autodoc_lower',
         dict(has_opt=True, autodoc='true', has_autodoc=True)),
        ('false_autodoc',
         dict(has_opt=True, autodoc='False', has_autodoc=False)),
        ('no_autodoc',
         dict(has_opt=False, autodoc='False', has_autodoc=False)),
    ]
    def setUp(self):
        super(BuildSphinxTest, self).setUp()
        # Stub out the actual sphinx build; only index generation matters here.
        self.useFixture(fixtures.MonkeyPatch(
            "sphinx.setup_command.BuildDoc.run", lambda self: None))
        from distutils import dist
        self.distr = dist.Distribution()
        self.distr.packages = ("fake_package",)
        self.distr.command_options["build_sphinx"] = {
            "source_dir": ["a", "."]}
        # A throwaway package with one empty module to document.
        pkg_fixture = fixtures.PythonPackage(
            "fake_package", [("fake_module.py", b"")])
        self.useFixture(pkg_fixture)
        self.useFixture(base.DiveDir(pkg_fixture.base))
    def test_build_doc(self):
        if self.has_opt:
            self.distr.command_options["pbr"] = {
                "autodoc_index_modules": ('setup.cfg', self.autodoc)}
        build_doc = packaging.LocalBuildDoc(self.distr)
        build_doc.run()
        # The autoindex and per-module pages exist iff autodoc was enabled.
        self.assertTrue(
            os.path.exists("api/autoindex.rst") == self.has_autodoc)
        self.assertTrue(
            os.path.exists(
                "api/fake_package.fake_module.rst") == self.has_autodoc)
class ParseRequirementsTest(base.BaseTestCase):
    """Tests for packaging.parse_requirements / get_reqs_from_files."""
    def setUp(self):
        super(ParseRequirementsTest, self).setUp()
        # Scratch requirements file; each test writes its own content.
        (fd, self.tmp_file) = tempfile.mkstemp(prefix='openstack',
                                               suffix='.setup')
    def test_parse_requirements_normal(self):
        with open(self.tmp_file, 'w') as fh:
            fh.write("foo\nbar")
        self.assertEqual(['foo', 'bar'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_with_git_egg_url(self):
        # Editable VCS URLs collapse to their egg name.
        with open(self.tmp_file, 'w') as fh:
            fh.write("-e git://foo.com/zipball#egg=bar")
        self.assertEqual(['bar'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_with_versioned_git_egg_url(self):
        # A '-N.N.N' suffix in the egg name becomes a >= version specifier.
        with open(self.tmp_file, 'w') as fh:
            fh.write("-e git://foo.com/zipball#egg=bar-1.2.4")
        self.assertEqual(['bar>=1.2.4'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_with_http_egg_url(self):
        with open(self.tmp_file, 'w') as fh:
            fh.write("https://foo.com/zipball#egg=bar")
        self.assertEqual(['bar'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_with_versioned_http_egg_url(self):
        with open(self.tmp_file, 'w') as fh:
            fh.write("https://foo.com/zipball#egg=bar-4.2.1")
        self.assertEqual(['bar>=4.2.1'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_removes_index_lines(self):
        # '-f' (find-links) lines are dropped entirely.
        with open(self.tmp_file, 'w') as fh:
            fh.write("-f foobar")
        self.assertEqual([], packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_removes_argparse(self):
        # argparse is stdlib on >= 2.7, so it must be filtered out there.
        with open(self.tmp_file, 'w') as fh:
            fh.write("argparse")
        if sys.version_info >= (2, 7):
            self.assertEqual([], packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_override_with_env(self):
        # PBR_REQUIREMENTS_FILES overrides the default file list.
        with open(self.tmp_file, 'w') as fh:
            fh.write("foo\nbar")
        self.useFixture(
            fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES',
                                         self.tmp_file))
        self.assertEqual(['foo', 'bar'],
                         packaging.parse_requirements())
    def test_parse_requirements_override_with_env_multiple_files(self):
        # Missing entries in the comma-separated list are skipped over.
        with open(self.tmp_file, 'w') as fh:
            fh.write("foo\nbar")
        self.useFixture(
            fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES',
                                         "no-such-file," + self.tmp_file))
        self.assertEqual(['foo', 'bar'],
                         packaging.parse_requirements())
    def test_get_requirement_from_file_empty(self):
        actual = packaging.get_reqs_from_files([])
        self.assertEqual([], actual)
    def test_parse_requirements_with_comments(self):
        # Comment lines must be ignored; real requirements kept.
        with open(self.tmp_file, 'w') as fh:
            fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
        self.assertEqual(['foobar', 'foobaz'],
                         packaging.parse_requirements([self.tmp_file]))
class ParseDependencyLinksTest(base.BaseTestCase):
    """Tests for packaging.parse_dependency_links."""
    def setUp(self):
        super(ParseDependencyLinksTest, self).setUp()
        # Scratch file each test fills with dependency-link lines.
        (fd, self.tmp_file) = tempfile.mkstemp(prefix="openstack",
                                               suffix=".setup")
    def test_parse_dependency_normal(self):
        with open(self.tmp_file, "w") as fh:
            fh.write("http://test.com\n")
        self.assertEqual(
            ["http://test.com"],
            packaging.parse_dependency_links([self.tmp_file]))
    def test_parse_dependency_with_git_egg_url(self):
        # The '-e ' editable prefix is stripped; the URL itself is kept.
        with open(self.tmp_file, "w") as fh:
            fh.write("-e git://foo.com/zipball#egg=bar")
        self.assertEqual(
            ["git://foo.com/zipball#egg=bar"],
            packaging.parse_dependency_links([self.tmp_file]))
def load_tests(loader, in_tests, pattern):
    """unittest hook: expand each scenario-annotated class into concrete tests."""
    return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern)
| |
"""Support for alarm control panels that can be controlled through IFTTT."""
from __future__ import annotations
import logging
import re
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
FORMAT_NUMBER,
FORMAT_TEXT,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
AlarmControlPanelEntity,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_CODE,
CONF_NAME,
CONF_OPTIMISTIC,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
)
from homeassistant.core import HomeAssistant, ServiceCall
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import ATTR_EVENT, DOMAIN, SERVICE_PUSH_ALARM_STATE, SERVICE_TRIGGER
_LOGGER = logging.getLogger(__name__)
# States that push_alarm_state accepts from the push service call.
ALLOWED_STATES = [
    STATE_ALARM_DISARMED,
    STATE_ALARM_ARMED_NIGHT,
    STATE_ALARM_ARMED_AWAY,
    STATE_ALARM_ARMED_HOME,
]
# hass.data key under which all IFTTT alarm panel entities are collected.
DATA_IFTTT_ALARM = "ifttt_alarm"
DEFAULT_NAME = "Home"
# Configuration keys for the IFTTT event name fired per alarm transition.
CONF_EVENT_AWAY = "event_arm_away"
CONF_EVENT_HOME = "event_arm_home"
CONF_EVENT_NIGHT = "event_arm_night"
CONF_EVENT_DISARM = "event_disarm"
# Default IFTTT event names used when not overridden in configuration.
DEFAULT_EVENT_AWAY = "alarm_arm_away"
DEFAULT_EVENT_HOME = "alarm_arm_home"
DEFAULT_EVENT_NIGHT = "alarm_arm_night"
DEFAULT_EVENT_DISARM = "alarm_disarm"
CONF_CODE_ARM_REQUIRED = "code_arm_required"
# Platform configuration schema, extending the alarm control panel base schema.
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_CODE): cv.string,
        vol.Optional(CONF_CODE_ARM_REQUIRED, default=True): cv.boolean,
        vol.Optional(CONF_EVENT_AWAY, default=DEFAULT_EVENT_AWAY): cv.string,
        vol.Optional(CONF_EVENT_HOME, default=DEFAULT_EVENT_HOME): cv.string,
        vol.Optional(CONF_EVENT_NIGHT, default=DEFAULT_EVENT_NIGHT): cv.string,
        vol.Optional(CONF_EVENT_DISARM, default=DEFAULT_EVENT_DISARM): cv.string,
        vol.Optional(CONF_OPTIMISTIC, default=False): cv.boolean,
    }
)
# Schema for the push_alarm_state service: target entity ids plus new state.
PUSH_ALARM_STATE_SERVICE_SCHEMA = vol.Schema(
    {vol.Required(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_STATE): cv.string}
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up a control panel managed through IFTTT.

    Also registers the push_alarm_state service that lets IFTTT push the
    real alarm state back into Home Assistant.
    """
    if DATA_IFTTT_ALARM not in hass.data:
        hass.data[DATA_IFTTT_ALARM] = []
    name = config.get(CONF_NAME)
    code = config.get(CONF_CODE)
    code_arm_required = config.get(CONF_CODE_ARM_REQUIRED)
    event_away = config.get(CONF_EVENT_AWAY)
    event_home = config.get(CONF_EVENT_HOME)
    event_night = config.get(CONF_EVENT_NIGHT)
    event_disarm = config.get(CONF_EVENT_DISARM)
    optimistic = config.get(CONF_OPTIMISTIC)
    alarmpanel = IFTTTAlarmPanel(
        name,
        code,
        code_arm_required,
        event_away,
        event_home,
        event_night,
        event_disarm,
        optimistic,
    )
    # Keep a reference so push_state_update below can find the entity.
    hass.data[DATA_IFTTT_ALARM].append(alarmpanel)
    add_entities([alarmpanel])
    async def push_state_update(service: ServiceCall) -> None:
        """Set the service state as device state attribute."""
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        state = service.data.get(ATTR_STATE)
        devices = hass.data[DATA_IFTTT_ALARM]
        # Restrict the update to the requested entities, if any were given.
        if entity_ids:
            devices = [d for d in devices if d.entity_id in entity_ids]
        for device in devices:
            device.push_alarm_state(state)
            device.async_schedule_update_ha_state()
    hass.services.register(
        DOMAIN,
        SERVICE_PUSH_ALARM_STATE,
        push_state_update,
        schema=PUSH_ALARM_STATE_SERVICE_SCHEMA,
    )
class IFTTTAlarmPanel(AlarmControlPanelEntity):
    """Representation of an alarm control panel controlled through IFTTT."""
    def __init__(
        self,
        name,
        code,
        code_arm_required,
        event_away,
        event_home,
        event_night,
        event_disarm,
        optimistic,
    ):
        """Initialize the alarm control panel.

        `event_*` are the IFTTT event names fired for each transition;
        `optimistic` controls whether the entity updates its own state
        immediately after firing an event.
        """
        self._name = name
        self._code = code
        self._code_arm_required = code_arm_required
        self._event_away = event_away
        self._event_home = event_home
        self._event_night = event_night
        self._event_disarm = event_disarm
        self._optimistic = optimistic
        # Unknown until IFTTT pushes a state or an optimistic update happens.
        self._state = None
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def supported_features(self) -> int:
        """Return the list of supported features."""
        return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
    @property
    def assumed_state(self):
        """Notify that this platform returns an assumed state."""
        return True
    @property
    def code_format(self):
        """Return one or more digits/characters."""
        if self._code is None:
            return None
        # All-digit codes get the numeric keypad; anything else is free text.
        if isinstance(self._code, str) and re.search("^\\d+$", self._code):
            return FORMAT_NUMBER
        return FORMAT_TEXT
    def alarm_disarm(self, code=None):
        """Send disarm command."""
        # Disarming always requires the correct code.
        if not self._check_code(code):
            return
        self.set_alarm_state(self._event_disarm, STATE_ALARM_DISARMED)
    def alarm_arm_away(self, code=None):
        """Send arm away command."""
        if self._code_arm_required and not self._check_code(code):
            return
        self.set_alarm_state(self._event_away, STATE_ALARM_ARMED_AWAY)
    def alarm_arm_home(self, code=None):
        """Send arm home command."""
        if self._code_arm_required and not self._check_code(code):
            return
        self.set_alarm_state(self._event_home, STATE_ALARM_ARMED_HOME)
    def alarm_arm_night(self, code=None):
        """Send arm night command."""
        if self._code_arm_required and not self._check_code(code):
            return
        self.set_alarm_state(self._event_night, STATE_ALARM_ARMED_NIGHT)
    def set_alarm_state(self, event, state):
        """Call the IFTTT trigger service to change the alarm state."""
        data = {ATTR_EVENT: event}
        self.hass.services.call(DOMAIN, SERVICE_TRIGGER, data)
        _LOGGER.debug("Called IFTTT integration to trigger event %s", event)
        # Only trust the transition immediately when configured optimistic;
        # otherwise wait for the state to be pushed back via the service.
        if self._optimistic:
            self._state = state
    def push_alarm_state(self, value):
        """Push the alarm state to the given value."""
        # Silently ignore values outside the known alarm states.
        if value in ALLOWED_STATES:
            _LOGGER.debug("Pushed the alarm state to %s", value)
            self._state = value
    def _check_code(self, code):
        # A panel without a configured code accepts any input.
        return self._code is None or self._code == code
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import time
from oslo_log import log as logging
from tempest.common import image as common_image
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.image.v1 import images_client as images_v1_client
CONF = config.CONF
LOG = logging.getLogger(__name__)
def _get_task_state(body):
return body.get('OS-EXT-STS:task_state', None)
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
                           extra_timeout=0, raise_on_error=True):
    """Waits for a server to reach a given status.

    :param client: servers client exposing show_server() and the
        build_interval/build_timeout attributes.
    :param server_id: id of the server being polled.
    :param status: target value of the server 'status' field.
    :param ready_wait: when True, additionally wait for the task state to
        clear so the server is ready for the next action.
    :param extra_timeout: seconds added on top of client.build_timeout.
    :param raise_on_error: raise BuildErrorException if the server lands
        in ERROR instead of waiting for the timeout.
    :raises exceptions.BuildErrorException: server went to ERROR
        (only when raise_on_error is True).
    :raises lib_exc.TimeoutException: target status not reached in time.
    """
    # NOTE(afazekas): UNKNOWN status possible on ERROR
    # or in a very early stage.
    body = client.show_server(server_id)['server']
    old_status = server_status = body['status']
    old_task_state = task_state = _get_task_state(body)
    start_time = int(time.time())
    timeout = client.build_timeout + extra_timeout
    while True:
        # NOTE(afazekas): Now the BUILD status only reached
        # between the UNKNOWN->ACTIVE transition.
        # TODO(afazekas): enumerate and validate the stable status set
        if status == 'BUILD' and server_status != 'UNKNOWN':
            return
        if server_status == status:
            if ready_wait:
                if status == 'BUILD':
                    return
                # NOTE(afazekas): The instance is in "ready for action state"
                # when no task in progress
                if task_state is None:
                    # without state api extension 3 sec usually enough
                    time.sleep(CONF.compute.ready_wait)
                    return
            else:
                return
        time.sleep(client.build_interval)
        body = client.show_server(server_id)['server']
        server_status = body['status']
        task_state = _get_task_state(body)
        # Log every observed status/task-state transition for debugging.
        if (server_status != old_status) or (task_state != old_task_state):
            LOG.info('State transition "%s" ==> "%s" after %d second wait',
                     '/'.join((old_status, str(old_task_state))),
                     '/'.join((server_status, str(task_state))),
                     time.time() - start_time)
        if (server_status == 'ERROR') and raise_on_error:
            if 'fault' in body:
                raise exceptions.BuildErrorException(body['fault'],
                                                     server_id=server_id)
            else:
                raise exceptions.BuildErrorException(server_id=server_id)
        timed_out = int(time.time()) - start_time >= timeout
        if timed_out:
            expected_task_state = 'None' if ready_wait else 'n/a'
            message = ('Server %(server_id)s failed to reach %(status)s '
                       'status and task state "%(expected_task_state)s" '
                       'within the required time (%(timeout)s s).' %
                       {'server_id': server_id,
                        'status': status,
                        'expected_task_state': expected_task_state,
                        'timeout': timeout})
            message += ' Current status: %s.' % server_status
            message += ' Current task state: %s.' % task_state
            caller = test_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise lib_exc.TimeoutException(message)
        old_status = server_status
        old_task_state = task_state
def wait_for_server_termination(client, server_id, ignore_error=False):
    """Waits for server to reach termination.

    Polls show_server() until it raises NotFound, i.e. the server is gone.

    :param ignore_error: when False, a server that lands in ERROR while
        deleting raises DeleteErrorException instead of being waited on.
    :raises lib_exc.DeleteErrorException: server went to ERROR status.
    :raises lib_exc.TimeoutException: server still present after
        client.build_timeout seconds.
    """
    try:
        body = client.show_server(server_id)['server']
    except lib_exc.NotFound:
        # Already deleted before the first poll.
        return
    old_status = body['status']
    old_task_state = _get_task_state(body)
    start_time = int(time.time())
    while True:
        time.sleep(client.build_interval)
        try:
            body = client.show_server(server_id)['server']
        except lib_exc.NotFound:
            # Deletion completed.
            return
        server_status = body['status']
        task_state = _get_task_state(body)
        # Log every observed status/task-state transition for debugging.
        if (server_status != old_status) or (task_state != old_task_state):
            LOG.info('State transition "%s" ==> "%s" after %d second wait',
                     '/'.join((old_status, str(old_task_state))),
                     '/'.join((server_status, str(task_state))),
                     time.time() - start_time)
        if server_status == 'ERROR' and not ignore_error:
            raise lib_exc.DeleteErrorException(
                "Server %s failed to delete and is in ERROR status" %
                server_id)
        if int(time.time()) - start_time >= client.build_timeout:
            raise lib_exc.TimeoutException
        old_status = server_status
        old_task_state = task_state
def wait_for_image_status(client, image_id, status):
    """Waits for an image to reach a given status.

    ``client`` must provide show_image(image_id) (or check_image for the
    image v1 client) plus build_interval and build_timeout attributes.

    :raises exceptions.ImageKilledException: image went to 'killed'.
    :raises exceptions.AddImageException: image went to 'error'.
    :raises lib_exc.TimeoutException: status not reached in time.
    """
    if isinstance(client, images_v1_client.ImagesClient):
        # For v1, check_image returns only the image metadata headers
        # instead of the image payload, which is far cheaper to fetch.
        def fetch(image_id):
            headers = client.check_image(image_id)
            return common_image.get_image_meta_from_headers(headers)
    else:
        fetch = client.show_image
    current_status = 'An unknown status'
    start = int(time.time())
    while int(time.time()) - start < client.build_timeout:
        image = fetch(image_id)
        # The compute image client wraps the body in an 'image' element;
        # the Glance clients return it bare.
        if 'image' in image:
            image = image['image']
        current_status = image['status']
        if current_status == status:
            return
        lowered = current_status.lower()
        if lowered == 'killed':
            raise exceptions.ImageKilledException(image_id=image_id,
                                                  status=status)
        if lowered == 'error':
            raise exceptions.AddImageException(image_id=image_id)
        time.sleep(client.build_interval)
    message = ('Image %(image_id)s failed to reach %(status)s state '
               '(current state %(current_status)s) within the required '
               'time (%(timeout)s s).' % {'image_id': image_id,
                                          'status': status,
                                          'current_status': current_status,
                                          'timeout': client.build_timeout})
    caller = test_utils.find_test_caller()
    if caller:
        message = '(%s) %s' % (caller, message)
    raise lib_exc.TimeoutException(message)
def wait_for_volume_resource_status(client, resource_id, status):
    """Waits for a volume resource to reach a given status.

    Shared by volume, snapshot, backup, group and group-snapshot clients;
    the resource name is derived from client.resource_type.
    """
    resource_name = re.findall(
        r'(volume|group-snapshot|snapshot|backup|group)',
        client.resource_type)[-1].replace('-', '_')
    show_resource = getattr(client, 'show_' + resource_name)
    current = show_resource(resource_id)[resource_name]['status']
    start = int(time.time())
    while current != status:
        time.sleep(client.build_interval)
        current = show_resource(resource_id)[resource_name]['status']
        # Terminal failure states: raise immediately rather than timing out.
        if current == 'error' and current != status:
            raise exceptions.VolumeResourceBuildErrorException(
                resource_name=resource_name, resource_id=resource_id)
        if resource_name == 'volume' and current == 'error_restoring':
            raise exceptions.VolumeRestoreErrorException(volume_id=resource_id)
        if current == 'error_extending' and current != status:
            raise exceptions.VolumeExtendErrorException(volume_id=resource_id)
        if int(time.time()) - start >= client.build_timeout:
            message = ('%s %s failed to reach %s status (current %s) '
                       'within the required time (%s s).' %
                       (resource_name, resource_id, status, current,
                        client.build_timeout))
            raise lib_exc.TimeoutException(message)
    LOG.info('%s %s reached %s after waiting for %f seconds',
             resource_name, resource_id, status, time.time() - start)
def wait_for_volume_migration(client, volume_id, new_host):
    """Waits for a Volume to move to a new host."""
    def _poll():
        vol = client.show_volume(volume_id)['volume']
        return vol['os-vol-host-attr:host'], vol['migration_status']
    host, migration_status = _poll()
    start = int(time.time())
    # new_host is hostname@backend while current_host is hostname@backend#type
    while migration_status != 'success' or new_host not in host:
        time.sleep(client.build_interval)
        host, migration_status = _poll()
        if migration_status == 'error':
            raise lib_exc.TempestException(
                'volume %s failed to migrate.' % (volume_id))
        if int(time.time()) - start >= client.build_timeout:
            message = ('Volume %s failed to migrate to %s (current %s) '
                       'within the required time (%s s).' %
                       (volume_id, new_host, host, client.build_timeout))
            raise lib_exc.TimeoutException(message)
def wait_for_volume_retype(client, volume_id, new_volume_type):
    """Waits for a Volume to have a new volume type."""
    def _current_type():
        return client.show_volume(volume_id)['volume']['volume_type']
    start = int(time.time())
    observed = _current_type()
    while observed != new_volume_type:
        time.sleep(client.build_interval)
        observed = _current_type()
        if int(time.time()) - start >= client.build_timeout:
            message = ('Volume %s failed to reach %s volume type (current %s) '
                       'within the required time (%s s).' %
                       (volume_id, new_volume_type, observed,
                        client.build_timeout))
            raise lib_exc.TimeoutException(message)
def wait_for_qos_operations(client, qos_id, operation, args=None):
    """Waits for a qos operations to be completed.

    ``operation`` selects what to wait for:
      * 'qos-key-unset'    -- args is the list of spec keys to disappear
      * 'disassociate'     -- args is the volume-type id to disappear
      * 'disassociate-all' -- args is ignored
    """
    deadline = int(time.time()) + client.build_timeout
    while True:
        if operation == 'qos-key-unset':
            specs = client.show_qos(qos_id)['qos_specs']['specs']
            if all(key not in specs for key in args):
                return
        elif operation == 'disassociate':
            assocs = client.show_association_qos(qos_id)['qos_associations']
            if all(args not in assoc['id'] for assoc in assocs):
                return
        elif operation == 'disassociate-all':
            assocs = client.show_association_qos(qos_id)['qos_associations']
            if not assocs:
                return
        else:
            raise lib_exc.UnprocessableEntity(
                " operation value is either not defined or incorrect.")
        if int(time.time()) >= deadline:
            raise lib_exc.TimeoutException
        time.sleep(client.build_interval)
def wait_for_interface_status(client, server_id, port_id, status):
    """Waits for an interface to reach a given status.

    Returns the final interfaceAttachment body.
    """
    def _fetch():
        return (client.show_interface(server_id, port_id)
                ['interfaceAttachment'])
    body = _fetch()
    start = int(time.time())
    while body['port_state'] != status:
        time.sleep(client.build_interval)
        body = _fetch()
        if (body['port_state'] != status and
                int(time.time()) - start >= client.build_timeout):
            message = ('Interface %s failed to reach %s status '
                       '(current %s) within the required time (%s s).' %
                       (port_id, status, body['port_state'],
                        client.build_timeout))
            raise lib_exc.TimeoutException(message)
    return body
def wait_for_interface_detach(client, server_id, port_id):
    """Waits for an interface to be detached from a server.

    Returns the final list of interface attachments (which no longer
    contains ``port_id``).

    :raises lib_exc.TimeoutException: port still attached after
        client.build_timeout seconds.
    """
    body = client.list_interfaces(server_id)['interfaceAttachments']
    ports = [iface['port_id'] for iface in body]
    start = int(time.time())
    while port_id in ports:
        time.sleep(client.build_interval)
        body = client.list_interfaces(server_id)['interfaceAttachments']
        ports = [iface['port_id'] for iface in body]
        if port_id not in ports:
            return body
        timed_out = int(time.time()) - start >= client.build_timeout
        if timed_out:
            message = ('Interface %s failed to detach from server %s within '
                       'the required time (%s s)' % (port_id, server_id,
                                                     client.build_timeout))
            raise lib_exc.TimeoutException(message)
    # Bug fix: if the port was already gone on the very first listing the
    # loop never runs; return the attachment list here too instead of
    # implicitly returning None.
    return body
| |
import re
from functools import update_wrapper
from weakref import WeakSet
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.admin.views.autocomplete import AutocompleteJsonView
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured
from django.db.models.base import ModelBase
from django.http import (
Http404, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, Resolver404, resolve, reverse
from django.utils.decorators import method_decorator
from django.utils.functional import LazyObject
from django.utils.module_loading import import_string
from django.utils.text import capfirst
from django.utils.translation import gettext as _, gettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.common import no_append_slash
from django.views.decorators.csrf import csrf_protect
from django.views.i18n import JavaScriptCatalog
all_sites = WeakSet()
class AlreadyRegistered(Exception):
    """Raised by AdminSite.register() when a model is registered twice."""
    pass
class NotRegistered(Exception):
    """Raised by AdminSite.unregister() for a model that isn't registered."""
    pass
class AdminSite:
    """
    An AdminSite object encapsulates an instance of the Django admin application, ready
    to be hooked in to your URLconf. Models are registered with the AdminSite using the
    register() method, and the get_urls() method can then be used to access Django view
    functions that present a full admin interface for the collection of registered
    models.
    """
    # Text to put at the end of each page's <title>.
    site_title = gettext_lazy('Django site admin')
    # Text to put in each page's <h1>.
    site_header = gettext_lazy('Django administration')
    # Text to put at the top of the admin index page.
    index_title = gettext_lazy('Site administration')
    # URL for the "View site" link at the top of each admin page.
    site_url = '/'
    # Whether change pages render the collapsible navigation sidebar.
    enable_nav_sidebar = True
    # Placeholder shown for empty values in change lists.
    empty_value_display = '-'
    # Optional overrides; when None the stock admin form/templates are used.
    login_form = None
    index_template = None
    app_index_template = None
    login_template = None
    logout_template = None
    password_change_template = None
    password_change_done_template = None
    # Route any unmatched URL under the admin prefix to catch_all_view().
    final_catch_all_view = True
    def __init__(self, name='admin'):
        self._registry = {} # model_class class -> admin_class instance
        self.name = name
        # Global actions start with the built-in bulk delete; _actions holds
        # the currently enabled subset.
        self._actions = {'delete_selected': actions.delete_selected}
        self._global_actions = self._actions.copy()
        all_sites.add(self)
    def __repr__(self):
        return f'{self.__class__.__name__}(name={self.name!r})'
    def check(self, app_configs):
        """
        Run the system checks on all ModelAdmins, except if they aren't
        customized at all.
        """
        if app_configs is None:
            app_configs = apps.get_app_configs()
        app_configs = set(app_configs)  # Speed up lookups below
        errors = []
        modeladmins = (o for o in self._registry.values() if o.__class__ is not ModelAdmin)
        for modeladmin in modeladmins:
            if modeladmin.model._meta.app_config in app_configs:
                errors.extend(modeladmin.check())
        return errors
    def register(self, model_or_iterable, admin_class=None, **options):
        """
        Register the given model(s) with the given admin class.
        The model(s) should be Model classes, not instances.
        If an admin class isn't given, use ModelAdmin (the default admin
        options). If keyword arguments are given -- e.g., list_display --
        apply them as options to the admin class.
        If a model is already registered, raise AlreadyRegistered.
        If a model is abstract, raise ImproperlyConfigured.
        """
        admin_class = admin_class or ModelAdmin
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model._meta.abstract:
                raise ImproperlyConfigured(
                    'The model %s is abstract, so it cannot be registered with admin.' % model.__name__
                )
            if model in self._registry:
                registered_admin = str(self._registry[model])
                msg = 'The model %s is already registered ' % model.__name__
                if registered_admin.endswith('.ModelAdmin'):
                    # Most likely registered without a ModelAdmin subclass.
                    msg += 'in app %r.' % re.sub(r'\.ModelAdmin$', '', registered_admin)
                else:
                    msg += 'with %r.' % registered_admin
                raise AlreadyRegistered(msg)
            # Ignore the registration if the model has been
            # swapped out.
            if not model._meta.swapped:
                # If we got **options then dynamically construct a subclass of
                # admin_class with those **options.
                if options:
                    # For reasons I don't quite understand, without a __module__
                    # the created class appears to "live" in the wrong place,
                    # which causes issues later on.
                    options['__module__'] = __name__
                    admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
                # Instantiate the admin class to save in the registry
                self._registry[model] = admin_class(model, self)
    def unregister(self, model_or_iterable):
        """
        Unregister the given model(s).
        If a model isn't already registered, raise NotRegistered.
        """
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model not in self._registry:
                raise NotRegistered('The model %s is not registered' % model.__name__)
            del self._registry[model]
    def is_registered(self, model):
        """
        Check if a model class is registered with this `AdminSite`.
        """
        return model in self._registry
    def add_action(self, action, name=None):
        """
        Register an action to be available globally.
        """
        name = name or action.__name__
        self._actions[name] = action
        self._global_actions[name] = action
    def disable_action(self, name):
        """
        Disable a globally-registered action. Raise KeyError for invalid names.
        """
        del self._actions[name]
    def get_action(self, name):
        """
        Explicitly get a registered global action whether it's enabled or
        not. Raise KeyError for invalid names.
        """
        return self._global_actions[name]
    @property
    def actions(self):
        """
        Get all the enabled actions as an iterable of (name, func).
        """
        return self._actions.items()
    def has_permission(self, request):
        """
        Return True if the given HttpRequest has permission to view
        *at least one* page in the admin site.
        """
        return request.user.is_active and request.user.is_staff
    def admin_view(self, view, cacheable=False):
        """
        Decorator to create an admin view attached to this ``AdminSite``. This
        wraps the view and provides permission checking by calling
        ``self.has_permission``.
        You'll want to use this from within ``AdminSite.get_urls()``:
            class MyAdminSite(AdminSite):
                def get_urls(self):
                    from django.urls import path
                    urls = super().get_urls()
                    urls += [
                        path('my_view/', self.admin_view(some_view))
                    ]
                    return urls
        By default, admin_views are marked non-cacheable using the
        ``never_cache`` decorator. If the view can be safely cached, set
        cacheable=True.
        """
        def inner(request, *args, **kwargs):
            if not self.has_permission(request):
                if request.path == reverse('admin:logout', current_app=self.name):
                    index_path = reverse('admin:index', current_app=self.name)
                    return HttpResponseRedirect(index_path)
                # Inner import to prevent django.contrib.admin (app) from
                # importing django.contrib.auth.models.User (unrelated model).
                from django.contrib.auth.views import redirect_to_login
                return redirect_to_login(
                    request.get_full_path(),
                    reverse('admin:login', current_app=self.name)
                )
            return view(request, *args, **kwargs)
        if not cacheable:
            inner = never_cache(inner)
        # We add csrf_protect here so this function can be used as a utility
        # function for any view, without having to repeat 'csrf_protect'.
        if not getattr(view, 'csrf_exempt', False):
            inner = csrf_protect(inner)
        return update_wrapper(inner, view)
    def get_urls(self):
        """Return the URLconf patterns for every view served by this site."""
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.contenttypes.views imports ContentType.
        from django.contrib.contenttypes import views as contenttype_views
        from django.urls import include, path, re_path
        def wrap(view, cacheable=False):
            def wrapper(*args, **kwargs):
                return self.admin_view(view, cacheable)(*args, **kwargs)
            wrapper.admin_site = self
            return update_wrapper(wrapper, view)
        # Admin-site-wide views.
        urlpatterns = [
            path('', wrap(self.index), name='index'),
            path('login/', self.login, name='login'),
            path('logout/', wrap(self.logout), name='logout'),
            path('password_change/', wrap(self.password_change, cacheable=True), name='password_change'),
            path(
                'password_change/done/',
                wrap(self.password_change_done, cacheable=True),
                name='password_change_done',
            ),
            path('autocomplete/', wrap(self.autocomplete_view), name='autocomplete'),
            path('jsi18n/', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
            path(
                'r/<int:content_type_id>/<path:object_id>/',
                wrap(contenttype_views.shortcut),
                name='view_on_site',
            ),
        ]
        # Add in each model's views, and create a list of valid URLS for the
        # app_index
        valid_app_labels = []
        for model, model_admin in self._registry.items():
            urlpatterns += [
                path('%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
            ]
            if model._meta.app_label not in valid_app_labels:
                valid_app_labels.append(model._meta.app_label)
        # If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view,
        if valid_app_labels:
            regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
            urlpatterns += [
                re_path(regex, wrap(self.app_index), name='app_list'),
            ]
        if self.final_catch_all_view:
            urlpatterns.append(re_path(r'(?P<url>.*)$', wrap(self.catch_all_view)))
        return urlpatterns
    @property
    def urls(self):
        # (urlpatterns, app_namespace, instance_namespace) for include().
        return self.get_urls(), 'admin', self.name
    def each_context(self, request):
        """
        Return a dictionary of variables to put in the template context for
        *every* page in the admin site.
        For sites running on a subpath, use the SCRIPT_NAME value if site_url
        hasn't been customized.
        """
        script_name = request.META['SCRIPT_NAME']
        site_url = script_name if self.site_url == '/' and script_name else self.site_url
        return {
            'site_title': self.site_title,
            'site_header': self.site_header,
            'site_url': site_url,
            'has_permission': self.has_permission(request),
            'available_apps': self.get_app_list(request),
            'is_popup': False,
            'is_nav_sidebar_enabled': self.enable_nav_sidebar,
        }
    def password_change(self, request, extra_context=None):
        """
        Handle the "change password" task -- both form display and validation.
        """
        from django.contrib.admin.forms import AdminPasswordChangeForm
        from django.contrib.auth.views import PasswordChangeView
        url = reverse('admin:password_change_done', current_app=self.name)
        defaults = {
            'form_class': AdminPasswordChangeForm,
            'success_url': url,
            'extra_context': {**self.each_context(request), **(extra_context or {})},
        }
        if self.password_change_template is not None:
            defaults['template_name'] = self.password_change_template
        request.current_app = self.name
        return PasswordChangeView.as_view(**defaults)(request)
    def password_change_done(self, request, extra_context=None):
        """
        Display the "success" page after a password change.
        """
        from django.contrib.auth.views import PasswordChangeDoneView
        defaults = {
            'extra_context': {**self.each_context(request), **(extra_context or {})},
        }
        if self.password_change_done_template is not None:
            defaults['template_name'] = self.password_change_done_template
        request.current_app = self.name
        return PasswordChangeDoneView.as_view(**defaults)(request)
    def i18n_javascript(self, request, extra_context=None):
        """
        Display the i18n JavaScript that the Django admin requires.
        `extra_context` is unused but present for consistency with the other
        admin views.
        """
        return JavaScriptCatalog.as_view(packages=['django.contrib.admin'])(request)
    def logout(self, request, extra_context=None):
        """
        Log out the user for the given HttpRequest.
        This should *not* assume the user is already logged in.
        """
        from django.contrib.auth.views import LogoutView
        defaults = {
            'extra_context': {
                **self.each_context(request),
                # Since the user isn't logged out at this point, the value of
                # has_permission must be overridden.
                'has_permission': False,
                **(extra_context or {})
            },
        }
        if self.logout_template is not None:
            defaults['template_name'] = self.logout_template
        request.current_app = self.name
        return LogoutView.as_view(**defaults)(request)
    @method_decorator(never_cache)
    def login(self, request, extra_context=None):
        """
        Display the login form for the given HttpRequest.
        """
        if request.method == 'GET' and self.has_permission(request):
            # Already logged-in, redirect to admin index
            index_path = reverse('admin:index', current_app=self.name)
            return HttpResponseRedirect(index_path)
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.admin.forms eventually imports User.
        from django.contrib.admin.forms import AdminAuthenticationForm
        from django.contrib.auth.views import LoginView
        context = {
            **self.each_context(request),
            'title': _('Log in'),
            'app_path': request.get_full_path(),
            'username': request.user.get_username(),
        }
        if (REDIRECT_FIELD_NAME not in request.GET and
                REDIRECT_FIELD_NAME not in request.POST):
            context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
        context.update(extra_context or {})
        defaults = {
            'extra_context': context,
            'authentication_form': self.login_form or AdminAuthenticationForm,
            'template_name': self.login_template or 'admin/login.html',
        }
        request.current_app = self.name
        return LoginView.as_view(**defaults)(request)
    def autocomplete_view(self, request):
        """Serve the JSON payload consumed by autocomplete_fields widgets."""
        return AutocompleteJsonView.as_view(admin_site=self)(request)
    @no_append_slash
    def catch_all_view(self, request, url):
        """Redirect to ``url`` + '/' when APPEND_SLASH applies; else 404."""
        if settings.APPEND_SLASH and not url.endswith('/'):
            urlconf = getattr(request, 'urlconf', None)
            try:
                match = resolve('%s/' % request.path_info, urlconf)
            except Resolver404:
                pass
            else:
                if getattr(match.func, 'should_append_slash', True):
                    return HttpResponsePermanentRedirect('%s/' % request.path)
        raise Http404
    def _build_app_dict(self, request, label=None):
        """
        Build the app dictionary. The optional `label` parameter filters models
        of a specific app.
        """
        app_dict = {}
        if label:
            models = {
                m: m_a for m, m_a in self._registry.items()
                if m._meta.app_label == label
            }
        else:
            models = self._registry
        for model, model_admin in models.items():
            app_label = model._meta.app_label
            has_module_perms = model_admin.has_module_permission(request)
            if not has_module_perms:
                continue
            perms = model_admin.get_model_perms(request)
            # Check whether user has any perm for this module.
            # If so, add the module to the model_list.
            if True not in perms.values():
                continue
            info = (app_label, model._meta.model_name)
            model_dict = {
                'model': model,
                'name': capfirst(model._meta.verbose_name_plural),
                'object_name': model._meta.object_name,
                'perms': perms,
                'admin_url': None,
                'add_url': None,
            }
            if perms.get('change') or perms.get('view'):
                model_dict['view_only'] = not perms.get('change')
                try:
                    model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
                except NoReverseMatch:
                    pass
            if perms.get('add'):
                try:
                    model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
                except NoReverseMatch:
                    pass
            if app_label in app_dict:
                app_dict[app_label]['models'].append(model_dict)
            else:
                app_dict[app_label] = {
                    'name': apps.get_app_config(app_label).verbose_name,
                    'app_label': app_label,
                    'app_url': reverse(
                        'admin:app_list',
                        kwargs={'app_label': app_label},
                        current_app=self.name,
                    ),
                    'has_module_perms': has_module_perms,
                    'models': [model_dict],
                }
        if label:
            return app_dict.get(label)
        return app_dict
    def get_app_list(self, request):
        """
        Return a sorted list of all the installed apps that have been
        registered in this site.
        """
        app_dict = self._build_app_dict(request)
        # Sort the apps alphabetically.
        app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())
        # Sort the models alphabetically within each app.
        for app in app_list:
            app['models'].sort(key=lambda x: x['name'])
        return app_list
    def index(self, request, extra_context=None):
        """
        Display the main admin index page, which lists all of the installed
        apps that have been registered in this site.
        """
        app_list = self.get_app_list(request)
        context = {
            **self.each_context(request),
            'title': self.index_title,
            'subtitle': None,
            'app_list': app_list,
            **(extra_context or {}),
        }
        request.current_app = self.name
        return TemplateResponse(request, self.index_template or 'admin/index.html', context)
    def app_index(self, request, app_label, extra_context=None):
        """Display the index page for a single app's registered models."""
        app_dict = self._build_app_dict(request, app_label)
        if not app_dict:
            raise Http404('The requested admin page does not exist.')
        # Sort the models alphabetically within each app.
        app_dict['models'].sort(key=lambda x: x['name'])
        context = {
            **self.each_context(request),
            'title': _('%(app)s administration') % {'app': app_dict['name']},
            'subtitle': None,
            'app_list': [app_dict],
            'app_label': app_label,
            **(extra_context or {}),
        }
        request.current_app = self.name
        return TemplateResponse(request, self.app_index_template or [
            'admin/%s/app_index.html' % app_label,
            'admin/app_index.html'
        ], context)
class DefaultAdminSite(LazyObject):
    """Lazy proxy for the default admin site.

    The concrete AdminSite class is read from the 'admin' app config's
    ``default_site`` attribute the first time the object is accessed.
    """
    def _setup(self):
        AdminSiteClass = import_string(apps.get_app_config('admin').default_site)
        self._wrapped = AdminSiteClass()
    def __repr__(self):
        return repr(self._wrapped)
# This global object represents the default admin site, for the common case.
# You can provide your own AdminSite using the (Simple)AdminConfig.default_site
# attribute. You can also instantiate AdminSite in your own code to create a
# custom admin site.
site = DefaultAdminSite()
| |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
## Gazebo send position topic or calls set pose service for ros_sim_iface consumption
# ROS package / node identifiers used by roslib.load_manifest() and
# rospy.init_node() below.
PKG = 'gazebo_plugins'
NAME = 'set_pose'
import math
import roslib
roslib.load_manifest(PKG)
import sys, unittest
import os, os.path, threading, time
import rospy, rostest
from gazebo_plugins.srv import SetPose
from std_msgs.msg import String
from geometry_msgs.msg import Pose,Quaternion,Point, PoseStamped, PoseWithCovariance, TwistWithCovariance, Twist, Vector3
from nav_msgs.msg import Odometry
import tf.transformations as tft
from numpy import float64
# 6x6 covariance matrix (row-major, 36 entries) of all zeros -- the
# commanded pose is treated as exact.
COV = [float64(0)] * 36
def normalize_angle_positive(angle):
    """Map angle (radians) into the interval [0, 2*pi)."""
    two_pi = 2 * math.pi
    return math.fmod(math.fmod(angle, two_pi) + two_pi, two_pi)
def normalize_angle(angle):
    """Map angle (radians) into the interval (-pi, pi]."""
    wrapped = normalize_angle_positive(angle)
    return wrapped - 2 * math.pi if wrapped > math.pi else wrapped
def shortest_angular_distance(angle_from, angle_to):
    """Return the signed smallest rotation taking angle_from to angle_to."""
    diff = (normalize_angle_positive(angle_to) -
            normalize_angle_positive(angle_from))
    if diff > math.pi:
        diff -= 2 * math.pi
    return normalize_angle(diff)
class SimIfaceControl():
  """Push a target pose into a simulator, either by calling a SetPose ROS
  service, by publishing an Odometry message to a topic, or both.

  Configuration comes from command-line flags parsed in setPose().
  NOTE(review): NAME and SetPose are defined/imported earlier in this file
  (not visible here); NAME is presumably the ROS node name -- confirm.
  """
  def __init__(self):
    # Defaults; most of these can be overridden by the flags parsed in
    # setPose() (-update_rate, -timeout, -x/-y/-z, -R/-P/-Y, -f, -s, -t, -p).
    self.update_rate=10
    self.timeout=1
    self.target_p = [0,0,0] # position
    self.target_q = [0,0,0] # quaternion
    self.target_e = [0,0,0] # euler pose
    self.wait_topic_initialized = False
    self.frame_id = "world"
    self.service_name = "set_pose_service"
    self.topic_name = "set_pose_topic"
    self.use_topic = False;
    self.use_service = False;
    self.wait_topic_name = "clock"
    self.wait_for_topic = False;
    rospy.init_node(NAME, anonymous=True)
  def setPoseService(self,pose_msg):
    """Call the SetPose service with pose_msg; returns the service's
    success flag, or None if the call raised (the error is only printed)."""
    print "waiting for service to set pose"
    # Blocks until the service is advertised.
    rospy.wait_for_service(self.service_name);
    try:
      set_pose = rospy.ServiceProxy(self.service_name, SetPose)
      resp1 = set_pose(pose_msg)
      return resp1.success
    except rospy.ServiceException, e:
      # Best-effort: report the failure and fall through (returns None).
      print "service call failed: %s"%e
  def waitTopicInput(self,p3d):
    """Subscriber callback: records that the wait-topic has published at
    least once (message contents are ignored)."""
    #self.p3d_p = [p3d.pose.pose.position.x, p3d.pose.pose.position.y, p3d.pose.pose.position.z]
    #self.p3d_q = [p3d.pose.pose.orientation.x, p3d.pose.pose.orientation.y, p3d.pose.pose.orientation.z, p3d.pose.pose.orientation.w]
    #self.p3d_e = tft.euler_from_quaternion(self.p3d_q)
    self.wait_topic_initialized = True
  def setPose(self):
    """Parse command-line flags, build the target Odometry message, then
    deliver it via the service and/or by publishing for `timeout` seconds."""
    # get goal from commandline; every flag expects one following value and
    # is silently ignored when the value is missing.
    for i in range(0,len(sys.argv)):
      if sys.argv[i] == '-update_rate':
        if len(sys.argv) > i+1:
          self.update_rate = float(sys.argv[i+1])
      if sys.argv[i] == '-timeout':
        if len(sys.argv) > i+1:
          self.timeout = float(sys.argv[i+1])
      if sys.argv[i] == '-x':
        if len(sys.argv) > i+1:
          self.target_p[0] = float(sys.argv[i+1])
      if sys.argv[i] == '-y':
        if len(sys.argv) > i+1:
          self.target_p[1] = float(sys.argv[i+1])
      if sys.argv[i] == '-z':
        if len(sys.argv) > i+1:
          self.target_p[2] = float(sys.argv[i+1])
      if sys.argv[i] == '-R':
        if len(sys.argv) > i+1:
          self.target_e[0] = float(sys.argv[i+1])
      if sys.argv[i] == '-P':
        if len(sys.argv) > i+1:
          self.target_e[1] = float(sys.argv[i+1])
      if sys.argv[i] == '-Y':
        if len(sys.argv) > i+1:
          self.target_e[2] = float(sys.argv[i+1])
      if sys.argv[i] == '-f':
        if len(sys.argv) > i+1:
          self.frame_id = sys.argv[i+1]
      if sys.argv[i] == '-s':
        if len(sys.argv) > i+1:
          self.service_name = sys.argv[i+1]
          self.use_service = True;
      if sys.argv[i] == '-t':
        if len(sys.argv) > i+1:
          self.topic_name = sys.argv[i+1]
          self.use_topic = True;
      if sys.argv[i] == '-p':
        if len(sys.argv) > i+1:
          self.wait_topic_name = sys.argv[i+1]
          self.wait_for_topic = True;
    # setup rospy
    self.pub_set_pose_topic = rospy.Publisher(self.topic_name, Odometry)
    rospy.Subscriber(self.wait_topic_name, rospy.AnyMsg, self.waitTopicInput)
    # wait for topic if user requests (-p): poll until the callback fires
    if self.wait_for_topic:
      while not self.wait_topic_initialized:
        time.sleep(0.1)
    # compose the goal message: header + pose (from -x/-y/-z and RPY
    # converted to a quaternion) with zero covariance and zero twist
    h = rospy.Header()
    h.stamp = rospy.get_rostime()
    h.frame_id = self.frame_id
    p = Point(self.target_p[0],self.target_p[1],self.target_p[2])
    tmpq = tft.quaternion_from_euler(self.target_e[0],self.target_e[1],self.target_e[2])
    q = Quaternion(tmpq[0],tmpq[1],tmpq[2],tmpq[3])
    pose = Pose(p,q)
    pwc = PoseWithCovariance(pose,COV)
    twc = TwistWithCovariance(Twist(Vector3(),Vector3()),COV)
    child_frame_id = "" # what should this be?
    target_pose = Odometry(h,child_frame_id,pwc,twc)
    if self.use_service:
      success = self.setPoseService(target_pose)
    # publish topic if specified (-t): repeat until the timeout elapses
    if self.use_topic:
      timeout_t = time.time() + self.timeout
      while not rospy.is_shutdown() and time.time() < timeout_t:
        # publish target pose
        self.pub_set_pose_topic.publish(target_pose)
        if self.update_rate > 0:
          time.sleep(1.0/self.update_rate)
        else:
          time.sleep(0.001)
def print_usage(exit_code = 0):
  """Print command-line usage help.

  NOTE(review): exit_code is accepted but never used -- the function does
  not call sys.exit(); confirm whether callers rely on that.
  """
  print '''Commands:
    -update_rate <Hz> - update rate, default to 10 Hz
    -timeout <seconds> - test timeout in seconds. default to 1 seconds
    -x <x in meters>
    -y <y in meters>
    -z <z in meters>
    -R <roll in radians>
    -P <pitch in radians>
    -Y <yaw in radians>
    -f target frame_id
    -s set pose service name
    -t set pose topic name
    -p wait for this ros topic to be published first
'''
if __name__ == '__main__':
  # With no arguments, just print usage; otherwise parse the flags and
  # push the requested pose into the simulator.
  if len(sys.argv) == 1:
    print_usage()
  else:
    sic = SimIfaceControl()
    sic.setPose()
| |
import glob
import matplotlib.pylab as plt
import os
import six
import numpy as np
from keras import backend as K
from keras.applications import vgg16
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.engine import Input
from keras.engine import Model
from keras.layers import Lambda
from keras.layers.convolutional import Convolution2D, UpSampling2D
from keras.layers.core import Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.optimizers import Adam
from keras.preprocessing.image import load_img, img_to_array
import my_utils
import pascal_image_pair
from keras_module.data_draw.draw_pascal import draw_images_pair
from keras_module.hwh_callbacks import DrawEpoch
# from keras_module.hwh_layers import BatchRenormalization
from keras_module.hwh_layers import BatchRenormalization, InstanceNormalization
from myVgg16 import myVGG16
# NOTE(review): `model` is only referenced by the commented-out experiment
# below; it appears to be dead.
model = Sequential()
# Hyper-parameters for the texture network and input geometry.
args = {
    'batch_size': 32,
    'max_height': 168,
    'max_width': 168,
    'activation': 'relu',
    'num_res_filters': 56,
    'num_blocks': 1
}
img_nrows, img_ncols, channels = args['max_height'], args['max_width'], 3
# Loss-term weights (content / style / total-variation).
# NOTE(review): the misspelled names ('weigths', 'stlye_weigth',
# 'tv_weigth') are looked up with the same spelling in style_total_loss(),
# so they must not be "fixed" in isolation.
weigths = {'loss_weight': 0.025,
           'stlye_weigth': 1.0,
           'tv_weigth': 1.0}
# style_path = "../style_image/style/asheville.jpg"
style_path = "../style_image/style/brushstrokes.jpg"
# Prefix for checkpoint files written/loaded in the __main__ block below.
weight_path = 'style-transfer'
nb_epochs = 10000
batch_size = 32
data_type = 'trainval'
is_test = True
# Pascal VOC 2012 image pairs resized to the network input size.
pasacl_train = pascal_image_pair.PascalVOC2012SegmentationDatasetImagePair(data_type,
                                                                           target_size=(img_nrows, img_ncols))
data_total_len = len(pasacl_train)
my_gen_train = my_utils.ImageIterator(pasacl_train, batch_size)
# Pre-trained VGG16 used as the fixed perceptual-loss feature extractor.
vgg16_model = myVGG16()
vgg16_model.load_weigth()
# x= Input((224,224,3))
# y=model(x)
# y=Convolution2D(3,3)(y)
# y_model = Model(x,y)
#
# layers=model.get_layer(index=7)
# layers.trainable=True
# # y_model.compile(Adam(),loss='binary_crossentropy')
# print(y_model.summary())
# # print(layers)
# exit()
def preprocess_image(image_path):
    """Load an image at the configured size and apply VGG16 preprocessing.

    Returns a (1, height, width, 3) float array ready for the network.
    """
    target = (args['max_height'], args['max_width'])
    arr = img_to_array(load_img(image_path, target_size=target))
    batch = arr[np.newaxis, ...]
    return vgg16.preprocess_input(batch)
def deprocess_image(x):
    """Undo VGG16 preprocessing on one (rows, cols, 3) image.

    Adds the BGR mean pixel back (in place), converts BGR -> RGB and
    clips into displayable uint8 range.
    """
    mean_bgr = (103.939, 116.779, 123.68)
    for channel, mean in enumerate(mean_bgr):
        x[:, :, channel] += mean
    x = x[:, :, ::-1]
    return np.clip(x, 0, 255).astype('uint8')
# def draw_images_pair(img1_datas, img2_datas, batch_size=5, is_save=True, prefix='st-', index_pro=1):
# plt.figure(figsize=(100, 40))
# for index in range(batch_size):
# datum = img1_datas[index]
# datum = deprocess_image(datum)
# label = img2_datas[index]
# label = deprocess_image(label)
# plt.subplot(2, batch_size, index + 1)
# plt.imshow(datum)
# plt.subplot(2, batch_size, batch_size + index + 1)
# plt.imshow(label)
# if is_save:
# plt.savefig(prefix + str(index_pro) + '.jpg')
# else:
# plt.show()
# return
#
# for _ in range(1):
# x_test, y_test = my_gen_train.next()
# print(x_test.shape)
# print(np.max(x_test),np.min(x_test))
# draw_images_pair(x_test, y_test, index_pro=111, is_save=False)
# exit()
# Build the style target once at import time: preprocess the style image
# and tile it along the batch axis so it can be concatenated with each
# training batch inside style_total_loss().
style_image = preprocess_image(style_path)
# print(style_image.shape)
style_image = np.repeat(style_image, axis=0, repeats=batch_size)
# print(style_image.shape)
# style_image = deprocess_image(style_image[4, :, :, :])
# plt.imshow(style_image)
# plt.show()
# exit()
def add_seq_conv_block(net, filters, filter_size, activation='relu', subsample=(1, 1), input_shape=None, is_IB=True):
    """Append conv -> normalization -> (optional) activation to `net`.

    filters/filter_size/subsample configure the Convolution2D layer;
    input_shape (a batch_input_shape tuple) is only passed for the first
    layer of a Sequential model. is_IB selects InstanceNormalization over
    BatchNormalization. `activation` may be a layer name or a layer class;
    the string 'linear' adds no activation layer.
    """
    extra = {'batch_input_shape': input_shape} if input_shape else {}
    net.add(Convolution2D(
        filters, (filter_size, filter_size), strides=subsample, padding='same', **extra))
    norm_layer = InstanceNormalization() if is_IB else BatchNormalization()
    net.add(norm_layer)
    if isinstance(activation, six.string_types):
        if activation != 'linear':
            net.add(Activation(activation))
    else:
        net.add(activation())
def create_sequential_texture_net(input_rows, input_cols, num_res_filters=128,
                                  activation='relu', num_inner_blocks=5, batch_size=32):
    """Build the feed-forward texture/style-transfer network.

    Encoder (9x9 stem + two stride-2 convs), `num_inner_blocks` pairs of
    3x3 convs, then a decoder that upsamples back to the input resolution
    and ends with a tanh conv scaled to roughly [-128, 128].
    """
    net = Sequential()
    add_seq_conv_block(net, num_res_filters // 4, 9,
                       input_shape=(batch_size, input_rows, input_cols, 3),
                       activation=activation)
    add_seq_conv_block(net, num_res_filters // 2, 3, subsample=(2, 2), activation=activation)
    add_seq_conv_block(net, num_res_filters, 3, subsample=(2, 2), activation=activation)
    # Inner blocks: two convs each (no explicit skip connection here).
    for _ in range(num_inner_blocks):
        add_seq_conv_block(net, num_res_filters, 3, activation=activation)
        add_seq_conv_block(net, num_res_filters, 3, activation=activation)
    # Decoder: two upsample + conv stages back to full resolution.
    net.add(UpSampling2D())
    add_seq_conv_block(net, num_res_filters // 2, 3, activation=activation)
    net.add(UpSampling2D())
    add_seq_conv_block(net, num_res_filters // 4, 3, activation=activation)
    add_seq_conv_block(net, 3, 9, activation='tanh')
    net.add(Lambda(lambda t: t * 128.))
    return net
def gram_matrix(x):
    """Batched Gram matrix of a 4-D (batch, rows, cols, channels) tensor.

    Moves channels to axis 1, flattens the remaining axes per channel and
    returns batched feature inner products -- the style representation.
    """
    assert K.ndim(x) == 4
    xs = K.shape(x)
    # features = K.reshape(x, (xs[0], xs[1], xs[2] * xs[3]))
    # gram = K.batch_dot(features, K.permute_dimensions(features, (0, 2, 1)))
    x = K.permute_dimensions(x, (0, 3, 1, 2))
    # print(K.get_variable_shape(x))
    # NOTE(review): `xs` was captured BEFORE the permute, so the reshape
    # target is (batch, rows, cols*channels) rather than
    # (batch, channels, rows*cols). The element counts match, so this only
    # groups features as probably intended when rows == channels (168 vs 56
    # here it does not) -- confirm the intended grouping.
    features = K.reshape(x, (xs[0], xs[1], xs[2] * xs[3]))
    gram = K.batch_dot(features, K.permute_dimensions(features, (0, 2, 1)))
    # print(K.get_variable_shape(gram))
    return gram
def style_loss(style, combination):
    """Mean squared Gram-matrix distance between two 4-D feature batches.

    `style` must have a fully known static shape, since its non-batch
    dimensions provide the normalization constant.
    """
    assert K.ndim(style) == 4
    assert K.ndim(combination) == 4
    target = style
    generated = combination
    # Normalization: (product of the non-batch dimensions) squared.
    var_shape = K.get_variable_shape(style)
    var_squar_prod = np.square(np.prod(var_shape[1:]))
    # print(var_squar_prod)
    return K.mean(
        K.sum(K.square(gram_matrix(target) - gram_matrix(generated)), axis=(1, 2))
    ) / (4.0 * var_squar_prod)
def tv_loss(x):
    """Total-variation regularizer for a (batch, rows, cols, channels)
    image batch: penalizes neighboring-pixel differences to smooth the
    generated image.
    """
    assert K.ndim(x) == 4
    # Channels-first variants kept for reference:
    # a = K.square(x[:, :, 1:, :-1] - x[:, :, :-1, :-1])
    # b = K.square(x[:, :, :-1, 1:] - x[:, :, :-1, :-1])
    a = K.square(x[:, 1:, :-1, :] - x[:, :-1, :-1, :])
    b = K.square(x[:, :-1, 1:, :] - x[:, :-1, :-1, :])
    return K.mean(K.sum(K.pow(a + b, 1.25), axis=(1, 2, 3)))
# y_true: content image batch; y_pred: generated image batch.
def style_total_loss(y_true, y_pred):
    """Keras loss combining content, style and total-variation terms.

    Concatenates [generated, content, style] into one batch, runs it
    through VGG16 once, then slices the feature maps to compute each
    term. Uses the module-level `style_image`, `weigths`, `batch_size`
    and `vgg16_model`.
    """
    loss = K.variable(0.)
    # The numpy style batch becomes a constant tensor.
    style_reference_image = K.variable(style_image)
    input_tensor = K.concatenate([y_pred, y_true,
                                  style_reference_image], axis=0)
    # print(K.get_variable_shape(y_true))
    # print(K.get_variable_shape(input_tensor))
    # print(K.get_variable_shape(input_tensor))
    content, style_layers = vgg16_model.get_layer(input_tensor)
    # Slices: [0:b] generated, [b:2b] content, [2b:3b] style reference.
    generated = content[:batch_size, :, :, :]
    contented = content[batch_size:2 * batch_size, :, :, :]
    # print(K.get_variable_shape(generated))
    # print(K.get_variable_shape(contented))
    # exit()
    # Content term: feature-space MSE between content and generated.
    loss += weigths['loss_weight'] * K.mean(
        K.sum(K.square(contented - generated), axis=(1, 2, 3))
    )
    # Style term: Gram distance at every selected VGG layer.
    for style_layer in style_layers:
        combination_features = style_layer[:batch_size, :, :, :]
        style_reference_features = style_layer[2 * batch_size:3 * batch_size, :, :, :]
        sl = style_loss(style_reference_features, combination_features)
        loss += weigths['stlye_weigth'] * sl
    # Smoothness term on the generated images only.
    loss += weigths['tv_weigth'] * tv_loss(y_pred[:batch_size, :, :, :])
    return loss
if __name__ == '__main__':
    # Build and compile the texture network with the combined style loss.
    texnet = create_sequential_texture_net(args['max_height'], args['max_width'],
                                           activation=args['activation'], num_res_filters=args['num_res_filters'],
                                           num_inner_blocks=args['num_blocks'], batch_size=args['batch_size'])
    #
    # texnet.summary()
    # exit()
    texnet.compile(Adam(), loss=style_total_loss)
    is_test = False
    nb_epochs = 100
    # Grab a fixed batch for the per-epoch preview drawings.
    img_abc, label_abc = my_gen_train.next()
    img_abc, label_abc = my_gen_train.next()
    # NOTE(review): `imb_abc` looks like a typo for `img_abc` -- it is
    # assigned but never read, while the un-copied `img_abc` is passed to
    # DrawEpoch below; confirm the intended snapshot.
    imb_abc = img_abc.copy()
    label_abc = label_abc.copy()
    # Train
    # NOTE(review): try/finally with no except -- if no checkpoint matches
    # the glob, max() raises ValueError, yet the finally block still runs
    # the whole training pass (and its exit() masks the pending error).
    # The `while True` never loops: every path through finally ends in
    # exit(). Confirm this is intended.
    while True:
        try:
            # Resume from the most recently created checkpoint, if any.
            newest = max(glob.iglob(weight_path + '*.hdf5'), key=os.path.getctime)
            texnet.load_weights(newest)
            print("Load newest: ")
            print("    " + newest)
        finally:
            if is_test:
                # Inference-only mode: draw a few predicted batches, no training.
                # texnet.load_weights('style-transfer-epoch.014.hdf5')
                for indexabc in range(4):
                    img_test, label_test = my_gen_train.next()
                    # print(np.max(img_test), np.min(img_test))
                    label_pred = texnet.predict(img_test)
                    # print(np.max(label_pred), np.min(label_pred))
                    # exit()
                    draw_images_pair(img_test, label_pred,
                                     batch_size=5, is_save=False, prefix='st-', index_pro=indexabc,is_block=True)
                exit()
            # checkpointer = ModelCheckpoint(filepath=weight_path + '-epoch.{epoch:03d}-{loss:.4f}.hdf5',
            #                                monitor='loss', verbose=1, period=2)
            # Save weights every 5 epochs.
            checkpointer = ModelCheckpoint(filepath=weight_path + '-epoch.{epoch:03d}.hdf5',
                                           verbose=1, period=5)
            # plt.ion()
            # Preview callback: draws input/prediction pairs every 2 epochs.
            draw_function = lambda x, y, epoch: draw_images_pair(x, y, epoch,
                                                                 batch_size=5, is_save=True, prefix='st-')
            drawepoch = DrawEpoch(img_abc, texnet, draw_function=draw_function, period=2)
            # tensorboard = TensorBoard(log_dir="logs", histogram_freq=0)
            texnet.fit_generator(generator=my_gen_train, steps_per_epoch=int(data_total_len / batch_size),
                                 epochs=nb_epochs, max_q_size=10,
                                 callbacks=[checkpointer, drawepoch], verbose=1, workers=2)
            exit()
| |
"""
Test the parallel module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2010-2011 Gael Varoquaux
# License: BSD Style, 3 clauses.
import time
import sys
import io
import os
# --- Python 2/3 compatibility shims for the tests below -----------------
try:
    # Python 2: the C pickler raises TypeError on unpicklable objects.
    import cPickle as pickle
    PickleError = TypeError
except:
    # No cPickle (Python 3): pickling failures raise PicklingError.
    import pickle
    PickleError = pickle.PicklingError
if sys.version_info[0] == 3:
    PickleError = pickle.PicklingError
try:
    # Python 2/Python 3 compat
    unicode('str')
except NameError:
    # Python 3 has no `unicode` builtin; make it a no-op passthrough.
    unicode = lambda s: s
from ..parallel import Parallel, delayed, SafeFunction, WorkerInterrupt, \
    mp, cpu_count, VALID_BACKENDS
from ..my_exceptions import JoblibException
import nose
# Backends under test: the default (None), every registered backend, and a
# 'spawn' multiprocessing context where the stdlib supports it.
ALL_VALID_BACKENDS = [None] + VALID_BACKENDS
if hasattr(mp, 'get_context'):
    # Custom multiprocessing context in Python 3.4+
    ALL_VALID_BACKENDS.append(mp.get_context('spawn'))
###############################################################################
def division(x, y):
    """Return x divided by y (module-level so workers can import it)."""
    quotient = x / y
    return quotient
def square(x):
    """Return x squared."""
    return pow(x, 2)
def exception_raiser(x):
    """Return x unchanged, except raise ValueError for the magic value 7."""
    if x != 7:
        return x
    raise ValueError
def interrupt_raiser(x):
    """Sleep briefly, then raise KeyboardInterrupt (simulated Ctrl-C)."""
    time.sleep(0.05)
    raise KeyboardInterrupt
def f(x, y=0, z=0):
    """A module-level function so that it can be spawned with
    multiprocessing. Computes x**2 + y + z.
    """
    squared = x ** 2
    return squared + y + z
###############################################################################
def test_cpu_count():
    """Smoke test: cpu_count() must report at least one CPU on any host."""
    assert cpu_count() > 0
###############################################################################
# Test parallel
def check_simple_parallel(backend):
    """Check Parallel against a sequential reference for several n_jobs,
    then smoke-test the verbosity levels with stdout/stderr captured.

    Any exception raised while the streams are redirected is re-raised
    after dumping the captured output and restoring the real streams.
    """
    X = range(5)
    for n_jobs in (1, 2, -1, -2):
        nose.tools.assert_equal(
            [square(x) for x in X],
            Parallel(n_jobs=n_jobs)(delayed(square)(x) for x in X))
    try:
        # To smoke-test verbosity, we capture stdout
        orig_stdout = sys.stdout
        orig_stderr = sys.stderr  # BUG FIX: previously saved sys.stdout twice
        if sys.version_info[0] == 3:
            # BUG FIX: sys.stderr was assigned twice and sys.stdout never
            # redirected, so verbose output leaked to the real stdout.
            sys.stdout = io.StringIO()
            sys.stderr = io.StringIO()
        else:
            sys.stdout = io.BytesIO()
            sys.stderr = io.BytesIO()
        for verbose in (2, 11, 100):
            Parallel(n_jobs=-1, verbose=verbose, backend=backend)(
                delayed(square)(x) for x in X)
            Parallel(n_jobs=1, verbose=verbose, backend=backend)(
                delayed(square)(x) for x in X)
            Parallel(n_jobs=2, verbose=verbose, pre_dispatch=2,
                     backend=backend)(
                delayed(square)(x) for x in X)
            Parallel(n_jobs=2, verbose=verbose, backend=backend)(
                delayed(square)(x) for x in X)
    except Exception as e:
        # Surface the captured output before propagating the failure.
        my_stdout = sys.stdout
        my_stderr = sys.stderr
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr
        print(unicode(my_stdout.getvalue()))
        print(unicode(my_stderr.getvalue()))
        raise e
    finally:
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr
def test_simple_parallel():
    """Nose generator: run the simple-parallel check for every backend
    (including None and, when available, a 'spawn' context)."""
    for backend in ALL_VALID_BACKENDS:
        yield check_simple_parallel, backend
def nested_loop(backend):
    """Inner Parallel call, used to exercise nesting of Parallel loops."""
    Parallel(n_jobs=2, backend=backend)(
        delayed(square)(.01) for _ in range(2))
def check_nested_loop(parent_backend, child_backend):
    """Run a Parallel loop whose tasks themselves start a Parallel loop."""
    Parallel(n_jobs=2, backend=parent_backend)(
        delayed(nested_loop)(child_backend) for _ in range(2))
def test_nested_loop():
    """Nose generator: nesting must work for every parent/child backend pair."""
    for parent_backend in VALID_BACKENDS:
        for child_backend in VALID_BACKENDS:
            yield check_nested_loop, parent_backend, child_backend
def increment_input(a):
    """Increment the first element of *a* in place (mutation-visibility helper)."""
    a[0] = a[0] + 1
def test_increment_input_with_threads():
    """Input is mutable when using the threading backend"""
    a = [0]
    # With threads the workers share the same list object (no pickling
    # round-trip), so all five increments are visible to the caller.
    Parallel(n_jobs=2, backend="threading")(
        delayed(increment_input)(a) for _ in range(5))
    nose.tools.assert_equal(a, [5])
def test_parallel_kwargs():
    """ Check the keyword argument processing of pmap.
    """
    lst = range(10)
    # Keyword arguments given to delayed() must be forwarded to the task.
    for n_jobs in (1, 4):
        yield (nose.tools.assert_equal,
               [f(x, y=1) for x in lst],
               Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)
               )
def test_parallel_pickling():
    """ Check that pmap captures the errors when it is passed an object
        that cannot be pickled.
    """
    # g is defined inside the test, so it cannot be pickled for transport
    # to worker processes; Parallel must surface the pickling error.
    def g(x):
        return x ** 2
    nose.tools.assert_raises(PickleError,
                             Parallel(),
                             (delayed(g)(x) for x in range(10))
                             )
def test_error_capture():
    """Worker errors are captured and re-raised with the expected types."""
    # Check that error are captured, and that correct exceptions
    # are raised.
    if mp is not None:
        # A JoblibException will be raised only if there is indeed
        # multiprocessing
        nose.tools.assert_raises(JoblibException,
                                 Parallel(n_jobs=2),
                                 [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))],
                                 )
        nose.tools.assert_raises(WorkerInterrupt,
                                 Parallel(n_jobs=2),
                                 [delayed(interrupt_raiser)(x) for x in (1, 0)],
                                 )
    else:
        # Without multiprocessing the original exceptions propagate as-is.
        nose.tools.assert_raises(KeyboardInterrupt,
                                 Parallel(n_jobs=2),
                                 [delayed(interrupt_raiser)(x) for x in (1, 0)],
                                 )
        nose.tools.assert_raises(ZeroDivisionError,
                                 Parallel(n_jobs=2),
                                 [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))],
                                 )
    # n_jobs=1 runs inline: the ZeroDivisionError must NOT be wrapped in a
    # JoblibException.
    try:
        ex = JoblibException()
        Parallel(n_jobs=1)(
            delayed(division)(x, y) for x, y in zip((0, 1), (1, 0)))
    except Exception:
        # Cannot use 'except as' to maintain Python 2.5 compatibility
        ex = sys.exc_info()[1]
    nose.tools.assert_false(isinstance(ex, JoblibException))
class Counter(object):
    """Stateful callable that appends to one list and asserts it stays the
    same length as a second list -- used to check that list arguments are
    shared rather than copied.

    NOTE: intentionally shadows collections.Counter; local to this module.
    """
    def __init__(self, list1, list2):
        self.list1 = list1
        self.list2 = list2
    def __call__(self, i):
        self.list1.append(i)
        nose.tools.assert_equal(len(self.list1), len(self.list2))
def consumer(queue, item):
    """Record consumption of *item* on the shared queue."""
    message = 'Consumed %s' % item
    queue.append(message)
def check_dispatch_one_job(backend):
    """ Test that with only one job, Parallel does act as a iterator.
    """
    queue = list()
    def producer():
        for i in range(6):
            queue.append('Produced %i' % i)
            yield i
    # With n_jobs=1 each produced item must be consumed before the next is
    # generated, giving strict produce/consume interleaving.
    Parallel(n_jobs=1, backend=backend)(
        delayed(consumer)(queue, x) for x in producer())
    nose.tools.assert_equal(queue,
                            ['Produced 0', 'Consumed 0',
                             'Produced 1', 'Consumed 1',
                             'Produced 2', 'Consumed 2',
                             'Produced 3', 'Consumed 3',
                             'Produced 4', 'Consumed 4',
                             'Produced 5', 'Consumed 5']
                            )
    nose.tools.assert_equal(len(queue), 12)
def test_dispatch_one_job():
    """Nose generator: sequential dispatch check for every backend."""
    for backend in VALID_BACKENDS:
        yield check_dispatch_one_job, backend
def check_dispatch_multiprocessing(backend):
    """ Check that using pre_dispatch Parallel does indeed dispatch items
        lazily.
    """
    if mp is None:
        raise nose.SkipTest()
    # A manager list is usable from worker processes.
    manager = mp.Manager()
    queue = manager.list()
    def producer():
        for i in range(6):
            queue.append('Produced %i' % i)
            yield i
    Parallel(n_jobs=2, pre_dispatch=3, backend=backend)(
        delayed(consumer)(queue, 'any') for _ in producer())
    # Only 3 tasks are dispatched out of 6. The 4th task is dispatched only
    # after any of the first 3 jobs have completed.
    nose.tools.assert_equal(list(queue)[:4],
                            ['Produced 0', 'Produced 1', 'Produced 2',
                             'Consumed any', ])
    nose.tools.assert_equal(len(queue), 12)
def test_dispatch_multiprocessing():
    """Nose generator: lazy pre_dispatch check for every backend."""
    for backend in VALID_BACKENDS:
        yield check_dispatch_multiprocessing, backend
def test_exception_dispatch():
    "Make sure that exception raised during dispatch are indeed captured"
    # With a large pre_dispatch, the ValueError raised while generating
    # task 7 happens during dispatch, not execution.
    nose.tools.assert_raises(
        ValueError,
        Parallel(n_jobs=6, pre_dispatch=16, verbose=0),
        (delayed(exception_raiser)(i) for i in range(30)),
    )
def _reload_joblib():
    """Helper run inside workers by test_multiple_spawning: re-importing and
    reloading the parallel module from a spawned child is expected to fail.

    NOTE(review): Parallel.__module__ is a dotted module path, so splitting
    on os.sep normally has no effect and joblib_path[:1] keeps the whole
    dotted name; the surrounding test only requires this call to raise
    ImportError in the child, so the exact path handling may be moot --
    confirm before relying on it. `reload` is the Python 2 builtin.
    """
    # Retrieve the path of the parallel module in a robust way
    joblib_path = Parallel.__module__.split(os.sep)
    joblib_path = joblib_path[:1]
    joblib_path.append('parallel.py')
    joblib_path = '/'.join(joblib_path)
    module = __import__(joblib_path)
    # Reload the module. This should trigger a fail
    reload(module)
def test_multiple_spawning():
    """Launching a new interpreter after spawning must raise ImportError."""
    # Test that attempting to launch a new Python after spawned
    # subprocesses will raise an error, to avoid infinite loops on
    # systems that do not support fork
    if not int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)):
        raise nose.SkipTest()
    nose.tools.assert_raises(ImportError, Parallel(n_jobs=2),
                             [delayed(_reload_joblib)() for i in range(10)])
###############################################################################
# Test helpers
def test_joblib_exception():
    """JoblibException must support repr() and pickling (needed to cross
    process boundaries)."""
    # Smoke-test the custom exception
    e = JoblibException('foobar')
    # Test the repr
    repr(e)
    # Test the pickle
    pickle.dumps(e)
def test_safe_function():
    """SafeFunction wraps raised errors in a JoblibException."""
    safe_division = SafeFunction(division)
    nose.tools.assert_raises(JoblibException, safe_division, 1, 0)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# run as:
# python web2py.py -S eden -M -R applications/eden/static/scripts/tools/build.sahana.py
# or
# python web2py.py -S eden -M -R applications/eden/static/scripts/tools/build.sahana.py -A gis
#
#
# Built with code/inspiration from MapFish, OpenLayers & Michael Crute
#
# Must run inside the web2py environment so the deployment `settings`
# object is available to pick the active theme; bail out otherwise.
try:
    theme = settings.get_theme()
except:
    print "ERROR: File now needs to be run in the web2py environment in order to pick up which theme to build"
    exit()
import os
import sys
import shutil
# Work from static/scripts/tools so the relative paths used below
# (../S3, sahana.js.*.cfg, license.txt) resolve; `request` is supplied by
# the web2py environment.
SCRIPTPATH = os.path.join(request.folder, "static", "scripts", "tools")
os.chdir(SCRIPTPATH)
sys.path.append("./")
# For JS
import getopt
import jsmin, mergejs
# For CSS
import re
## Untested as libsass failing to run for me:
# For SCSS
#try:
# import sass
#except:
# print "Unable to import libsass: so if your theme includes SCSS sources, these won't be rebuilt"
def mergeCSS(inputFilenames, outputFilename):
    """Concatenate the given CSS files, in order, into outputFilename.

    Args:
        inputFilenames: iterable of paths to read.
        outputFilename: path the merged CSS is written to.
    Returns:
        outputFilename, for chaining into compressCSS().

    Fixes vs. the original: file handles are closed deterministically via
    `with` (the original relied on GC), and the output is assembled with a
    single join instead of quadratic string concatenation.
    """
    chunks = []
    for inputFilename in inputFilenames:
        with open(inputFilename, "r") as inputFile:
            chunks.append(inputFile.read())
    with open(outputFilename, "w") as outputFile:
        outputFile.write("".join(chunks))
    return outputFilename
def cleanline(theLine):
    """Minify a chunk of CSS text.

    Applies, in order: strip line breaks / tabs / form feeds, collapse
    runs of spaces, drop the last semicolon before '}', drop the space
    after '{', strip /* ... */ comments and @CHARSET declarations, and
    remove spaces before '{' and after ':', ',' and ';'.
    """
    rules = (
        ("(\n|\r|\t|\f|\v)+", ""),    # line breaks, tabs, form/vertical feeds
        ("( )+", " "),                # runs of spaces -> single space
        ("(; }|;})+", "}"),           # last semicolon before }
        ("({ )+", "{"),               # space after {
        ("/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/", ""),  # /* comments */
        ("@CHARSET .*;", ""),         # @CHARSET declarations
        (" {", "{"),                  # space before {
        (": ", ":"),                  # space after :
        (", ", ","),                  # space after ,
        ("; ", ";"),                  # space after ;
    )
    cleaned = theLine
    for pattern, replacement in rules:
        cleaned = re.compile(pattern).sub(replacement, cleaned)
    return cleaned
def compressCSS(inputFilename, outputFilename):
    """Minify a CSS file: run cleanline() over its contents and write the
    result to outputFilename.

    Bug fix: the original iterated the file's *contents string* character
    by character (`open(...).read()` returns a str, so `for line in
    theFile` yields single characters), calling cleanline() once per
    character and building the intermediate result with quadratic `+`
    concatenation. That per-character pass could only strip whitespace
    control characters, which cleanline()'s first rule removes anyway, so
    a single whole-text pass produces identical output. Files are now
    closed deterministically via `with`.
    """
    with open(inputFilename, "r") as inputFile:
        contents = inputFile.read()
    minified = cleanline(contents)
    with open(outputFilename, "w") as outputFile:
        outputFile.write(minified)
    return
def dojs(dogis = False, warnings = True):
""" Minifies the JavaScript """
# Do we have local version of the Closure Compiler available?
use_compressor = "jsmin" # Fallback
try:
import closure
use_compressor = "closure"
print "using local Closure Compiler"
except Exception, E:
print "No closure (%s)" % E
print "Download from http://closure-compiler.googlecode.com/files/compiler-latest.zip"
try:
import closure_ws
use_compressor = "closure_ws"
print "Using Closure via Web Service - limited to files < 1Mb!"
except ImportError:
print "No closure_ws"
if use_compressor == "closure":
if not warnings:
closure.extra_params = "--warning_level QUIET"
minimize = closure.minimize
elif use_compressor == "closure_ws":
minimize = closure_ws.minimize
elif use_compressor == "jsmin":
minimize = jsmin.jsmin
sourceDirectory = ".."
configFilename = "sahana.js.cfg"
outputFilename = "S3.min.js"
# Merge JS files
print "Merging Core libraries."
merged = mergejs.run(sourceDirectory, None, configFilename)
# Compress JS files
print "Compressing - JS"
minimized = minimize(merged)
# Add license
print "Adding license file."
minimized = open("license.txt").read() + minimized
# Print to output files
print "Writing to %s." % outputFilename
open(outputFilename, "w").write(minimized)
# Remove old JS files
print "Deleting %s." % outputFilename
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
# Move new JS files
print "Moving new JS files"
shutil.move(outputFilename, "../S3")
# Bootstrap
# print "Compressing Bootstrap"
# sourceDirectoryBootstrap = ".."
# configFilenameBootstrap = "sahana.js.bootstrap.cfg"
# outputFilenameBootstrap = "bootstrap.min.js"
# mergedBootstrap = mergejs.run(sourceDirectoryBootstrap,
# None,
# configFilenameBootstrap)
# minimizedBootstrap = minimize(mergedBootstrap)
# open(outputFilenameBootstrap, "w").write(minimizedBootstrap)
# try:
# os.remove("../%s" % outputFilenameBootstrap)
# except:
# pass
# shutil.move(outputFilenameBootstrap, "..")
# Calendar
print "Compressing calendar"
sourceDirectory = ".."
configFilename = "sahana.js.calendar.cfg"
outputFilename = "s3.ui.calendar.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# dataLists
print "Compressing dataLists"
sourceDirectory = ".."
configFilename = "sahana.js.dataLists.cfg"
outputFilename = "s3.dataLists.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# dataTables
print "Compressing dataTables"
sourceDirectory = ".."
configFilename = "sahana.js.dataTables.cfg"
outputFilename = "s3.dataTables.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
configFilename = "sahana.js.dataTables_multi.cfg"
outputFilename = "s3.dataTables.multi.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# pivotTables
print "Compressing pivotTables"
sourceDirectory = ".."
configFilename = "sahana.js.pivotTables.cfg"
outputFilename = "s3.pivotTables.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# timeplot
print "Compressing timeplot"
sourceDirectory = ".."
configFilename = "sahana.js.timeplot.cfg"
outputFilename = "s3.timeplot.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# groupedItems
print "Compressing groupedItems"
sourceDirectory = ".."
configFilename = "sahana.js.groupeditems.cfg"
outputFilename = "s3.groupeditems.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# ImageCrop
print "Compressing ImageCrop"
sourceDirectory = ".."
configFilename = "sahana.js.imageCrop.cfg"
outputFilename = "s3.imagecrop.widget.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# JSTree
print "Compressing JSTree"
sourceDirectory = ".."
configFilename = "sahana.js.jstree.cfg"
outputFilename = "s3.jstree.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Chat
print "Compressing Chat"
sourceDirectory = ".."
configFilename = "sahana.js.chat.cfg"
outputFilename = "s3.chat.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Guided Tour
print "Compressing Guided Tour"
sourceDirectory = ".."
configFilename = "sahana.js.guidedTour.cfg"
outputFilename = "s3.guidedtour.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Single scripts
for filename in ("add_person",
"cap",
"gis",
"gis.feature_crud",
"gis.fullscreen",
"gis.latlon",
"gis.loader",
"gis.pois",
"locationselector.widget",
"msg",
"popup",
"register_validation",
"select_person",
"sync",
"timeline",
"ui.contacts",
"ui.embeddedcomponent",
"ui.locationselector",
"work",
):
print "Compressing s3.%s.js" % filename
inputFilename = os.path.join("..", "S3", "s3.%s.js" % filename)
outputFilename = "s3.%s.min.js" % filename
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../S3/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../S3")
# Enable when needed
full = False
if full:
for filename in ("spectrum",
"tag-it",
):
print "Compressing %s.js" % filename
in_f = os.path.join("..", filename + ".js")
out_f = os.path.join("..", filename + ".min.js")
with open(in_f, "r") as inp:
with open(out_f, "w") as out:
out.write(minimize(inp.read()))
# Vulnerability
print "Compressing Vulnerability"
sourceDirectory = "../.."
configFilename = "sahana.js.vulnerability.cfg"
outputFilename = "s3.vulnerability.min.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../../themes/Vulnerability/js/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../../themes/Vulnerability/js")
print "Compressing Vulnerability GIS"
sourceDirectory = "../.."
configFilename = "sahana.js.vulnerability_gis.cfg"
outputFilename = "OpenLayers.js"
merged = mergejs.run(sourceDirectory,
None,
configFilename)
minimized = minimize(merged)
open(outputFilename, "w").write(minimized)
try:
os.remove("../../themes/Vulnerability/js/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../../themes/Vulnerability/js")
if dogis:
sourceDirectoryOpenLayers = "../gis/openlayers/lib"
sourceDirectoryMGRS = "../gis"
sourceDirectoryGeoExt = "../gis/GeoExt/lib"
sourceDirectoryGxp = "../gis/gxp"
configFilenameOpenLayers = "sahana.js.ol.cfg"
configFilenameMGRS = "sahana.js.mgrs.cfg"
configFilenameGeoExt = "sahana.js.geoext.cfg"
configFilenameGxpMin = "sahana.js.gxp.cfg"
configFilenameGxp2 = "sahana.js.gxp2.cfg"
configFilenameGxpFull = "sahana.js.gxpfull.cfg"
outputFilenameOpenLayers = "OpenLayers.js"
outputFilenameMGRS = "MGRS.min.js"
outputFilenameGeoExt = "GeoExt.js"
outputFilenameGxp = "gxp.js"
outputFilenameGxp2 = "gxp_upload.js"
# Merge GIS JS Files
print "Merging OpenLayers libraries."
mergedOpenLayers = mergejs.run(sourceDirectoryOpenLayers,
None,
configFilenameOpenLayers)
print "Merging MGRS libraries."
mergedMGRS = mergejs.run(sourceDirectoryMGRS,
None,
configFilenameMGRS)
print "Merging GeoExt libraries."
mergedGeoExt = mergejs.run(sourceDirectoryGeoExt,
None,
configFilenameGeoExt)
print "Merging gxp libraries."
mergedGxpMin = mergejs.run(sourceDirectoryGxp,
None,
configFilenameGxpMin)
mergedGxp2 = mergejs.run(sourceDirectoryGxp,
None,
configFilenameGxp2)
mergedGxpFull = mergejs.run(sourceDirectoryGxp,
None,
configFilenameGxpFull)
# Compress JS files
print "Compressing - OpenLayers JS"
if use_compressor == "closure_ws":
# Limited to files < 1Mb!
minimizedOpenLayers = jsmin.jsmin(mergedOpenLayers)
#minimizedOpenLayers = jsmin.jsmin("%s\n%s" % (mergedOpenLayers,
# mergedOpenLayersExten))
else:
minimizedOpenLayers = minimize(mergedOpenLayers)
#minimizedOpenLayers = minimize("%s\n%s" % (mergedOpenLayers,
# mergedOpenLayersExten))
# OpenLayers extensions
for filename in ["OWM.OpenLayers",
]:
inputFilename = os.path.join("..", "gis", "%s.js" % filename)
outputFilename = "%s.min.js" % filename
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../gis/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../gis")
print "Compressing - MGRS JS"
minimizedMGRS = minimize(mergedMGRS)
print "Compressing - GeoExt JS"
minimizedGeoExt = minimize("%s\n%s" % (mergedGeoExt,
#mergedGeoExtux,
mergedGxpMin))
# GeoNamesSearchCombo
inputFilename = os.path.join("..", "gis", "GeoExt", "ux", "GeoNamesSearchCombo.js")
outputFilename = "GeoNamesSearchCombo.min.js"
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../gis/GeoExt/ux/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../gis/GeoExt/ux")
print "Compressing - gxp JS"
minimizedGxp = minimize(mergedGxpFull)
minimizedGxp2 = minimize(mergedGxp2)
for filename in ("WMSGetFeatureInfo",
):
inputFilename = os.path.join("..", "gis", "gxp", "plugins", "%s.js" % filename)
outputFilename = "%s.min.js" % filename
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../gis/gxp/plugins/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../gis/gxp/plugins")
for filename in ("GoogleEarthPanel",
"GoogleStreetViewPanel",
):
inputFilename = os.path.join("..", "gis", "gxp", "widgets", "%s.js" % filename)
outputFilename = "%s.min.js" % filename
input = open(inputFilename, "r").read()
minimized = minimize(input)
open(outputFilename, "w").write(minimized)
try:
os.remove("../gis/gxp/widgets/%s" % outputFilename)
except:
pass
shutil.move(outputFilename, "../gis/gxp/widgets")
# Add license
#minimizedGIS = open("license.gis.txt").read() + minimizedGIS
# Print to output files
print "Writing to %s." % outputFilenameOpenLayers
open(outputFilenameOpenLayers, "w").write(minimizedOpenLayers)
print "Writing to %s." % outputFilenameMGRS
open(outputFilenameMGRS, "w").write(minimizedMGRS)
print "Writing to %s." % outputFilenameGeoExt
open(outputFilenameGeoExt, "w").write(minimizedGeoExt)
print "Writing to %s." % outputFilenameGxp
open(outputFilenameGxp, "w").write(minimizedGxp)
print "Writing to %s." % outputFilenameGxp2
open(outputFilenameGxp2, "w").write(minimizedGxp2)
# Move new JS files
print "Deleting %s." % outputFilenameOpenLayers
try:
os.remove("../gis/%s" % outputFilenameOpenLayers)
except:
pass
print "Moving new OpenLayers JS files"
shutil.move(outputFilenameOpenLayers, "../gis")
print "Deleting %s." % outputFilenameMGRS
try:
os.remove("../gis/%s" % outputFilenameMGRS)
except:
pass
print "Moving new MGRS JS files"
shutil.move(outputFilenameMGRS, "../gis")
print "Deleting %s." % outputFilenameGeoExt
try:
os.remove("../gis/%s" % outputFilenameGeoExt)
except:
pass
print "Moving new GeoExt JS files"
shutil.move(outputFilenameGeoExt, "../gis")
print "Deleting %s." % outputFilenameGxp
try:
os.remove("../gis/%s" % outputFilenameGxp)
except:
pass
print "Moving new gxp JS files"
shutil.move(outputFilenameGxp, "../gis")
print "Deleting %s." % outputFilenameGxp2
try:
os.remove("../gis/%s" % outputFilenameGxp2)
except:
pass
print "Moving new gxp2 JS files"
shutil.move(outputFilenameGxp2, "../gis")
def docss():
    """
    Merge and minify the CSS for the active theme, then the Bootstrap and
    Ext stylesheets, moving each minified bundle into its destination
    directory.

    Relies on module-level helpers/globals: settings, current, mergeCSS,
    compressCSS, sass and SCRIPTPATH.
    """
    # Theme
    theme = settings.get_theme()
    location = current.response.s3.theme_location
    print "Using theme %s" % theme
    # css.cfg lists the stylesheets to bundle, one per line; paths are
    # resolved under ../../styles below.
    if location:
        # NOTE(review): location is sliced with [:-1] here but used
        # unsliced in the SCSS branch below -- confirm both are correct.
        css_cfg = os.path.join("..", "..", "..", "modules", "templates", location[:-1], theme, "css.cfg")
    else:
        css_cfg = os.path.join("..", "..", "..", "modules", "templates", theme, "css.cfg")
    f = open(css_cfg, "r")
    files = f.readlines()
    f.close()
    listCSS = []
    # NOTE(review): the last line of css.cfg is deliberately skipped --
    # presumably a terminator line; confirm.
    for file in files[:-1]:
        if file[0] != "#":
            # Real line, not a comment
            if file[:5] == "SCSS ":
                # Compile the SCSS first
                file = file[5:]
                filename = file.split("/")[-1].split(".")[0]
                sourcePath = os.path.join("..", "..", "..", location, "templates", theme, "scss")
                sourceFilename = os.path.join(sourcePath, "%s.scss" % filename)
                sourceFile = open(sourceFilename, "r")
                source = sourceFile.read()
                sourceFile.close()
                # Compile from within the scss folder so @import paths
                # resolve, then return to the script's directory.
                os.chdir(sourcePath)
                outputText = sass.compile(source)
                os.chdir(SCRIPTPATH)
                outputFile = open(file, "w")
                outputFile.write(outputText)
                outputFile.close()
            # Strip line-break/whitespace control characters from the entry
            p = re.compile("(\n|\r|\t|\f|\v)+")
            file = p.sub("", file)
            listCSS.append("../../styles/%s" % file)
    outputFilenameCSS = "eden.min.css"
    # Merge CSS files
    print "Merging Core styles."
    mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
    # Compress CSS files
    print "Writing to %s." % outputFilenameCSS
    compressCSS(mergedCSS, outputFilenameCSS)
    # Move files to correct locations
    print "Deleting %s." % outputFilenameCSS
    try:
        # Best-effort delete of the previous bundle; missing file is fine
        os.remove("../../themes/%s/%s" % (theme, outputFilenameCSS))
    except:
        pass
    print "Moving new %s." % outputFilenameCSS
    shutil.move(outputFilenameCSS, "../../themes/%s" % theme)
    # Enable when needed
    full = False
    if full:
        # Optional plugin stylesheets, disabled by default (full = False)
        for filename in ("joyride",
                         "jstree",
                         "spectrum",
                         ):
            print "Merging %s styles." % filename
            listCSS = ("../../styles/plugins/%s.css" % filename,)
            outputFilenameCSS = "%s.min.css" % filename
            mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
            print "Writing to %s." % outputFilenameCSS
            compressCSS(mergedCSS, outputFilenameCSS)
            # Move files to correct locations
            print "Deleting %s." % outputFilenameCSS
            try:
                os.remove("../../styles/plugins/%s" % outputFilenameCSS)
            except:
                pass
            print "Moving new %s." % outputFilenameCSS
            shutil.move(outputFilenameCSS, "../../styles/plugins")
    # Bootstrap
    print "Bootstrap CSS"
    listCSS = []
    for file in ["bootstrap.css",
                 "bootstrap-responsive.css",
                 "font-awesome.css",
                 #"bootstrap-multiselect.css",
                 ]:
        listCSS.append("../../styles/bootstrap/%s" % file)
    outputFilenameCSS = "bootstrap-combined.min.css"
    # Merge CSS files
    print "Merging Bootstrap styles."
    mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
    # Compress CSS files
    print "Writing to %s." % outputFilenameCSS
    compressCSS(mergedCSS, outputFilenameCSS)
    # Move files to correct locations
    print "Deleting %s." % outputFilenameCSS
    try:
        os.remove("../../styles/bootstrap/%s" % outputFilenameCSS)
    except:
        pass
    print "Moving new %s." % outputFilenameCSS
    shutil.move(outputFilenameCSS, "../../styles/bootstrap")
    # Ext
    print "Ext Gray CSS"
    listCSS = []
    for file in ["ext-all-notheme.css",
                 "xtheme-gray.css",
                 ]:
        listCSS.append("../ext/resources/css/%s" % file)
    outputFilenameCSS = "ext-gray.min.css"
    # Merge CSS files
    print "Merging Ext styles."
    mergedCSS = mergeCSS(listCSS, outputFilenameCSS)
    # Compress CSS file
    print "Writing to %s." % outputFilenameCSS
    compressCSS(mergedCSS, outputFilenameCSS)
    # Move files to correct locations
    print "Deleting %s." % outputFilenameCSS
    try:
        os.remove("../ext/resources/css/%s" % outputFilenameCSS)
    except:
        pass
    print "Moving new %s." % outputFilenameCSS
    shutil.move(outputFilenameCSS, "../ext/resources/css")
    print "Ext no-Theme CSS"
    outputFilenameCSS = "ext-notheme.min.css"
    # Compress CSS file
    print "Writing to %s." % outputFilenameCSS
    compressCSS("../ext/resources/css/ext-all-notheme.css", outputFilenameCSS)
    # Move files to correct locations
    print "Deleting %s." % outputFilenameCSS
    try:
        os.remove("../ext/resources/css/%s" % outputFilenameCSS)
    except:
        pass
    print "Moving new %s." % outputFilenameCSS
    shutil.move(outputFilenameCSS, "../ext/resources/css")
    print "Ext Themes CSS"
    outputFilenameCSS = "xtheme-ifrc.min.css"
    # Compress CSS file
    print "Writing to %s." % outputFilenameCSS
    compressCSS("../../themes/IFRC/xtheme-ifrc.css", outputFilenameCSS)
    # Move files to correct locations
    print "Deleting %s." % outputFilenameCSS
    try:
        os.remove("../../themes/IFRC/%s" % outputFilenameCSS)
    except:
        pass
    print "Moving new %s." % outputFilenameCSS
    shutil.move(outputFilenameCSS, "../../themes/IFRC")
def main(argv):
    """
    Command-line entry point.

    argv[0] (optional): "ALL" (default) or "NOWARN" builds JS then CSS;
        "CSS"/"css" builds CSS only; any other value builds JS (passing
        the flag derived from argv[1]) then CSS.
    argv[1] (optional): "DOGIS" sets the first dojs() argument to True
        (presumably "also build the GIS bundles" -- confirm against dojs).
    "NOWARN" anywhere in argv disables closure compiler warnings.
    """
    if len(argv) > 0:
        parameter1 = argv[0]
    else:
        parameter1 = "ALL"
    if len(argv) > 1:
        if(argv[1] == "DOGIS"):
            parameter2 = True
        else:
            parameter2 = False
    else:
        # No second argument: default the flag to True
        parameter2 = True
    closure_warnings = True
    if "NOWARN" in argv:
        closure_warnings = False
    if parameter1 in ("ALL", "NOWARN"):
        dojs(warnings=closure_warnings)
        docss()
    else:
        if parameter1 in ("CSS", "css"):
            docss()
        else:
            dojs(parameter2, warnings=closure_warnings)
            docss()
    print "Done."
# Script entry point: forward CLI args (minus the program name) to main().
# main() has no return statement, so sys.exit(None) yields exit status 0.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| |
# -*- coding: utf-8 -*-
'''
Module for managing locales on POSIX-like systems.
'''
from __future__ import absolute_import
# Import python libs
import logging
import re
import os
HAS_DBUS = False
try:
import dbus
HAS_DBUS = True
except ImportError:
pass
# Import salt libs
import salt.utils
import salt.utils.locales
import salt.utils.systemd
import salt.ext.six as six
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'locale'
def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    # Windows is the only explicitly unsupported platform.
    if not salt.utils.is_windows():
        return __virtualname__
    return (False, 'Cannot load locale module: windows platforms are unsupported')
def _parse_dbus_locale():
    '''
    Get the 'System Locale' parameters from dbus
    '''
    # Query systemd-localed's Locale property over the system bus.
    system_bus = dbus.SystemBus()
    localed_obj = system_bus.get_object('org.freedesktop.locale1',
                                        '/org/freedesktop/locale1')
    props = dbus.Interface(localed_obj, 'org.freedesktop.DBus.Properties')
    locale_entries = props.Get('org.freedesktop.locale1', 'Locale')
    parsed = {}
    # Each entry is expected to look like KEY="value"; strip the quotes.
    for entry in locale_entries:
        found = re.match('^([A-Z_]+)=(.*)$', entry)
        if not found:
            log.error('Odd locale parameter "{0}" detected in dbus locale '
                      'output. This should not happen. You should '
                      'probably investigate what caused this.'.format(
                          entry))
            continue
        parsed[found.group(1)] = found.group(2).replace('"', '')
    return parsed
def _parse_localectl():
    '''
    Get the 'System Locale' parameters from localectl

    :raises CommandExecutionError: if no locale parameters could be parsed
        from the ``localectl`` output.
    '''
    ret = {}
    localectl_out = __salt__['cmd.run']('localectl')
    reading_locale = False
    for line in localectl_out.splitlines():
        if 'System Locale:' in line:
            # Strip the header so any KEY=value on the same line parses too.
            line = line.replace('System Locale:', '')
            reading_locale = True
        if not reading_locale:
            # Skip everything before the 'System Locale:' section.
            continue
        match = re.match('^([A-Z_]+)=(.*)$', line.strip())
        if not match:
            # First non KEY=value line ends the locale section.
            break
        ret[match.group(1)] = match.group(2).replace('"', '')
    # BUGFIX: the previous for/else raised whenever the loop finished
    # without a break -- i.e. even after a successful parse when the locale
    # section extended to the last line of output. Only treat an empty
    # result as a parse failure.
    if not ret:
        raise CommandExecutionError('Could not find system locale - could not '
                                    'parse localectl output\n{0}'.format(localectl_out))
    return ret
def _localectl_set(locale=''):
    '''
    Use systemd's localectl command to set the LANG locale parameter, making
    sure not to trample on other params that have been set.
    '''
    # Start from the currently-set parameters so everything except LANG
    # is preserved in the set-locale call.
    if HAS_DBUS:
        locale_params = _parse_dbus_locale()
    else:
        locale_params = _parse_localectl()
    locale_params['LANG'] = str(locale)
    assignments = ('{0}="{1}"'.format(key, value)
                   for key, value in six.iteritems(locale_params))
    cmd = 'localectl set-locale {0}'.format(' '.join(assignments))
    return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
def list_avail():
    '''
    Lists available (compiled) locales

    CLI Example:

    .. code-block:: bash

        salt '*' locale.list_avail
    '''
    # One locale per output line of `locale -a`.
    return __salt__['cmd.run']('locale -a').split('\n')
def get_locale():
    '''
    Get the current system locale

    CLI Example:

    .. code-block:: bash

        salt '*' locale.get_locale
    '''
    cmd = ''
    # With systemd, ask localed (via dbus when the bindings are available)
    if salt.utils.systemd.booted(__context__):
        params = _parse_dbus_locale() if HAS_DBUS else _parse_localectl()
        return params.get('LANG', '')
    elif 'RedHat' in __grains__['os_family']:
        cmd = 'grep "^LANG=" /etc/sysconfig/i18n'
    elif 'Suse' in __grains__['os_family']:
        cmd = 'grep "^RC_LANG" /etc/sysconfig/language'
    elif 'Debian' in __grains__['os_family']:
        # this block only applies to Debian without systemd
        cmd = 'grep "^LANG=" /etc/default/locale'
    elif 'Gentoo' in __grains__['os_family']:
        cmd = 'eselect --brief locale show'
        # eselect prints the bare locale value, so no KEY=value parsing
        return __salt__['cmd.run'](cmd).strip()
    elif 'Solaris' in __grains__['os_family']:
        cmd = 'grep "^LANG=" /etc/default/init'
    else: # don't waste time on a failing cmd.run
        raise CommandExecutionError('Error: Unsupported platform!')
    try:
        # grep output looks like LANG="en_US.UTF-8"; keep the value only
        return __salt__['cmd.run'](cmd).split('=')[1].replace('"', '')
    except IndexError:
        # No '=' in the output (grep matched nothing): no locale configured
        return ''
def set_locale(locale):
    '''
    Sets the current system locale

    Returns True on success; on Gentoo/systemd the underlying command's
    success is returned instead.

    CLI Example:

    .. code-block:: bash

        salt '*' locale.set_locale 'en_US.UTF-8'
    '''
    # On systemd hosts, delegate to localectl
    if salt.utils.systemd.booted(__context__):
        return _localectl_set(locale)
    elif 'RedHat' in __grains__['os_family']:
        # Ensure the config file exists before editing it in place
        if not __salt__['file.file_exists']('/etc/sysconfig/i18n'):
            __salt__['file.touch']('/etc/sysconfig/i18n')
        __salt__['file.replace'](
            '/etc/sysconfig/i18n',
            '^LANG=.*',
            'LANG="{0}"'.format(locale),
            append_if_not_found=True
        )
    elif 'Suse' in __grains__['os_family']:
        if not __salt__['file.file_exists']('/etc/sysconfig/language'):
            __salt__['file.touch']('/etc/sysconfig/language')
        __salt__['file.replace'](
            '/etc/sysconfig/language',
            '^RC_LANG=.*',
            'RC_LANG="{0}"'.format(locale),
            append_if_not_found=True
        )
    elif 'Debian' in __grains__['os_family']:
        # this block only applies to Debian without systemd
        update_locale = salt.utils.which('update-locale')
        if update_locale is None:
            raise CommandExecutionError(
                'Cannot set locale: "update-locale" was not found.')
        __salt__['cmd.run'](update_locale)  # (re)generate /etc/default/locale
        # FIXME: why are we writing to a file that is dynamically generated?
        __salt__['file.replace'](
            '/etc/default/locale',
            '^LANG=.*',
            'LANG="{0}"'.format(locale),
            append_if_not_found=True
        )
    elif 'Gentoo' in __grains__['os_family']:
        cmd = 'eselect --brief locale set {0}'.format(locale)
        return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
    elif 'Solaris' in __grains__['os_family']:
        # Solaris only accepts locales that are already compiled
        if locale not in __salt__['locale.list_avail']():
            return False
        __salt__['file.replace'](
            '/etc/default/init',
            '^LANG=.*',
            'LANG="{0}"'.format(locale),
            append_if_not_found=True
        )
    else:
        raise CommandExecutionError('Error: Unsupported platform!')
    return True
def avail(locale):
    '''
    Check if a locale is available.

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' locale.avail 'en_US.UTF-8'
    '''
    try:
        normalized_locale = salt.utils.locales.normalize_locale(locale)
    except IndexError:
        log.error('Unable to validate locale "{0}"'.format(locale))
        return False
    # Compare normalized forms so spelling variants still match.
    return any(
        salt.utils.locales.normalize_locale(candidate.strip()) == normalized_locale
        for candidate in __salt__['locale.list_avail']()
    )
def gen_locale(locale, **kwargs):
    '''
    Generate a locale. Options:

    .. versionadded:: 2014.7.0

    :param locale: Any locale listed in /usr/share/i18n/locales or
        /usr/share/i18n/SUPPORTED for Debian and Gentoo based distributions,
        which require the charmap to be specified as part of the locale
        when generating it.

    verbose
        Show extra warnings about errors that are normally ignored.

    CLI Example:

    .. code-block:: bash

        salt '*' locale.gen_locale en_US.UTF-8
        salt '*' locale.gen_locale 'en_IE.UTF-8 UTF-8' # Debian/Gentoo only
    '''
    on_debian = __grains__.get('os') == 'Debian'
    on_ubuntu = __grains__.get('os') == 'Ubuntu'
    on_gentoo = __grains__.get('os_family') == 'Gentoo'
    on_suse = __grains__.get('os_family') == 'Suse'
    on_solaris = __grains__.get('os_family') == 'Solaris'
    if on_solaris:  # all locales are pre-generated
        return locale in __salt__['locale.list_avail']()
    locale_info = salt.utils.locales.split_locale(locale)
    # if the charmap has not been supplied, normalize by appending it
    if not locale_info['charmap'] and not on_ubuntu:
        locale_info['charmap'] = locale_info['codeset']
        locale = salt.utils.locales.join_locale(locale_info)
    # Validate the requested locale against what the platform can generate
    if on_debian or on_gentoo:  # file-based search
        search = '/usr/share/i18n/SUPPORTED'
        valid = __salt__['file.search'](search,
                                        '^{0}$'.format(locale),
                                        flags=re.MULTILINE)
    else:  # directory-based search
        if on_suse:
            search = '/usr/share/locale'
        else:
            search = '/usr/share/i18n/locales'
        try:
            valid = "{0}_{1}".format(locale_info['language'],
                                     locale_info['territory']) in os.listdir(search)
        except OSError as ex:
            # The search directory itself is missing
            log.error(ex)
            raise CommandExecutionError(
                "Locale \"{0}\" is not available.".format(locale))
    if not valid:
        log.error(
            'The provided locale "{0}" is not found in {1}'.format(locale, search))
        return False
    # Register the locale so the generator will pick it up:
    # uncomment it in /etc/locale.gen, or append it if not present
    if os.path.exists('/etc/locale.gen'):
        __salt__['file.replace'](
            '/etc/locale.gen',
            r'^\s*#\s*{0}\s*$'.format(locale),
            '{0}\n'.format(locale),
            append_if_not_found=True
        )
    elif on_ubuntu:
        # Ubuntu keeps per-language lists under /var/lib/locales/supported.d
        __salt__['file.touch'](
            '/var/lib/locales/supported.d/{0}'.format(locale_info['language'])
        )
        __salt__['file.replace'](
            '/var/lib/locales/supported.d/{0}'.format(locale_info['language']),
            locale,
            locale,
            append_if_not_found=True
        )
    # Prefer locale-gen; fall back to localedef
    if salt.utils.which("locale-gen") is not None:
        cmd = ['locale-gen']
        if on_gentoo:
            cmd.append('--generate')
        cmd.append(locale)
    elif salt.utils.which("localedef") is not None:
        cmd = ['localedef', '--force',
               '-i', "{0}_{1}".format(locale_info['language'],
                                      locale_info['territory']),
               '-f', locale_info['codeset'],
               '{0}_{1}.{2}'.format(locale_info['language'],
                                    locale_info['territory'],
                                    locale_info['codeset'])]
        cmd.append(kwargs.get('verbose', False) and '--verbose' or '--quiet')
    else:
        raise CommandExecutionError(
            'Command "locale-gen" or "localedef" was not found on this system.')
    res = __salt__['cmd.run_all'](cmd)
    if res['retcode']:
        log.error(res['stderr'])
    if kwargs.get('verbose'):
        # Full result dict on request, otherwise a plain success boolean
        return res
    else:
        return res['retcode'] == 0
| |
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import math
from io import BytesIO
from s3transfer.compat import readable, seekable
from s3transfer.futures import IN_MEMORY_UPLOAD_TAG
from s3transfer.tasks import (
CompleteMultipartUploadTask,
CreateMultipartUploadTask,
SubmissionTask,
Task,
)
from s3transfer.utils import (
ChunksizeAdjuster,
DeferredOpenFile,
get_callbacks,
get_filtered_dict,
)
class AggregatedProgressCallback:
    """Coalesces many small progress updates into fewer callback calls.

    Byte counts are accumulated locally and only forwarded to the
    registered callbacks once the running total reaches ``threshold``.

    :type callbacks: list of callables accepting a ``bytes_transferred``
        keyword argument
    :param callbacks: The callbacks to invoke once the threshold is reached
    :type threshold: int
    :param threshold: Number of bytes to accumulate before the callbacks
        are invoked with the aggregated total
    """

    def __init__(self, callbacks, threshold=1024 * 256):
        self._callbacks = callbacks
        self._threshold = threshold
        self._bytes_seen = 0

    def __call__(self, bytes_transferred):
        self._bytes_seen += bytes_transferred
        if self._bytes_seen < self._threshold:
            return
        self._trigger_callbacks()

    def flush(self):
        """Forward any accumulated progress that has not been sent yet."""
        if self._bytes_seen > 0:
            self._trigger_callbacks()

    def _trigger_callbacks(self):
        # Report the aggregated total to every callback, then reset the
        # accumulator for the next batch.
        for notify in self._callbacks:
            notify(bytes_transferred=self._bytes_seen)
        self._bytes_seen = 0
class InterruptReader:
    """File-like wrapper that aborts reads once a transfer has failed.

    Each ``read`` first consults the transfer coordinator; if it carries an
    exception, that exception is raised instead of returning more data.
    Raising (rather than returning empty bytes) matters because requests
    that already sent a content length and md5 would otherwise surface as
    md5 mismatches and retries, with no indication that the source stream
    hit a problem.

    :type fileobj: file-like obj
    :param fileobj: The file-like object to read from

    :type transfer_coordinator: s3transfer.futures.TransferCoordinator
    :param transfer_coordinator: Coordinator consulted on every read to
        decide whether the read should be interrupted.
    """

    def __init__(self, fileobj, transfer_coordinator):
        self._fileobj = fileobj
        self._transfer_coordinator = transfer_coordinator

    def read(self, amount=None):
        # Propagate a recorded failure instead of serving more bytes
        # (see class docstring for why raising is required here).
        pending_exc = self._transfer_coordinator.exception
        if pending_exc:
            raise pending_exc
        return self._fileobj.read(amount)

    def seek(self, where, whence=0):
        self._fileobj.seek(where, whence)

    def tell(self):
        return self._fileobj.tell()

    def close(self):
        self._fileobj.close()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()
class UploadInputManager:
    """Base manager class for handling various types of files for uploads

    This class is typically used for the UploadSubmissionTask class to help
    determine the following:

        * How to determine the size of the file
        * How to determine if a multipart upload is required
        * How to retrieve the body for a PutObject
        * How to retrieve the bodies for a set of UploadParts

    The answers/implementations differ for the various types of file inputs
    that may be accepted. All implementations must subclass and override
    public methods from this class.
    """

    def __init__(self, osutil, transfer_coordinator, bandwidth_limiter=None):
        self._osutil = osutil
        self._transfer_coordinator = transfer_coordinator
        self._bandwidth_limiter = bandwidth_limiter

    @classmethod
    def is_compatible(cls, upload_source):
        """Determines if the source for the upload is compatible with manager

        :param upload_source: The source for which the upload will pull data
            from.

        :returns: True if the manager can handle the type of source specified
            otherwise returns False.
        """
        # BUGFIX: message previously named a nonexistent _is_compatible()
        raise NotImplementedError('must implement is_compatible()')

    def stores_body_in_memory(self, operation_name):
        """Whether the body it provides are stored in-memory

        :type operation_name: str
        :param operation_name: The name of the client operation that the body
            is being used for. Valid operation_names are ``put_object`` and
            ``upload_part``.

        :rtype: boolean
        :returns: True if the body returned by the manager will be stored in
            memory. False if the manager will not directly store the body in
            memory.
        """
        # BUGFIX: message previously named a nonexistent store_body_in_memory()
        raise NotImplementedError('must implement stores_body_in_memory()')

    def provide_transfer_size(self, transfer_future):
        """Provides the transfer size of an upload

        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The future associated with upload request
        """
        raise NotImplementedError('must implement provide_transfer_size()')

    def requires_multipart_upload(self, transfer_future, config):
        """Determines whether a multipart upload is required

        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The future associated with upload request

        :type config: s3transfer.manager.TransferConfig
        :param config: The config associated to the transfer manager

        :rtype: boolean
        :returns: True, if the upload should be multipart based on
            configuration and size. False, otherwise.
        """
        raise NotImplementedError('must implement requires_multipart_upload()')

    def get_put_object_body(self, transfer_future):
        """Returns the body to use for PutObject

        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The future associated with upload request

        :rtype: s3transfer.utils.ReadFileChunk
        :returns: A ReadFileChunk including all progress callbacks
            associated with the transfer future.
        """
        raise NotImplementedError('must implement get_put_object_body()')

    def yield_upload_part_bodies(self, transfer_future, chunksize):
        """Yields the part number and body to use for each UploadPart

        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The future associated with upload request

        :type chunksize: int
        :param chunksize: The chunksize to use for this upload.

        :rtype: int, s3transfer.utils.ReadFileChunk
        :returns: Yields the part number and the ReadFileChunk including all
            progress callbacks associated with the transfer future for that
            specific yielded part.
        """
        raise NotImplementedError('must implement yield_upload_part_bodies()')

    def _wrap_fileobj(self, fileobj):
        # Allow a failed transfer to interrupt in-flight reads.
        fileobj = InterruptReader(fileobj, self._transfer_coordinator)
        if self._bandwidth_limiter:
            # NOTE(review): the stream starts with limiting disabled
            # (enabled=False) -- presumably switched on once the request
            # actually starts; confirm against the bandwidth limiter.
            fileobj = self._bandwidth_limiter.get_bandwith_limited_stream(
                fileobj, self._transfer_coordinator, enabled=False
            )
        return fileobj

    def _get_progress_callbacks(self, transfer_future):
        callbacks = get_callbacks(transfer_future, 'progress')
        # We only want to be wrapping the callbacks if there are callbacks to
        # invoke because we do not want to be doing any unnecessary work if
        # there are no callbacks to invoke.
        if callbacks:
            return [AggregatedProgressCallback(callbacks)]
        return []

    def _get_close_callbacks(self, aggregated_progress_callbacks):
        # Flushing on close reports any remainder below the aggregation
        # threshold.
        return [callback.flush for callback in aggregated_progress_callbacks]
class UploadFilenameInputManager(UploadInputManager):
    """Upload input manager for uploads sourced from a filename string."""

    @classmethod
    def is_compatible(cls, upload_source):
        # A filename source is simply a str path.
        return isinstance(upload_source, str)

    def stores_body_in_memory(self, operation_name):
        # File-backed bodies are streamed from disk, never buffered whole.
        return False

    def provide_transfer_size(self, transfer_future):
        """Record the on-disk file size on the transfer future."""
        path = transfer_future.meta.call_args.fileobj
        transfer_future.meta.provide_transfer_size(
            self._osutil.get_file_size(path)
        )

    def requires_multipart_upload(self, transfer_future, config):
        # Multipart kicks in once the size reaches the configured threshold.
        return transfer_future.meta.size >= config.multipart_threshold

    def get_put_object_body(self, transfer_future):
        """Build the single streaming body used for a PutObject call.

        :rtype: s3transfer.utils.ReadFileChunk
        :returns: A ReadFileChunk wired up with all progress callbacks
            associated with the transfer future.
        """
        fileobj, full_size = self._get_put_object_fileobj_with_full_size(
            transfer_future
        )
        # The interrupt reader lets a cancelled transfer abort quickly
        # instead of waiting for the socket to drain the whole body.
        fileobj = self._wrap_fileobj(fileobj)
        progress_callbacks = self._get_progress_callbacks(transfer_future)
        close_callbacks = self._get_close_callbacks(progress_callbacks)
        return self._osutil.open_file_chunk_reader_from_fileobj(
            fileobj=fileobj,
            chunk_size=transfer_future.meta.size,
            full_file_size=full_size,
            callbacks=progress_callbacks,
            close_callbacks=close_callbacks,
        )

    def yield_upload_part_bodies(self, transfer_future, chunksize):
        """Yield ``(part_number, body)`` pairs for a multipart upload."""
        total_size = transfer_future.meta.size
        num_parts = self._get_num_parts(transfer_future, chunksize)
        for part_number in range(1, num_parts + 1):
            progress_callbacks = self._get_progress_callbacks(transfer_future)
            close_callbacks = self._get_close_callbacks(progress_callbacks)
            offset = chunksize * (part_number - 1)
            # A per-part file-like object plus the full size its reader
            # should report.
            fileobj, full_size = self._get_upload_part_fileobj_with_full_size(
                transfer_future.meta.call_args.fileobj,
                start_byte=offset,
                part_size=chunksize,
                full_file_size=total_size,
            )
            # Interrupt reader: lets cancellation abort mid-part quickly.
            fileobj = self._wrap_fileobj(fileobj)
            yield part_number, self._osutil.open_file_chunk_reader_from_fileobj(
                fileobj=fileobj,
                chunk_size=chunksize,
                full_file_size=full_size,
                callbacks=progress_callbacks,
                close_callbacks=close_callbacks,
            )

    def _get_deferred_open_file(self, fileobj, start_byte):
        # Defer the actual open() until first read so file handles are not
        # held while the part waits in the submission queue.
        return DeferredOpenFile(
            fileobj, start_byte, open_function=self._osutil.open
        )

    def _get_put_object_fileobj_with_full_size(self, transfer_future):
        path = transfer_future.meta.call_args.fileobj
        return (
            self._get_deferred_open_file(path, 0),
            transfer_future.meta.size,
        )

    def _get_upload_part_fileobj_with_full_size(self, fileobj, **kwargs):
        return (
            self._get_deferred_open_file(fileobj, kwargs['start_byte']),
            kwargs['full_file_size'],
        )

    def _get_num_parts(self, transfer_future, part_size):
        # Ceiling division of the total size by the part size.
        return int(math.ceil(transfer_future.meta.size / float(part_size)))
class UploadSeekableInputManager(UploadFilenameInputManager):
    """Upload input manager for an already-open, seekable file object."""

    @classmethod
    def is_compatible(cls, upload_source):
        # Needs both read() and seek() support.
        return readable(upload_source) and seekable(upload_source)

    def stores_body_in_memory(self, operation_name):
        # PutObject streams straight from the caller's file object; part
        # bodies are chunked into in-memory buffers (see
        # _get_upload_part_fileobj_with_full_size).
        return operation_name != 'put_object'

    def provide_transfer_size(self, transfer_future):
        """Record the byte count from the current position to EOF."""
        fileobj = transfer_future.meta.call_args.fileobj
        start = fileobj.tell()
        fileobj.seek(0, 2)
        end = fileobj.tell()
        # Restore the caller's position before any reads happen.
        fileobj.seek(start)
        transfer_future.meta.provide_transfer_size(end - start)

    def _get_upload_part_fileobj_with_full_size(self, fileobj, **kwargs):
        # os.dup would share a single OS-level offset across threads, so a
        # multithreaded multipart upload cannot simply clone the file-like
        # object. Instead, each part is read out into its own in-memory
        # buffer.
        chunk = fileobj.read(kwargs['part_size'])
        # The BytesIO knows nothing about its position within the original
        # stream, so it is treated as a standalone file whose full size is
        # just the chunk length (not full_file_size).
        return BytesIO(chunk), len(chunk)

    def _get_put_object_fileobj_with_full_size(self, transfer_future):
        fileobj = transfer_future.meta.call_args.fileobj
        # Account for the caller's current offset when reporting the
        # full size of the underlying stream.
        return fileobj, fileobj.tell() + transfer_future.meta.size
class UploadNonSeekableInputManager(UploadInputManager):
"""Upload utility for a file-like object that cannot seek."""
    def __init__(self, osutil, transfer_coordinator, bandwidth_limiter=None):
        super().__init__(osutil, transfer_coordinator, bandwidth_limiter)
        # Bytes read ahead while sizing up the stream (see
        # requires_multipart_upload); served again later by _read().
        self._initial_data = b''
    @classmethod
    def is_compatible(cls, upload_source):
        # Only read() support is required here; seekability is not.
        return readable(upload_source)
    def stores_body_in_memory(self, operation_name):
        # Non-seekable input is always buffered into memory, for both
        # put_object and upload_part bodies.
        return True
    def provide_transfer_size(self, transfer_future):
        """Intentionally a no-op for non-seekable streams."""
        # No-op because there is no way to do this short of reading the entire
        # body into memory.
        return
def requires_multipart_upload(self, transfer_future, config):
# If the user has set the size, we can use that.
if transfer_future.meta.size is not None:
return transfer_future.meta.size >= config.multipart_threshold
# This is tricky to determine in this case because we can't know how
# large the input is. So to figure it out, we read data into memory
# up until the threshold and compare how much data was actually read
# against the threshold.
fileobj = transfer_future.meta.call_args.fileobj
threshold = config.multipart_threshold
self._initial_data = self._read(fileobj, threshold, False)
if len(self._initial_data) < threshold:
return False
else:
return True
def get_put_object_body(self, transfer_future):
callbacks = self._get_progress_callbacks(transfer_future)
close_callbacks = self._get_close_callbacks(callbacks)
fileobj = transfer_future.meta.call_args.fileobj
body = self._wrap_data(
self._initial_data + fileobj.read(), callbacks, close_callbacks
)
# Zero out the stored data so we don't have additional copies
# hanging around in memory.
self._initial_data = None
return body
def yield_upload_part_bodies(self, transfer_future, chunksize):
file_object = transfer_future.meta.call_args.fileobj
part_number = 0
# Continue reading parts from the file-like object until it is empty.
while True:
callbacks = self._get_progress_callbacks(transfer_future)
close_callbacks = self._get_close_callbacks(callbacks)
part_number += 1
part_content = self._read(file_object, chunksize)
if not part_content:
break
part_object = self._wrap_data(
part_content, callbacks, close_callbacks
)
# Zero out part_content to avoid hanging on to additional data.
part_content = None
yield part_number, part_object
    def _read(self, fileobj, amount, truncate=True):
        """
        Reads a specific amount of data from a stream and returns it. If there
        is any data in initial_data, that will be popped out first.
        :type fileobj: A file-like object that implements read
        :param fileobj: The stream to read from.
        :type amount: int
        :param amount: The number of bytes to read from the stream.
        :type truncate: bool
        :param truncate: Whether or not to truncate initial_data after
            reading from it.
        :return: Bytes, served from ``self._initial_data`` first and then
            from ``fileobj``. May be shorter than ``amount`` if the stream
            is exhausted.
        """
        # If the initial data is empty, we simply read from the fileobj
        if len(self._initial_data) == 0:
            return fileobj.read(amount)
        # If the requested number of bytes is less than the amount of
        # initial data, pull entirely from initial data.
        if amount <= len(self._initial_data):
            data = self._initial_data[:amount]
            # Truncate initial data so we don't hang onto the data longer
            # than we need.
            if truncate:
                self._initial_data = self._initial_data[amount:]
            return data
        # At this point there is some initial data left, but not enough to
        # satisfy the number of bytes requested. Pull out the remaining
        # initial data and read the rest from the fileobj.
        amount_to_read = amount - len(self._initial_data)
        data = self._initial_data + fileobj.read(amount_to_read)
        # Zero out initial data so we don't hang onto the data any more.
        if truncate:
            self._initial_data = b''
        return data
def _wrap_data(self, data, callbacks, close_callbacks):
"""
Wraps data with the interrupt reader and the file chunk reader.
:type data: bytes
:param data: The data to wrap.
:type callbacks: list
:param callbacks: The callbacks associated with the transfer future.
:type close_callbacks: list
:param close_callbacks: The callbacks to be called when closing the
wrapper for the data.
:return: Fully wrapped data.
"""
fileobj = self._wrap_fileobj(BytesIO(data))
return self._osutil.open_file_chunk_reader_from_fileobj(
fileobj=fileobj,
chunk_size=len(data),
full_file_size=len(data),
callbacks=callbacks,
close_callbacks=close_callbacks,
)
class UploadSubmissionTask(SubmissionTask):
    """Task for submitting tasks to execute an upload"""
    # Subset of the caller's extra_args forwarded on each UploadPart call.
    UPLOAD_PART_ARGS = [
        'ChecksumAlgorithm',
        'SSECustomerKey',
        'SSECustomerAlgorithm',
        'SSECustomerKeyMD5',
        'RequestPayer',
        'ExpectedBucketOwner',
    ]
    # Subset of extra_args forwarded to CompleteMultipartUpload.
    COMPLETE_MULTIPART_ARGS = ['RequestPayer', 'ExpectedBucketOwner']
    def _get_upload_input_manager_cls(self, transfer_future):
        """Retrieves a class for managing input for an upload based on file type
        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The transfer future for the request
        :rtype: class of UploadInputManager
        :returns: The appropriate class to use for managing a specific type of
            input for uploads.
        """
        # Ordered most specific first; the first manager whose
        # is_compatible() accepts the fileobj wins.
        upload_manager_resolver_chain = [
            UploadFilenameInputManager,
            UploadSeekableInputManager,
            UploadNonSeekableInputManager,
        ]
        fileobj = transfer_future.meta.call_args.fileobj
        for upload_manager_cls in upload_manager_resolver_chain:
            if upload_manager_cls.is_compatible(fileobj):
                return upload_manager_cls
        raise RuntimeError(
            'Input {} of type: {} is not supported.'.format(
                fileobj, type(fileobj)
            )
        )
    def _submit(
        self,
        client,
        config,
        osutil,
        request_executor,
        transfer_future,
        bandwidth_limiter=None,
    ):
        """
        :param client: The client associated with the transfer manager
        :type config: s3transfer.manager.TransferConfig
        :param config: The transfer config associated with the transfer
            manager
        :type osutil: s3transfer.utils.OSUtil
        :param osutil: The os utility associated to the transfer manager
        :type request_executor: s3transfer.futures.BoundedExecutor
        :param request_executor: The request executor associated with the
            transfer manager
        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The transfer future associated with the
            transfer request that tasks are being submitted for
        :param bandwidth_limiter: Optional limiter handed to the input
            manager to throttle upload bandwidth.
        """
        upload_input_manager = self._get_upload_input_manager_cls(
            transfer_future
        )(osutil, self._transfer_coordinator, bandwidth_limiter)
        # Determine the size if it was not provided
        if transfer_future.meta.size is None:
            upload_input_manager.provide_transfer_size(transfer_future)
        # Do a multipart upload if needed, otherwise do a regular put object.
        if not upload_input_manager.requires_multipart_upload(
            transfer_future, config
        ):
            self._submit_upload_request(
                client,
                config,
                osutil,
                request_executor,
                transfer_future,
                upload_input_manager,
            )
        else:
            self._submit_multipart_request(
                client,
                config,
                osutil,
                request_executor,
                transfer_future,
                upload_input_manager,
            )
    def _submit_upload_request(
        self,
        client,
        config,
        osutil,
        request_executor,
        transfer_future,
        upload_input_manager,
    ):
        # Submit a single PutObject task covering the whole body.
        call_args = transfer_future.meta.call_args
        # Get any tags that need to be associated to the put object task
        put_object_tag = self._get_upload_task_tag(
            upload_input_manager, 'put_object'
        )
        # Submit the request of a single upload.
        self._transfer_coordinator.submit(
            request_executor,
            PutObjectTask(
                transfer_coordinator=self._transfer_coordinator,
                main_kwargs={
                    'client': client,
                    'fileobj': upload_input_manager.get_put_object_body(
                        transfer_future
                    ),
                    'bucket': call_args.bucket,
                    'key': call_args.key,
                    'extra_args': call_args.extra_args,
                },
                is_final=True,
            ),
            tag=put_object_tag,
        )
    def _submit_multipart_request(
        self,
        client,
        config,
        osutil,
        request_executor,
        transfer_future,
        upload_input_manager,
    ):
        # Orchestrates CreateMultipartUpload -> UploadPart(s) ->
        # CompleteMultipartUpload; the create future is injected into the
        # dependent tasks via pending_main_kwargs.
        call_args = transfer_future.meta.call_args
        # Submit the request to create a multipart upload.
        create_multipart_future = self._transfer_coordinator.submit(
            request_executor,
            CreateMultipartUploadTask(
                transfer_coordinator=self._transfer_coordinator,
                main_kwargs={
                    'client': client,
                    'bucket': call_args.bucket,
                    'key': call_args.key,
                    'extra_args': call_args.extra_args,
                },
            ),
        )
        # Submit requests to upload the parts of the file.
        part_futures = []
        extra_part_args = self._extra_upload_part_args(call_args.extra_args)
        # Get any tags that need to be associated to the submitted task
        # for upload the data
        upload_part_tag = self._get_upload_task_tag(
            upload_input_manager, 'upload_part'
        )
        size = transfer_future.meta.size
        adjuster = ChunksizeAdjuster()
        chunksize = adjuster.adjust_chunksize(config.multipart_chunksize, size)
        part_iterator = upload_input_manager.yield_upload_part_bodies(
            transfer_future, chunksize
        )
        for part_number, fileobj in part_iterator:
            part_futures.append(
                self._transfer_coordinator.submit(
                    request_executor,
                    UploadPartTask(
                        transfer_coordinator=self._transfer_coordinator,
                        main_kwargs={
                            'client': client,
                            'fileobj': fileobj,
                            'bucket': call_args.bucket,
                            'key': call_args.key,
                            'part_number': part_number,
                            'extra_args': extra_part_args,
                        },
                        pending_main_kwargs={
                            'upload_id': create_multipart_future
                        },
                    ),
                    tag=upload_part_tag,
                )
            )
        complete_multipart_extra_args = self._extra_complete_multipart_args(
            call_args.extra_args
        )
        # Submit the request to complete the multipart upload.
        self._transfer_coordinator.submit(
            request_executor,
            CompleteMultipartUploadTask(
                transfer_coordinator=self._transfer_coordinator,
                main_kwargs={
                    'client': client,
                    'bucket': call_args.bucket,
                    'key': call_args.key,
                    'extra_args': complete_multipart_extra_args,
                },
                pending_main_kwargs={
                    'upload_id': create_multipart_future,
                    'parts': part_futures,
                },
                is_final=True,
            ),
        )
    def _extra_upload_part_args(self, extra_args):
        # Only the args in UPLOAD_PART_ARGS actually need to be passed
        # onto the upload_part calls.
        return get_filtered_dict(extra_args, self.UPLOAD_PART_ARGS)
    def _extra_complete_multipart_args(self, extra_args):
        # Filter extra_args down to what CompleteMultipartUpload accepts.
        return get_filtered_dict(extra_args, self.COMPLETE_MULTIPART_ARGS)
    def _get_upload_task_tag(self, upload_input_manager, operation_name):
        # Tag tasks whose body lives in memory so the executor can bound
        # concurrent in-memory uploads.
        tag = None
        if upload_input_manager.stores_body_in_memory(operation_name):
            tag = IN_MEMORY_UPLOAD_TAG
        return tag
class PutObjectTask(Task):
    """Task to do a nonmultipart upload"""
    def _main(self, client, fileobj, bucket, key, extra_args):
        """
        :param client: The client to use when calling PutObject
        :param fileobj: The file to upload.
        :param bucket: The name of the bucket to upload to
        :param key: The name of the key to upload to
        :param extra_args: A dictionary of any extra arguments that may be
            used in the upload.
        """
        # The context manager guarantees the wrapped body (and its close
        # callbacks) is released even if put_object raises.
        with fileobj as body:
            client.put_object(Bucket=bucket, Key=key, Body=body, **extra_args)
class UploadPartTask(Task):
    """Task to upload a part in a multipart upload"""
    def _main(
        self, client, fileobj, bucket, key, upload_id, part_number, extra_args
    ):
        """
        :param client: The client to use when calling UploadPart
        :param fileobj: The file to upload.
        :param bucket: The name of the bucket to upload to
        :param key: The name of the key to upload to
        :param upload_id: The id of the upload
        :param part_number: The number representing the part of the multipart
            upload
        :param extra_args: A dictionary of any extra arguments that may be
            used in the upload.
        :rtype: dict
        :returns: A dictionary representing a part::
            {'Etag': etag_value, 'PartNumber': part_number}
            This value can be appended to a list to be used to complete
            the multipart upload. If a checksum algorithm was requested,
            the matching Checksum* member from the response is included
            as well.
        """
        # Close the wrapped body (firing its close callbacks) even if the
        # request fails.
        with fileobj as body:
            response = client.upload_part(
                Bucket=bucket,
                Key=key,
                UploadId=upload_id,
                PartNumber=part_number,
                Body=body,
                **extra_args
            )
        part_metadata = {'ETag': response['ETag'], 'PartNumber': part_number}
        if 'ChecksumAlgorithm' in extra_args:
            checksum_member = 'Checksum{}'.format(
                extra_args['ChecksumAlgorithm'].upper()
            )
            # Only present when the service actually computed the checksum.
            if checksum_member in response:
                part_metadata[checksum_member] = response[checksum_member]
        return part_metadata
| |
""" Utility functions for sparse matrix module
"""
import math
import operator
import sys
import warnings

import numpy as np

from scipy._lib._util import prod
# Names that make up the public surface of this utility module.
__all__ = ['upcast', 'getdtype', 'isscalarlike', 'isintlike',
           'isshape', 'issequence', 'isdense', 'ismatrix', 'get_sum_dtype']
# Dtypes that sparse matrices can actually hold, in promotion order.
supported_dtypes = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc,
                    np.uintc, np.int_, np.uint, np.longlong, np.ulonglong,
                    np.single, np.double, np.longdouble, np.csingle,
                    np.cdouble, np.clongdouble]

# Memoizes upcast()/upcast_char() results keyed by the argument tuple (or
# its hash); the set of distinct type combinations seen in practice is tiny.
_upcast_memo = {}


def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype

    Examples
    --------
    >>> upcast('int32')
    <class 'numpy.int32'>
    >>> upcast('bool')
    <class 'numpy.bool_'>
    >>> upcast('int32', 'float32')
    <class 'numpy.float64'>
    >>> upcast('bool', complex, float)
    <class 'numpy.complex128'>
    """
    t = _upcast_memo.get(hash(args))
    if t is not None:
        return t

    # np.find_common_type was deprecated in NumPy 1.25 and removed in 2.0;
    # np.result_type performs the same dtype-only promotion for this usage
    # (the original passed an empty scalar-types list).
    upcast = np.result_type(*args)

    # Return the first (i.e. smallest) supported dtype the promoted type
    # can be safely cast to.
    for t in supported_dtypes:
        if np.can_cast(upcast, t):
            _upcast_memo[hash(args)] = t
            return t

    raise TypeError('no supported conversion for types: %r' % (args,))
def upcast_char(*args):
    """Same as `upcast` but taking dtype.char as input (faster)."""
    # Character codes are hashable as-is, so they key the memo directly.
    cached = _upcast_memo.get(args)
    if cached is not None:
        return cached
    result = upcast(*map(np.dtype, args))
    _upcast_memo[args] = result
    return result
def upcast_scalar(dtype, scalar):
    """Determine data type for binary operation between an array of
    type `dtype` and a scalar.
    """
    # Let NumPy's own promotion rules decide by probing with a tiny array.
    probe = np.array([0], dtype=dtype)
    return (probe * scalar).dtype
def downcast_intp_index(arr):
    """
    Down-cast index array to np.intp dtype if it is of a larger dtype.

    Raise an error if the array contains a value that is too large for
    intp.
    """
    if arr.dtype.itemsize <= np.dtype(np.intp).itemsize:
        # Already small enough; hand back the original array untouched.
        return arr
    if arr.size == 0:
        return arr.astype(np.intp)
    # Wider than intp: make sure the values actually fit before casting.
    bounds = np.iinfo(np.intp)
    if arr.max() > bounds.max or arr.min() < bounds.min:
        raise ValueError("Cannot deal with arrays with indices larger "
                         "than the machine maximum address size "
                         "(e.g. 64-bit indices on 32-bit machine).")
    return arr.astype(np.intp)
def to_native(A):
    """Return `A` viewed with native byte order (no copy if already native)."""
    native_dtype = A.dtype.newbyteorder('native')
    return np.asarray(A, dtype=native_dtype)
def getdtype(dtype, a=None, default=None):
    """Function used to simplify argument processing. If 'dtype' is not
    specified (is None), returns a.dtype; otherwise returns a np.dtype
    object created from the specified dtype argument. If 'dtype' and 'a'
    are both None, construct a data type out of the 'default' parameter.
    """
    if dtype is not None:
        newdtype = np.dtype(dtype)
    else:
        try:
            newdtype = a.dtype
        except AttributeError:
            # 'a' missing (or lacking a dtype): fall back to the default.
            if default is None:
                raise TypeError("could not interpret data type")
            newdtype = np.dtype(default)
    if newdtype == np.object_:
        warnings.warn("object dtype is not supported by sparse matrices")
    return newdtype
def get_index_dtype(arrays=(), maxval=None, check_contents=False):
    """
    Based on input (integer) arrays `a`, determine a suitable index data
    type that can hold the data in the arrays.

    Parameters
    ----------
    arrays : tuple of array_like
        Input arrays whose types/contents to check
    maxval : float, optional
        Maximum value needed
    check_contents : bool, optional
        Whether to check the values in the arrays and not just their types.
        Default: False (check only the types)

    Returns
    -------
    dtype : dtype
        Suitable index data type (int32 or int64)
    """
    int32min = np.iinfo(np.int32).min
    int32max = np.iinfo(np.int32).max

    dtype = np.intc
    if maxval is not None and maxval > int32max:
        dtype = np.int64

    # Accept a bare ndarray as a one-element tuple for convenience.
    if isinstance(arrays, np.ndarray):
        arrays = (arrays,)

    for arr in arrays:
        arr = np.asarray(arr)
        if np.can_cast(arr.dtype, np.int32):
            continue
        if check_contents:
            if arr.size == 0:
                # a bigger type not needed
                continue
            if np.issubdtype(arr.dtype, np.integer):
                if int32min <= arr.min() and arr.max() <= int32max:
                    # values fit even though the dtype is wider
                    continue
        dtype = np.int64
        break

    return dtype
def get_sum_dtype(dtype):
    """Mimic numpy's casting for np.sum"""
    # Unsigned types accumulate into np.uint; anything that fits into the
    # default signed integer does so; other dtypes (floats etc.) are kept.
    if dtype.kind == 'u' and np.can_cast(dtype, np.uint):
        return np.uint
    return np.int_ if np.can_cast(dtype, np.int_) else dtype
def isscalarlike(x):
    """Is x either a scalar, an array scalar, or a 0-dim array?"""
    if np.isscalar(x):
        return True
    return isdense(x) and x.ndim == 0
def isintlike(x):
    """Is x appropriate as an index into a sparse matrix? Returns True
    if it can be cast safely to a machine int.
    """
    # Cheap rejection of arrays/sequences; operator.index would also fail
    # on them, but raising and catching the exception is much slower.
    if np.ndim(x) != 0:
        return False
    try:
        operator.index(x)
        return True
    except (TypeError, ValueError):
        pass
    # Not a true integer type: accept values that merely *equal* an int,
    # but warn, since such indices are deprecated.
    try:
        loose_int = bool(int(x) == x)
    except (TypeError, ValueError):
        return False
    if loose_int:
        warnings.warn("Inexact indices into sparse matrices are deprecated",
                      DeprecationWarning)
    return loose_int
def isshape(x, nonneg=False):
    """Is x a valid 2-tuple of dimensions?

    If nonneg, also checks that the dimensions are non-negative.
    """
    try:
        # Assume it's a tuple of matrix dimensions (M, N)
        (M, N) = x
    except Exception:
        return False
    if not (isintlike(M) and isintlike(N)):
        return False
    if np.ndim(M) != 0 or np.ndim(N) != 0:
        return False
    return (not nonneg) or (M >= 0 and N >= 0)
def issequence(t):
    """True for a list/tuple of scalars (or empty) or a 1-D ndarray."""
    if isinstance(t, (list, tuple)):
        return len(t) == 0 or np.isscalar(t[0])
    return isinstance(t, np.ndarray) and t.ndim == 1
def ismatrix(t):
    """True for a non-empty list/tuple whose first row is a sequence, or
    a 2-D ndarray."""
    if isinstance(t, (list, tuple)):
        return len(t) > 0 and issequence(t[0])
    return isinstance(t, np.ndarray) and t.ndim == 2
def isdense(x):
    # A "dense" operand in this module is exactly a NumPy ndarray
    # (including matrix subclasses); everything else is non-dense.
    return isinstance(x, np.ndarray)
def validateaxis(axis):
    """Check that `axis` is one of {-2, -1, 0, 1, None}.

    Raises TypeError for tuples and non-integer types, ValueError for
    out-of-range integers.  Returns None on success.
    """
    if axis is None:
        return
    axis_type = type(axis)
    # NumPy accepts tuples for 'axis', but they are not very useful for
    # sparse matrices given their limited dimensions, so reject them
    # explicitly.
    if axis_type == tuple:
        raise TypeError(("Tuples are not accepted for the 'axis' "
                         "parameter. Please pass in one of the "
                         "following: {-2, -1, 0, 1, None}."))
    # Mirror NumPy's TypeError for non-integral axis values.
    if not np.issubdtype(np.dtype(axis_type), np.integer):
        raise TypeError("axis must be an integer, not {name}"
                        .format(name=axis_type.__name__))
    if not (-2 <= axis <= 1):
        raise ValueError("axis out of range")
def check_shape(args, current_shape=None):
    """Imitate numpy.matrix handling of shape arguments.

    Parameters
    ----------
    args : tuple
        The positional shape arguments: either a single iterable shape or
        the individual dimensions.
    current_shape : tuple, optional
        If given, the existing shape; a single negative entry in the new
        shape is then inferred so the total size is preserved (as in
        ndarray.reshape).

    Returns
    -------
    tuple of int
        The validated two-dimensional shape.

    Raises
    ------
    TypeError
        If no shape is given or an entry is not index-like.
    ValueError
        If the shape is not 2-D, has invalid negative entries, or does not
        match ``current_shape``'s total size.
    """
    if len(args) == 0:
        raise TypeError("function missing 1 required positional argument: "
                        "'shape'")
    elif len(args) == 1:
        try:
            shape_iter = iter(args[0])
        except TypeError:
            # A single scalar dimension, e.g. check_shape((6,)).
            new_shape = (operator.index(args[0]), )
        else:
            new_shape = tuple(operator.index(arg) for arg in shape_iter)
    else:
        new_shape = tuple(operator.index(arg) for arg in args)

    if current_shape is None:
        if len(new_shape) != 2:
            raise ValueError('shape must be a 2-tuple of positive integers')
        elif new_shape[0] < 0 or new_shape[1] < 0:
            raise ValueError("'shape' elements cannot be negative")
    else:
        # Check the current size only if needed.  math.prod replaces the
        # private scipy._lib._util.prod helper with the stdlib equivalent
        # (both return 1 for an empty tuple).
        current_size = math.prod(current_shape)

        # Check for negatives
        negative_indexes = [i for i, x in enumerate(new_shape) if x < 0]
        if len(negative_indexes) == 0:
            new_size = math.prod(new_shape)
            if new_size != current_size:
                raise ValueError('cannot reshape array of size {} into shape {}'
                                 .format(current_size, new_shape))
        elif len(negative_indexes) == 1:
            # Infer the single unknown dimension from the total size.
            skip = negative_indexes[0]
            specified = math.prod(new_shape[0:skip] + new_shape[skip+1:])
            unspecified, remainder = divmod(current_size, specified)
            if remainder != 0:
                err_shape = tuple('newshape' if x < 0 else x for x in new_shape)
                raise ValueError('cannot reshape array of size {} into shape {}'
                                 ''.format(current_size, err_shape))
            new_shape = new_shape[0:skip] + (unspecified,) + new_shape[skip+1:]
        else:
            raise ValueError('can only specify one unknown dimension')

    if len(new_shape) != 2:
        raise ValueError('matrix shape must be two-dimensional')

    return new_shape
def check_reshape_kwargs(kwargs):
    """Unpack keyword arguments for reshape function.

    This is useful because keyword arguments after star arguments are not
    allowed in Python 2, but star keyword arguments are. This function unpacks
    'order' and 'copy' from the star keyword arguments (with defaults) and
    throws an error for any remaining.
    """
    unpacked = (kwargs.pop('order', 'C'), kwargs.pop('copy', False))
    if kwargs:
        # Anything left over was not a recognized reshape keyword.
        raise TypeError('reshape() got unexpected keywords arguments: {}'
                        .format(', '.join(kwargs.keys())))
    return unpacked
def is_pydata_spmatrix(m):
    """
    Check whether object is pydata/sparse matrix, avoiding importing the module.
    """
    # Only look at sys.modules: if 'sparse' was never imported, m cannot
    # be an instance of its SparseArray anyway.
    sparse_mod = sys.modules.get('sparse')
    base_cls = getattr(sparse_mod, 'SparseArray', None)
    if base_cls is None:
        return False
    return isinstance(m, base_cls)
###############################################################################
# Wrappers for NumPy types that are deprecated
def matrix(*args, **kwargs):
    """Construct np.matrix with its deprecation warning suppressed."""
    with warnings.catch_warnings(record=True):
        warnings.filterwarnings(
            'ignore', '.*the matrix subclass is not the recommended way.*')
        result = np.matrix(*args, **kwargs)
    return result
def asmatrix(*args, **kwargs):
    """Call np.asmatrix with its deprecation warning suppressed."""
    with warnings.catch_warnings(record=True):
        warnings.filterwarnings(
            'ignore', '.*the matrix subclass is not the recommended way.*')
        result = np.asmatrix(*args, **kwargs)
    return result
def bmat(*args, **kwargs):
    """Call np.bmat with the matrix-subclass deprecation warning suppressed."""
    with warnings.catch_warnings(record=True):
        warnings.filterwarnings(
            'ignore', '.*the matrix subclass is not the recommended way.*')
        result = np.bmat(*args, **kwargs)
    return result
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TfLite BasicRnnCell wrapper.
TODO(renjieliu): Find a better home for this one.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import tensorflow.lite.python.op_hint as op_hint
from tensorflow.python.keras import activations
from tensorflow.python.keras import initializers
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
@tf_export("lite.experimental.nn.TfLiteRNNCell")
class TfLiteRNNCell(rnn_cell_impl.LayerRNNCell):
"""The most basic RNN cell.
This is used only for TfLite, it provides hints and it also makes the
variables in the desired for the tflite ops.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs):
"""Initializes the parameters for an RNN cell.
Args:
num_units: int, The number of units in the RNN cell.
activation: Nonlinearity to use. Default: `tanh`. It could also be string
that is within Keras activation function names.
reuse: (optional) Python boolean describing whether to reuse variables in
an existing scope. Raises an error if not `True` and the existing scope
already has the given variables.
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require reuse=True in such cases.
dtype: Default dtype of the layer (default of `None` means use the type of
the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
Raises:
ValueError: If the existing scope already has the given variables.
"""
super(TfLiteRNNCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
# Inputs must be Rank-2.
self.input_spec = base_layer.InputSpec(ndim=2)
self._tflite_wrapper = op_hint.OpHint("UnidirectionalSequenceRnn")
self._num_units = num_units
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def build(self, inputs_shape):
"""Builds the RNN cell.
Args:
inputs_shape: Rnn input tensor shape.
Raises:
ValueError: If last dimension of the input shape is not known.
"""
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" %
(inputs_shape,))
input_depth = inputs_shape[-1]
def add_variable_wrapped(name, shape, initializer, index):
var = self.add_weight(name, shape=shape, initializer=initializer)
return self._tflite_wrapper.add_input(
var, name=name, index_override=index)
self._input_weights = add_variable_wrapped(
"input_weights", [self._num_units, input_depth], None, 1)
self._recurrent_weights = add_variable_wrapped(
"recurrent_weights", [self._num_units, self._num_units], None, 2)
self._bias = add_variable_wrapped(
"bias",
shape=[self._num_units],
initializer=init_ops.zeros_initializer(dtype=self.dtype),
index=3)
self.built = True
def call(self, inputs, state):
"""Most basic RNN: output = new_state = act(W * input + U * state + B)."""
inputs = self._tflite_wrapper.add_input(
inputs, tag="input", name="input", aggregate="stack", index_override=0)
state = self._tflite_wrapper.add_input(
state,
tag="hidden_state",
name="hidden_state",
aggregate="first",
index_override=4)
weights = array_ops.transpose(
array_ops.concat([self._input_weights, self._recurrent_weights], 1))
gate_inputs = math_ops.matmul(array_ops.concat([inputs, state], 1), weights)
gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)
output = self._activation(gate_inputs)
output = self._tflite_wrapper.add_output(
output,
tag="output",
name="output",
index_override=1,
aggregate="stack")
return output, output
def get_config(self):
config = {
"num_units": self._num_units,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(TfLiteRNNCell, self).get_config()
return dict(itertools.chain(base_config.items(), config.items()))
@tf_export("lite.experimental.nn.TFLiteLSTMCell")
class TFLiteLSTMCell(rnn_cell_impl.LayerRNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
This is used only for TfLite, it provides hints and it also makes the
variables in the desired for the tflite ops (transposed and seaparated).
The default non-peephole implementation is based on:
https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf
Felix Gers, Jurgen Schmidhuber, and Fred Cummins.
"Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
`tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
better performance on CPU.
"""
  def __init__(self,
               num_units,
               use_peepholes=False,
               cell_clip=None,
               initializer=None,
               num_proj=None,
               proj_clip=None,
               num_unit_shards=None,
               num_proj_shards=None,
               forget_bias=1.0,
               state_is_tuple=True,
               activation=None,
               reuse=None,
               name=None,
               dtype=None):
    """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices. If None, no projection is performed.
      proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a
        variable_scope partitioner instead.
      num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a
        variable_scope partitioner instead.
      forget_bias: Biases of the forget gate are initialized by default to 1 in
        order to reduce the scale of forgetting at the beginning of the
        training. Must set it manually to `0.0` when restoring from CudnnLSTM
        trained checkpoints.
      state_is_tuple: If True, accepted and returned states are 2-tuples of the
        `c_state` and `m_state`. If False, they are concatenated along the
        column axis. This latter behavior will soon be deprecated.
      activation: Activation function of the inner states. Default: `tanh`.
      reuse: (optional) Python boolean describing whether to reuse variables in
        an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will share
        weights, but to avoid mistakes we require reuse=True in such cases.
      dtype: Default dtype of the layer (default of `None` means use the type of
        the first input). Required when `build` is called before `call`. When
        restoring from CudnnLSTM-trained checkpoints, use
        `CudnnCompatibleLSTMCell` instead.
    """
    super(TFLiteLSTMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype)
    # TODO(raziel): decide if we want to just support tuples (yes please!).
    if not state_is_tuple:
      logging.warn(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated. Use state_is_tuple=True.", self)
    if num_unit_shards is not None or num_proj_shards is not None:
      logging.warn(
          "%s: The num_unit_shards and proj_unit_shards parameters are "
          "deprecated and will be removed in Jan 2017. "
          "Use a variable scope with a partitioner instead.", self)
    # Inputs must be 2-dimensional.
    # TODO(raziel): layers stuff -- chop if un-layerizing Op.
    self.input_spec = base_layer.InputSpec(ndim=2)
    # OpHint tagging wrapper; build()/call() register every tensor with it
    # using fixed index_override values.
    self._tflite_wrapper = op_hint.OpHint("UnidirectionalSequenceLstm")
    self._num_units = num_units
    self._use_peepholes = use_peepholes
    self._cell_clip = cell_clip
    self._initializer = initializer
    self._num_proj = num_proj
    self._proj_clip = proj_clip
    self._num_unit_shards = num_unit_shards
    self._num_proj_shards = num_proj_shards
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation or math_ops.tanh
    # With a projection layer the emitted output (m) is num_proj wide.
    self._output_size = num_proj if num_proj else num_units
    self._state_size = (
        rnn_cell_impl.LSTMStateTuple(num_units, self._output_size)
        if state_is_tuple else num_units + self._output_size)
  @property
  def state_size(self):
    """State size: an LSTMStateTuple of (num_units, output_size) when
    state_is_tuple, otherwise the width of the concatenated [c, m] state."""
    return self._state_size
  @property
  def output_size(self):
    """Output width: num_proj when a projection is configured, else
    num_units."""
    return self._output_size
  def build(self, inputs_shape):
    """Build TfLite LSTM cell graph.
    Args:
      inputs_shape: The inputs_shape must be known, and is [batch_size,
        input_size] shape.
    Raises:
      ValueError: if the inputs_shape is invalid.
    """
    if len(inputs_shape) != 2:
      raise ValueError(
          "inputs_shape must be 2-dimensional, saw shape: %s" % inputs_shape)
    # inputs_shape[1] may be a plain int or a Dimension object.
    input_depth = (
        inputs_shape[1]
        if isinstance(inputs_shape[1], int) else inputs_shape[1].value)
    if input_depth is None:
      raise ValueError("Invalid inputs_shape, saw shape: %s" % inputs_shape)
    maybe_partitioner = (
        partitioned_variables.fixed_size_partitioner(self._num_unit_shards)
        if self._num_unit_shards is not None else None)
    input_weight_shape = [self._num_units, input_depth]
    cell_weight_shape = [self._num_units, self._output_size]
    bias_shape = [self._num_units]
    # Every variable is registered with the OpHint under a fixed
    # index_override; the numbering below (1-8 weights, 12-15 biases,
    # 9-11 peepholes, 16 projection, with 0/4/18/19 used by call()) is
    # load-bearing — presumably it must match the fused op's tensor
    # ordering, so do not renumber.
    def add_variable_wrapped(name, shape, initializer, index, partitioner):
      var = self.add_weight(
          name, shape=shape, initializer=initializer, partitioner=partitioner)
      return self._tflite_wrapper.add_input(
          var, name=name, index_override=index)
    weight_initializer = self._initializer
    if self.dtype is None:
      bias_initializer = init_ops.zeros_initializer
    else:
      bias_initializer = init_ops.zeros_initializer(dtype=self.dtype)
    # Forget gate bias starts at self._forget_bias (default 1.0) to damp
    # forgetting early in training.
    forget_bias_initializer = init_ops.constant_initializer(self._forget_bias)
    self.input_to_input_w = add_variable_wrapped(
        "input_to_input_w", input_weight_shape, weight_initializer, 1,
        maybe_partitioner)
    self.input_to_forget_w = add_variable_wrapped(
        "input_to_forget_w", input_weight_shape, weight_initializer, 2,
        maybe_partitioner)
    self.input_to_cell_w = add_variable_wrapped(
        "input_to_cell_w", input_weight_shape, weight_initializer, 3,
        maybe_partitioner)
    self.input_to_output_w = add_variable_wrapped(
        "input_to_output_w", input_weight_shape, weight_initializer, 4,
        maybe_partitioner)
    self.cell_to_input_w = add_variable_wrapped(
        "cell_to_input_w", cell_weight_shape, weight_initializer, 5,
        maybe_partitioner)
    self.cell_to_forget_w = add_variable_wrapped(
        "cell_to_forget_w", cell_weight_shape, weight_initializer, 6,
        maybe_partitioner)
    self.cell_to_cell_w = add_variable_wrapped(
        "cell_to_cell_w", cell_weight_shape, weight_initializer, 7,
        maybe_partitioner)
    self.cell_to_output_w = add_variable_wrapped(
        "cell_to_output_w", cell_weight_shape, weight_initializer, 8,
        maybe_partitioner)
    self.input_bias = add_variable_wrapped(
        "input_bias", bias_shape, bias_initializer, 12, maybe_partitioner)
    self.forget_bias = add_variable_wrapped("forget_bias", bias_shape,
                                            forget_bias_initializer, 13,
                                            maybe_partitioner)
    self.cell_bias = add_variable_wrapped(
        "cell_bias", bias_shape, bias_initializer, 14, maybe_partitioner)
    self.output_bias = add_variable_wrapped(
        "output_bias", bias_shape, bias_initializer, 15, maybe_partitioner)
    # index 9, 10, 11.
    # f stands for forget, i stands for input and o stands for output.
    if self._use_peepholes:
      self._w_f_diag = add_variable_wrapped("w_f_diag", [self._num_units],
                                            self._initializer, 10,
                                            maybe_partitioner)
      self._w_i_diag = add_variable_wrapped("w_i_diag", [self._num_units],
                                            self._initializer, 9,
                                            maybe_partitioner)
      self._w_o_diag = add_variable_wrapped("w_o_diag", [self._num_units],
                                            self._initializer, 11,
                                            maybe_partitioner)
    # index 16 for proj kernel.
    if self._num_proj is not None:
      maybe_proj_partitioner = (
          partitioned_variables.fixed_size_partitioner(self._num_proj_shards)
          if self._num_proj_shards is not None else None)
      self._proj_kernel = add_variable_wrapped(
          "projection/kernel", [self._num_proj, self._num_units],
          self._initializer,
          16,
          partitioner=maybe_proj_partitioner)
    self.built = True
def call(self, inputs, state):
    """Run one step of LSTM.

    Args:
      inputs: input Tensor, 2D, `[batch, num_units]`.
      state: if `state_is_tuple` is False, this must be a state Tensor, `2-D,
        [batch, state_size]`. If `state_is_tuple` is True, this must be a tuple
        of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`.

    Returns:
      A tuple containing:

      - A `2-D, [batch, output_dim]`, Tensor representing the output of the
        LSTM after reading `inputs` when previous state was `state`.
        Here output_dim is:
           num_proj if num_proj was set,
           num_units otherwise.
      - Tensor(s) representing the new state of LSTM after reading `inputs` when
        the previous state was `state`. Same type and shape(s) as `state`.

    Raises:
      ValueError: If input size cannot be inferred from inputs via
        static shape inference.
    """
    # Tag the input so the TFLite converter can map it to the fused LSTM op's
    # input tensor at index 0.
    inputs = self._tflite_wrapper.add_input(
        inputs, tag="input", name="input", aggregate="stack", index_override=0)

    # Make sure inputs and bias_initializer has the same type.
    assert inputs.dtype == self.input_to_input_w.dtype

    num_proj = self._num_units if self._num_proj is None else self._num_proj
    sigmoid = math_ops.sigmoid

    if self._state_is_tuple:
        (c_prev, m_prev) = state
    else:
        # Flat state layout: first num_units columns hold c, the rest hold m.
        c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
        m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])

    # Note: For TfLite, cell_state is at index 19 while activation state at
    # index 18.
    c_prev = self._tflite_wrapper.add_input(
        c_prev,
        tag="c_prev",
        name="c_prev",
        aggregate="first",
        index_override=19)
    m_prev = self._tflite_wrapper.add_input(
        m_prev,
        tag="m_prev",
        name="m_prev",
        aggregate="first",
        index_override=18)

    input_size = inputs.get_shape().with_rank(2)[1]
    if input_size.value is None:
        raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
    inputs_and_m_prev = array_ops.concat([inputs, m_prev], axis=1)

    # i stands for input gate.
    # f stands for forget gate activation.
    # o outputs.
    # j output of LSTM unit.
    # c is the final state.
    # m is the output.
    # Each gate pre-activation is computed from [inputs, m_prev] against the
    # concatenated input-to-gate and recurrent (cell-to-gate) weights, plus
    # the gate's bias. transpose_b=True because weights are stored
    # [num_units, input_dim].
    i = nn_ops.bias_add(
        math_ops.matmul(
            inputs_and_m_prev,
            array_ops.concat([self.input_to_input_w, self.cell_to_input_w],
                             axis=1),
            transpose_b=True), self.input_bias)
    f = nn_ops.bias_add(
        math_ops.matmul(
            inputs_and_m_prev,
            array_ops.concat([self.input_to_forget_w, self.cell_to_forget_w],
                             axis=1),
            transpose_b=True), self.forget_bias)
    o = nn_ops.bias_add(
        math_ops.matmul(
            inputs_and_m_prev,
            array_ops.concat([self.input_to_output_w, self.cell_to_output_w],
                             axis=1),
            transpose_b=True), self.output_bias)
    j = nn_ops.bias_add(
        math_ops.matmul(
            inputs_and_m_prev,
            array_ops.concat([self.input_to_cell_w, self.cell_to_cell_w],
                             axis=1),
            transpose_b=True), self.cell_bias)

    # Diagonal connections
    if self._use_peepholes:
        c = (
            sigmoid(f + self._w_f_diag * c_prev) * c_prev +
            sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
    else:
        c = (sigmoid(f) * c_prev + sigmoid(i) * self._activation(j))

    if self._cell_clip is not None:
        # pylint: disable=invalid-unary-operand-type
        c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
        # pylint: enable=invalid-unary-operand-type
    if self._use_peepholes:
        m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
    else:
        m = sigmoid(o) * self._activation(c)

    if self._num_proj is not None:
        # Project the output down to num_proj dimensions.
        transposed_proj_kernel = array_ops.transpose(self._proj_kernel)
        m = math_ops.matmul(m, transposed_proj_kernel)

        if self._proj_clip is not None:
            # pylint: disable=invalid-unary-operand-type
            m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
            # pylint: enable=invalid-unary-operand-type

    # Tag outputs for the TFLite converter (c at index 1, m at index 2).
    c = self._tflite_wrapper.add_output(
        c, tag="c", name="c", aggregate="last", index_override=1)
    m = self._tflite_wrapper.add_output(
        m, tag="m", name="m", index_override=2, aggregate="stack")

    new_state = (
        rnn_cell_impl.LSTMStateTuple(c, m)
        if self._state_is_tuple else array_ops.concat([c, m], 1))
    return m, new_state
def get_config(self):
    """Return a serializable dict of the cell's constructor arguments.

    The cell-specific keys are merged over the base-class configuration so
    the cell can be re-created via ``from_config``.
    """
    base_config = super(TFLiteLSTMCell, self).get_config()
    config = {
        "num_units": self._num_units,
        "use_peepholes": self._use_peepholes,
        "cell_clip": self._cell_clip,
        "initializer": initializers.serialize(self._initializer),
        "num_proj": self._num_proj,
        "proj_clip": self._proj_clip,
        "num_unit_shards": self._num_unit_shards,
        "num_proj_shards": self._num_proj_shards,
        "forget_bias": self._forget_bias,
        "state_is_tuple": self._state_is_tuple,
        "activation": activations.serialize(self._activation),
        "reuse": self._reuse,
    }
    merged = dict(base_config)
    merged.update(config)
    return merged
| |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Base classes for artifacts.
Artifacts are classes that describe a system artifact. They describe a number
of key properties about the artifact:
Collectors: How to collect it from the client.
Processors: How to process the data from the client.
Storage: How to store the processed data.
"""
from grr.lib import parsers
from grr.lib import registry
from grr.lib import type_info
class Error(Exception):
    """Base exception for all artifact errors in this module."""
class ArtifactDefinitionError(Error):
    """Artifact was not well defined."""
class ConditionError(Error):
    """A condition was called that cannot be decided."""
class ArtifactProcessingError(Error):
    """An artifact could not be processed."""
# These labels represent the full set of labels that an Artifact can have.
# This set is tested on creation to ensure our list of labels doesn't get out
# of hand.
# Labels are used to logically group Artifacts for ease of use.
ARTIFACT_LABELS = [
    "Execution",  # Contain execution events.
    "Logs",  # Contain log files.
    "Ext Media",  # Contain external media data or events e.g. (USB drives)
    "Network",  # Describe networking state.
    "Auth",  # Authentication artifacts.
    "Software"  # Installed software.
]
class Collector(object):
    """A wrapper class to define an object for collecting data."""

    def __init__(self, action, conditions=None, args=None):
        # Empty defaults are built here (not in the signature) so each
        # instance gets its own mutable containers.
        self.action = action
        self.conditions = conditions or []
        self.args = args or {}
class AFF4ResultWriter(object):
    """A wrapper class to allow writing objects to the AFF4 space."""

    def __init__(self, path, aff4_type, aff4_attribute, mode):
        # Plain value holder; no validation is performed here.
        self.mode = mode
        self.path = path
        self.aff4_attribute = aff4_attribute
        self.aff4_type = aff4_type
class Artifact(object):
    """Base class for artifact objects.

    All Artifacts must define a Collect, and a Process class method.

    An Artifact Collector will collect and process the artifact by calling these
    methods.

    The base class implements no real functionality. As a general rule most things
    should inherit from GeneralArtifact instead.
    """

    # Register a metaclass registry to track all artifacts.
    # NOTE: Python 2 style metaclass declaration; this attribute is ignored
    # under Python 3.
    __metaclass__ = registry.MetaclassRegistry

    DESCRIPTION = "Abstract Artifact"
    LABELS = []  # A list of labels that describe what the artifact provides.

    def Collect(self):
        # Intentionally a no-op; subclasses override to gather data.
        pass

    def Process(self, responses):
        # Intentionally a no-op; subclasses override to process responses.
        pass
class GenericArtifact(Artifact):
    """A generalized Artifact that executes based on class variables.

    Artifacts must be processed by an ArtifactCollectorFlow.

    WARNING: The artifact object is re-instantiated between the Collect and
             Process. State is not preserved.
    """

    # Prevents this from automatically registering.
    __abstract = True  # pylint: disable=g-bad-name

    # Which OS are supported by the Artifact e.g. Linux, Windows, Darwin
    # Note that this can be implemented by CONDITIONS as well, but this
    # provides a more obvious interface for users for common cases.
    SUPPORTED_OS = []

    # List of ArtifactCondition function names that define whether Artifact
    # collection should run. These operate as an AND operator, all conditions
    # must pass for it to run. OR operators should be implemented as their own
    # conditions.
    CONDITIONS = []
    LABELS = []

    # A list of Collector objects.
    COLLECTORS = []

    # A dict to use for path interpolation.
    PATH_ARGS = {}

    def Validate(self):
        """Attempt to validate the artifact has been well defined.

        This is used to enforce Artifact rules: the class must have a doc
        string, SUPPORTED_OS entries must be known, CONDITIONS must be
        callable, each collector's conditions must be iterable, LABELS must
        come from ARTIFACT_LABELS, and any PROCESSOR must be a registered
        parser with a mapped out_type.

        Raises:
          ArtifactDefinitionError: If the artifact definition is invalid.
        """
        cls_name = self.__class__.__name__
        if not self.__doc__:
            raise ArtifactDefinitionError("Artifact %s has missing doc string" %
                                          cls_name)

        for supp_os in self.SUPPORTED_OS:
            if supp_os not in SUPPORTED_OS_MAP:
                raise ArtifactDefinitionError("Artifact %s has invalid SUPPORTED_OS %s"
                                              % (cls_name, supp_os))

        for condition in self.CONDITIONS:
            if not hasattr(condition, "__call__"):
                raise ArtifactDefinitionError("Artifact %s has invalid condition %s" %
                                              (cls_name, condition))

        for collector in self.COLLECTORS:
            if not hasattr(collector.conditions, "__iter__"):
                raise ArtifactDefinitionError("Artifact %s collector has invalid"
                                              " conditions %s" %
                                              (cls_name, collector.conditions))

        for label in self.LABELS:
            if label not in ARTIFACT_LABELS:
                raise ArtifactDefinitionError("Artifact %s has an invalid label %s."
                                              " Please use one from ARTIFACT_LABELS."
                                              % (cls_name, label))

        if hasattr(self, "PROCESSOR"):
            processor = parsers.Parser.classes.get(self.PROCESSOR)
            if not processor:
                raise ArtifactDefinitionError("Artifact %s has an invalid processor %s."
                                              " The processor must be registered as a"
                                              " parser."
                                              % (cls_name, self.PROCESSOR))
            # Fixed error message: previously read "has a a process with an
            # output_type"; the checked attribute is actually out_type.
            if (not hasattr(processor, "out_type")
                    or processor.out_type not in GRRArtifactMappings.rdf_map):
                raise ArtifactDefinitionError("Artifact %s has a processor with an"
                                              " out_type %s which is not in the"
                                              " GRRArtifactMappings."
                                              % (cls_name, processor.out_type))

    @classmethod
    def GetDescription(cls):
        """Return the first line of the class doc string."""
        return cls.__doc__.split("\n")[0]
def IsLinux(client):
    """Condition: true when the client reports a Linux system."""
    system = client.Get(client.Schema.SYSTEM)
    return system == "Linux"
def IsDarwin(client):
    """Condition: true when the client reports a Darwin (OS X) system."""
    system = client.Get(client.Schema.SYSTEM)
    return system == "Darwin"
def IsWindows(client):
    """Condition: true when the client reports a Windows system."""
    system = client.Get(client.Schema.SYSTEM)
    return system == "Windows"
# Maps each SUPPORTED_OS name to the condition function that tests a client
# for that operating system.
SUPPORTED_OS_MAP = {
    "Windows": IsWindows,
    "Linux": IsLinux,
    "Darwin": IsDarwin
}
class ArtifactList(type_info.TypeInfoObject):
    """A list of Artifacts names."""

    # GUI renderer used for this type.
    renderer = "ArtifactListRenderer"

    def Validate(self, value):
        """Value must be a list of artifact names.

        Args:
          value: An iterable of artifact class-name strings.

        Returns:
          The validated value, unchanged.

        Raises:
          type_info.TypeValueError: If value is not iterable, contains a
            non-string, or names a class not registered as an Artifact.
        """
        try:
            iter(value)
        except TypeError:
            raise type_info.TypeValueError(
                "%s not a valid iterable for ArtifactList" % value)
        for val in value:
            # NOTE: basestring is Python 2 only; this module predates Python 3.
            if not isinstance(val, basestring):
                raise type_info.TypeValueError("%s not a valid instance string." % val)
            # Look the name up in the metaclass registry of all Artifacts.
            artifact_cls = Artifact.classes.get(val)
            if not artifact_cls or not issubclass(artifact_cls, Artifact):
                raise type_info.TypeValueError("%s not a valid Artifact class." % val)
        return value
class GRRArtifactMappings(object):
    """SemanticProto to AFF4 storage mappings.

    Maps each RDFValue type collected by an Artifact to where it is stored
    in the AFF4 hierarchy. Every entry of ``rdf_map`` is a 4-tuple of:

      1. Location stored relative to the client.
      2. Name of the AFF4 type.
      3. Name of the attribute to be changed.
      4. Method for adding the RDFValue to the Attribute (Set, Append).
    """

    rdf_map = {
        "SoftwarePackage": (
            "info/software",
            "InstalledSoftwarePackages",
            "INSTALLED_PACKAGES",
            "Append",
        ),
    }
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Differentially private optimizers.
"""
from __future__ import division
import tensorflow as tf
from differential_privacy.dp_sgd.dp_optimizer import utils
from differential_privacy.dp_sgd.per_example_gradients import per_example_gradients
class DPGradientDescentOptimizer(tf.train.GradientDescentOptimizer):
    """Differentially private gradient descent optimizer.

    Per-example gradients are clipped and noised through a sanitizer so that
    each batch (or lot of batches) consumes a fixed privacy budget.
    """

    def __init__(self, learning_rate, eps_delta, sanitizer,
                 sigma=None, use_locking=False, name="DPGradientDescent",
                 batches_per_lot=1):
        """Construct a differentially private gradient descent optimizer.

        The optimizer uses fixed privacy budget for each batch of training.

        Args:
          learning_rate: for GradientDescentOptimizer.
          eps_delta: EpsDelta pair for each epoch.
          sanitizer: for sanitizing the gradient.
          sigma: noise sigma. If None, use eps_delta pair to compute sigma;
            otherwise use supplied sigma directly.
          use_locking: use locking.
          name: name for the object.
          batches_per_lot: Number of batches in a lot.
        """
        super(DPGradientDescentOptimizer, self).__init__(learning_rate,
                                                         use_locking, name)

        # Also, if needed, define the gradient accumulators
        self._batches_per_lot = batches_per_lot
        self._grad_accum_dict = {}
        if batches_per_lot > 1:
            # Counts batches inside the current lot; gradients are applied only
            # when it reaches a multiple of batches_per_lot.
            self._batch_count = tf.Variable(1, dtype=tf.int32, trainable=False,
                                            name="batch_count")
            var_list = tf.trainable_variables()
            with tf.variable_scope("grad_acc_for"):
                for var in var_list:
                    # One zero-initialized shadow accumulator per trainable
                    # variable, keyed by the variable's name.
                    v_grad_accum = tf.Variable(tf.zeros_like(var),
                                               trainable=False,
                                               name=utils.GetTensorOpName(var))
                    self._grad_accum_dict[var.name] = v_grad_accum
        self._eps_delta = eps_delta
        self._sanitizer = sanitizer
        self._sigma = sigma

    def compute_sanitized_gradients(self, loss, var_list=None,
                                    add_noise=True):
        """Compute the sanitized gradients.

        Args:
          loss: the loss tensor.
          var_list: the optional variables.
          add_noise: if true, then add noise. Always clip.
        Returns:
          a pair of (list of sanitized gradients) and privacy spending accumulation
          operations.
        Raises:
          TypeError: if var_list contains non-variable.
        """
        self._assert_valid_dtypes([loss])

        xs = [tf.convert_to_tensor(x) for x in var_list]
        # One gradient tensor per example in the batch, so clipping can be
        # applied per example before aggregation.
        px_grads = per_example_gradients.PerExampleGradients(loss, xs)
        sanitized_grads = []
        for px_grad, v in zip(px_grads, var_list):
            tensor_name = utils.GetTensorOpName(v)
            sanitized_grad = self._sanitizer.sanitize(
                px_grad, self._eps_delta, sigma=self._sigma,
                tensor_name=tensor_name, add_noise=add_noise,
                # num_examples scales the privacy accounting by the lot size.
                num_examples=self._batches_per_lot * tf.slice(
                    tf.shape(px_grad), [0], [1]))
            sanitized_grads.append(sanitized_grad)

        return sanitized_grads

    def minimize(self, loss, global_step=None, var_list=None,
                 name=None):
        """Minimize using sanitized gradients.

        This gets a var_list which is the list of trainable variables.
        For each var in var_list, we defined a grad_accumulator variable
        during init. When batches_per_lot > 1, we accumulate the gradient
        update in those. At the end of each lot, we apply the update back to
        the variable. This has the effect that for each lot we compute
        gradients at the point at the beginning of the lot, and then apply one
        update at the end of the lot. In other words, semantically, we are doing
        SGD with one lot being the equivalent of one usual batch of size
        batch_size * batches_per_lot.
        This allows us to simulate larger batches than our memory size would permit.

        The lr and the num_steps are in the lot world.

        Args:
          loss: the loss tensor.
          global_step: the optional global step.
          var_list: the optional variables.
          name: the optional name.
        Returns:
          the operation that runs one step of DP gradient descent.
        """

        # First validate the var_list

        if var_list is None:
            var_list = tf.trainable_variables()
        for var in var_list:
            if not isinstance(var, tf.Variable):
                raise TypeError("Argument is not a variable.Variable: %s" % var)

        # Modification: apply gradient once every batches_per_lot many steps.
        # This may lead to smaller error

        if self._batches_per_lot == 1:
            # Single-batch lots: sanitize and apply immediately.
            sanitized_grads = self.compute_sanitized_gradients(
                loss, var_list=var_list)

            grads_and_vars = zip(sanitized_grads, var_list)
            self._assert_valid_dtypes([v for g, v in grads_and_vars if g is not None])

            apply_grads = self.apply_gradients(grads_and_vars,
                                               global_step=global_step, name=name)
            return apply_grads

        # Condition for deciding whether to accumulate the gradient
        # or actually apply it.
        # we use a private self_batch_count to keep track of number of batches.
        # global step will count number of lots processed.

        update_cond = tf.equal(tf.constant(0),
                               tf.mod(self._batch_count,
                                      tf.constant(self._batches_per_lot)))

        # Things to do for batches other than last of the lot.
        # Add non-noisy clipped grads to shadow variables.

        def non_last_in_lot_op(loss, var_list):
            """Ops to do for a typical batch.

            For a batch that is not the last one in the lot, we simply compute the
            sanitized gradients and apply them to the grad_acc variables.

            Args:
              loss: loss function tensor
              var_list: list of variables
            Returns:
              A tensorflow op to do the updates to the gradient accumulators
            """
            # Noise is only added on the last batch of the lot, so here
            # add_noise=False.
            sanitized_grads = self.compute_sanitized_gradients(
                loss, var_list=var_list, add_noise=False)

            update_ops_list = []
            for var, grad in zip(var_list, sanitized_grads):
                grad_acc_v = self._grad_accum_dict[var.name]
                update_ops_list.append(grad_acc_v.assign_add(grad))
            update_ops_list.append(self._batch_count.assign_add(1))
            return tf.group(*update_ops_list)

        # Things to do for last batch of a lot.
        # Add noisy clipped grads to accumulator.
        # Apply accumulated grads to vars.

        def last_in_lot_op(loss, var_list, global_step):
            """Ops to do for last batch in a lot.

            For the last batch in the lot, we first add the sanitized gradients to
            the gradient acc variables, and then apply these
            values over to the original variables (via an apply gradient)

            Args:
              loss: loss function tensor
              var_list: list of variables
              global_step: optional global step to be passed to apply_gradients
            Returns:
              A tensorflow op to push updates from shadow vars to real vars.
            """

            # We add noise in the last lot. This is why we need this code snippet
            # that looks almost identical to the non_last_op case here.
            sanitized_grads = self.compute_sanitized_gradients(
                loss, var_list=var_list, add_noise=True)

            normalized_grads = []
            for var, grad in zip(var_list, sanitized_grads):
                grad_acc_v = self._grad_accum_dict[var.name]
                # To handle the lr difference per lot vs per batch, we divide the
                # update by number of batches per lot.
                normalized_grad = tf.div(grad_acc_v.assign_add(grad),
                                         tf.to_float(self._batches_per_lot))

                normalized_grads.append(normalized_grad)

            with tf.control_dependencies(normalized_grads):
                grads_and_vars = zip(normalized_grads, var_list)
                self._assert_valid_dtypes(
                    [v for g, v in grads_and_vars if g is not None])
                apply_san_grads = self.apply_gradients(grads_and_vars,
                                                       global_step=global_step,
                                                       name="apply_grads")

            # Now reset the accumulators to zero
            resets_list = []
            with tf.control_dependencies([apply_san_grads]):
                for _, acc in self._grad_accum_dict.items():
                    reset = tf.assign(acc, tf.zeros_like(acc))
                    resets_list.append(reset)
            resets_list.append(self._batch_count.assign_add(1))

            last_step_update = tf.group(*([apply_san_grads] + resets_list))
            return last_step_update
        # pylint: disable=g-long-lambda
        update_op = tf.cond(update_cond,
                            lambda: last_in_lot_op(
                                loss, var_list,
                                global_step),
                            lambda: non_last_in_lot_op(
                                loss, var_list))
        return tf.group(update_op)
| |
# Copyright (c) 2019, Matt Layman and contributors
try:
import yaml
LOAD_YAML = True
except ImportError: # pragma: no cover
LOAD_YAML = False
class Line(object):
    """Base type for TAP data.

    TAP is a line based protocol. Thus, the most primitive type is a line.
    Subclasses must override :attr:`category`.
    """

    @property
    def category(self):
        raise NotImplementedError
class Result(Line):
    """Information about an individual test line."""

    def __init__(
        self,
        ok,
        number=None,
        description="",
        directive=None,
        diagnostics=None,
        raw_yaml_block=None,
    ):
        self._ok = ok
        # The number may be an empty string so explicitly set to None.
        self._number = int(number) if number else None
        self._description = description
        self.directive = directive
        self.diagnostics = diagnostics
        self._yaml_block = raw_yaml_block

    @property
    def category(self):
        """:returns: ``test``"""
        return "test"

    @property
    def ok(self):
        """Whether the test passed.

        :rtype: bool
        """
        return self._ok

    @property
    def number(self):
        """The test number, or ``None`` when no number was provided.

        :rtype: int
        """
        return self._number

    @property
    def description(self):
        """The test description text."""
        return self._description

    @property
    def skip(self):
        """Whether the directive marks this test as skipped.

        :rtype: bool
        """
        return self.directive.skip

    @property
    def todo(self):
        """Whether the directive marks this test as a TODO.

        :rtype: bool
        """
        return self.directive.todo

    @property
    def yaml_block(self):
        """Lazily parse and return the attached YAML block.

        ``None`` is returned when yaml support is unavailable, when no block
        is attached to this result, or when the block fails to parse.

        :rtype: dict
        """
        if not LOAD_YAML or self._yaml_block is None:
            return None
        try:
            return yaml.load(self._yaml_block, Loader=yaml.SafeLoader)
        except yaml.error.YAMLError:
            print("Error parsing yaml block. Check formatting.")
            return None

    def __str__(self):
        prefix = "not " if not self.ok else ""
        directive = ""
        if self.directive is not None and self.directive.text:
            directive = " # {0}".format(self.directive.text)
        diagnostics = ""
        if self.diagnostics is not None:
            diagnostics = "\n" + self.diagnostics.rstrip()
        return "{0}ok {1} {2}{3}{4}".format(
            prefix, self.number, self.description, directive, diagnostics
        )
class Plan(Line):
    """A plan line stating how many tests are expected."""

    def __init__(self, expected_tests, directive=None):
        self.directive = directive
        self._expected_tests = expected_tests

    @property
    def category(self):
        """:returns: ``plan``"""
        return "plan"

    @property
    def skip(self):
        """Whether the plan's directive skips the whole file.

        :rtype: bool
        """
        return self.directive.skip

    @property
    def expected_tests(self):
        """The number of tests this plan announces.

        :rtype: int
        """
        return self._expected_tests
class Diagnostic(Line):
    """A diagnostic line (i.e. anything starting with a hash)."""

    def __init__(self, text):
        self._text = text

    @property
    def text(self):
        """The raw diagnostic text."""
        return self._text

    @property
    def category(self):
        """:returns: ``diagnostic``"""
        return "diagnostic"
class Bail(Line):
    """A bail out line (i.e. anything starting with 'Bail out!')."""

    def __init__(self, reason):
        self._reason = reason

    @property
    def reason(self):
        """The reason given for bailing out."""
        return self._reason

    @property
    def category(self):
        """:returns: ``bail``"""
        return "bail"
class Version(Line):
    """A version line (i.e. of the form 'TAP version 13')."""

    def __init__(self, version):
        self._version = version

    @property
    def version(self):
        """The declared TAP version number.

        :rtype: int
        """
        return self._version

    @property
    def category(self):
        """:returns: ``version``"""
        return "version"
class Unknown(Line):
    """A line that is not any known kind of TAP line.

    This exists for the purpose of a Null Object pattern.
    """

    @property
    def category(self):
        """:returns: ``unknown``"""
        return "unknown"
| |
import editdistance
from timeit import default_timer as timer
from operators import add_extract
from prune_rules import invalid_node, unlikely_introduce_symbols
from collections import defaultdict
import Levenshtein
from collections import Counter
import numpy as np
import string
import itertools
from foofah_table_graph import TableGraph
import foofah_utils
MAX_TABLE_OPS = 3
MAX_SYNTAX = 4
CPP = True
NODE_COUNTER = {'nodes': 0}
alphanumeric_set = set(string.ascii_letters) | set(string.digits)
symbol_set = set(string.punctuation) | set(string.whitespace)
def median(lst):
    """Return the median of a non-empty list of numbers.

    For an even-length list the mean of the two middle values is returned.
    """
    ordered = sorted(lst)
    mid = (len(ordered) - 1) // 2
    if len(ordered) % 2:
        return ordered[mid]
    return (ordered[mid] + ordered[mid + 1]) / 2.0
def is_messy_subset(target, contents):
    """Return True if every target string occurs somewhere in contents.

    Cells are joined with NUL so each target is searched across the whole
    table content at once.
    """
    joined = chr(0).join(contents)
    return all(t in joined for t in target)
def count_matches(target, contents):
    """Estimate the edit cost between contents and target cell lists.

    A content cell that neither appears in the target nor contains any
    target string counts as one drop. A target string present only inside a
    larger content cell counts as a split plus at least one drop (cost 2).
    """
    joined_contents = chr(0).join(contents)
    joined_target = chr(0).join(target)
    total = 0
    for cell in contents:
        if cell == '' or cell not in joined_target:
            if not any(t in cell for t in target):
                total += 1  # this is a drop
    for t in target:
        if t in joined_contents and t not in contents:
            total += 2  # this is a split + at least one drop
    return total
class FoofahNode:
    """A search-tree node holding one intermediate table state."""

    # Shared prediction model, if any (class-level; set externally).
    pred_model = None
    f_score = 0
    g_score = 0
    # Hash of the table contents, used for duplicate detection.
    f_hash = None
    # Carry-on Properties: can be used directly by other functions, which helps reducing the amount of work when
    # evaluating multiple pruning rules.
    prop_num_rows = 0
    prop_num_cols = 0
    prop_col_char = None
    prop_if_col_contains_empty_cells = None
def __init__(self, contents, op, parent, times={}, node_counter=NODE_COUNTER, h_debug=False):
    # NOTE(review): `times={}` is a mutable default that is shared across all
    # nodes created without an explicit `times` — verify this sharing is
    # intentional (children do pass parent.times explicitly).
    self.node_id = node_counter['nodes']
    self.contents = contents
    self.parent = parent
    self.operation = op
    self.h_debug = h_debug
    # Depth in the search tree; the root node has depth 0.
    if self.parent:
        self.depth = self.parent.depth + 1
    else:
        self.depth = 0
    self.f_score = 0
    self.g_score = 0
    self.f_hash = hash(str(self.contents))
    # Sets of alphanumeric characters and punctuation/whitespace symbols
    # appearing anywhere in the table, used by pruning rules.
    self.prop_chars = alphanumeric_set & set(str(self.contents))
    self.prop_symbols = symbol_set & set(str(''.join(y for x in self.contents for y in x)))
    # Set of cell data in current table
    self.prop_data = set(itertools.chain(*self.contents))
    if "" in self.prop_data:
        self.prop_data.remove("")
    self.prop_cols, self.prop_col_data = get_cols_from_table(self.contents)
    self.col_hash = sorted(self.prop_cols)
    self.num_rows = len(self.contents)
    # NOTE(review): assumes the table is non-empty and rectangular (row 0
    # determines the column count) — confirm against callers.
    self.num_cols = len(self.contents[0])
    # Lazily initialize the shared timing dict on first use.
    if len(times) == 0:
        times['children'] = []
        times['scores'] = []
        times['ops'] = {}
        times['prune'] = []
        times['prune2'] = []
        times['child_obj'] = []
        times['loop'] = []
    self.times = times
    self.h_score = -1
    node_counter['nodes'] += 1
    # Debug trace for a specific operator sequence (Python 2 print statement).
    if self.parent is not None and self.parent.operation[0]['name'] == 'split1 ]' and op[0]['name'] == 'append ]':
        print self.parent.operation[0]['name'], op[0]['name']
    self.confidence = 1.0
# @profile
def make_children(self, ops, debug=False, bound=float("inf"), p1=True, p2=True, p3=True):
    """Expand this node by applying every candidate operator.

    Args:
      ops: list of operator descriptor dicts (with 'fxn', 'params', 'if_col').
      debug: unused here; kept for interface compatibility.
      bound: maximum search depth; nodes at this depth produce no children.
      p1, p2, p3: enable/disable the three pruning rules passed through to
        make_child_node.

    Returns:
      A list of surviving (non-pruned) child FoofahNode objects.
    """
    start = timer()
    num_cols = len(self.contents[0])
    children = []
    # Depth bound reached: do not expand further.
    if self.depth == bound:
        return children
    # Copy the operator list so appending extract operators below does not
    # mutate the caller's list.
    temp_ops = list(ops)
    ops = temp_ops
    # Checking if all cell data from output table exist in current table.
    # If they are, remove all operators for syntax transformation
    if not table_values_is_subset(FoofahNode.goal_node, self):
        # A brief estimate of whether extract should be added
        ops += add_extract(self.contents, FoofahNode.goal_node.contents, cur_node=self, goal_node=FoofahNode.goal_node)
    # Each operation takes in a column index, so we need to apply
    # each op to every possible column in the current state
    for op in ops:
        # For those table level operations that do not need a column parameter
        if not op['if_col']:
            params = op['params']
            op_obj = (op, None, dict(params))
            result = op['fxn'](self.contents)
            child = self.make_child_node(result, self, op_obj, p1, p2, p3)
            if child:
                children.append(child)
        else:
            # Try different columns for other operations
            for i in range(num_cols):
                result = op['fxn'](self.contents, i)
                params = op['params']
                # NOTE(review): this mutates the shared op['params'] list in
                # place; op_obj keeps its own copy via dict(params) — verify
                # no other reader depends on the pre-mutation value.
                params[1] = str(i)
                op_obj = (op, i, dict(params))
                child = self.make_child_node(result, self, op_obj, p1, p2, p3)
                if child:
                    children.append(child)
    self.times['children'].append(timer() - start)
    return children
def make_child_node(self, table, parent_node, op_obj, p1=True, p2=True, p3=True):
    """Build a child node for `table`, or return None if it is pruned.

    The three pruning rules are evaluated in order and each can be disabled
    via its corresponding flag (p1: symbol introduction, p2: invalid node,
    p3: identical to parent).
    """
    if table is None or len(table) == 0:
        return None
    child = FoofahNode(table, op_obj, parent_node, parent_node.times)
    if p1 and unlikely_introduce_symbols(child, self, FoofahNode.goal_node):
        return None
    if p2 and invalid_node(child, FoofahNode.goal_node):
        return None
    if p3 and child.identical(parent_node):
        return None
    return child
def get_h_score(self, batch=True):
    """Heuristic score: graph edit distance from this node to the goal table."""
    return self.get_any_dist(self, FoofahNode.goal_node, batch)
def get_any_dist(self, node_a, node_b, batch=True):
    """Compute the table graph edit distance between two nodes.

    When the module-level CPP flag is set, the native foofah_utils
    implementation is used; otherwise the pure-Python TableGraph greedy
    approximation is used.
    """
    if CPP:
        # Build the native graph representations cell by cell.
        a = foofah_utils.TableGraph(node_a.num_rows, node_a.num_cols)
        for i, row in enumerate(node_a.contents):
            for j, cell in enumerate(row):
                a.addCell(cell, i, j)
        b = foofah_utils.TableGraph(node_b.num_rows, node_b.num_cols)
        for i, row in enumerate(node_b.contents):
            for j, cell in enumerate(row):
                b.addCell(cell, i, j)
        if batch:
            return foofah_utils.get_ged_batch(a, b, False)
        return foofah_utils.get_ged(a, b)
    # Pure-Python fallback; [1] selects the distance from the result pair.
    a = TableGraph(node_a.contents)
    b = TableGraph(node_b.contents)
    return a.graph_edit_distance_greedy(b, batch)[1]
def get_h_score_intuitive(self, debug=False):
    """A simple rule-of-thumb heuristic: syntax edits + layout edits.

    Counts one syntax transformation per cell value missing from the goal
    table, plus one layout transformation if the row counts differ.
    """
    # STEP 0
    # There are some scenarios where we know for sure how to solve
    #
    # Table is exactly the same
    if self == FoofahNode.goal_node:
        self.confidence = float('inf')
        return 0
    syntax = 0
    layout = 0
    # NOTE(review): `clean` is never incremented anywhere below — it always
    # contributes 0 to the returned score.
    clean = 0
    # Usually 1 syntax transformation work on a column
    for col_data in self.prop_col_data:
        for data in col_data:
            if data not in FoofahNode.goal_node.prop_data:
                syntax += 1
                continue
    if self.num_rows != FoofahNode.goal_node.num_rows:
        layout += 1
    return syntax + layout + clean
def get_h_score_rule(self, debug=False, heuristic_no=2):
    """Rule-based heuristic estimating operations needed to reach the goal.

    heuristic_no selects the strategy: 1 is a constant, 2 (default) uses the
    median of per-row scores when shapes match and shape-based rules
    otherwise; any other value uses the max of per-row scores.

    NOTE: uses Python 2's xrange.
    """
    if heuristic_no == 1:
        return 1
    elif heuristic_no == 2:
        # If the current table is already the target table, nothing needs to be done
        if self == FoofahNode.goal_node:
            return 0
        # number of rows are the same in the target table, current table and parent table
        if self.num_rows == FoofahNode.goal_node.num_rows:
            # return 1
            h_vals = []
            for i in xrange(self.num_rows):
                h = self.get_row_h_score(i, debug)
                h_vals.append(h)
            if len(h_vals) > 1:
                return median(h_vals)
            else:
                return h_vals[0]
        else:
            # We are almost sure that it is one of fold, unfold, transpose, fold_header and unfold_header,
            # remove_empty or row concatenation if the table values are the same even though the table shapes
            # are different
            cost = 0
            if self.prop_data != FoofahNode.goal_node.prop_data:
                cost = 1
            # Tranpose is needed
            if self.num_rows == FoofahNode.goal_node.num_cols and self.num_cols == FoofahNode.goal_node.num_rows:
                return cost + 1
            # unfold, fold, row concatenation
            elif self.num_rows % FoofahNode.goal_node.num_rows == 0 or self.num_rows % (
                    FoofahNode.goal_node.num_rows - 1) == 0:
                return cost + 1
            # fold or fold_header or row concate is needed
            elif FoofahNode.goal_node.num_rows % self.num_rows == 0 or FoofahNode.goal_node.num_rows % (
                    self.num_rows - 1) == 0:
                return cost + 1
            # Could be remove_empty_rows
            elif self.num_cols == FoofahNode.goal_node.num_cols:
                return cost + 1
            # Otherwise, 2 operations might be used
            else:
                return cost + 2
    else:
        h_vals = []
        # number of rows are the same in the target table, current table and parent table
        if (self.parent is None or len(self.contents) == len(self.parent.contents)) and len(
                self.contents) == FoofahNode.goal_node.num_rows:
            # If the current table is already the target table, nothing needs to be done
            if self == FoofahNode.goal_node:
                return 0
            for i in xrange(len(self.contents)):
                h = self.get_row_h_score(i, debug)
                h_vals.append(h)
            if len(h_vals) > 1:
                h_vals.sort()
                return np.max(h_vals)  # h_vals[-1] #np.percentile(h_vals, 80)
            else:
                return h_vals[0]
        else:
            # If the current table is already the target table, nothing needs to be done
            if self == FoofahNode.goal_node:
                return 0
            # We are almost sure that it is one of fold, unfold, transpose, fold_header and unfold_header, or remove_empty if the table values are the same even though the table shapes are different
            elif table_of_same_values(self, FoofahNode.goal_node):
                return 1
            # Tranpose is needed
            elif len(self.contents) == FoofahNode.goal_node.num_cols and self.num_cols == FoofahNode.goal_node.num_rows:
                return 1 + self.get_row_h_score(0)
            # unfold or unfold_header is needed
            elif len(self.contents) % FoofahNode.goal_node.num_rows == 0 or len(self.contents) % (
                    FoofahNode.goal_node.num_rows - 1) == 0:
                return 1 + self.get_row_h_score(0)
            # fold or fold_header is needed
            elif FoofahNode.goal_node.num_rows % len(self.contents) == 0 or FoofahNode.goal_node.num_rows % (
                    len(self.contents) - 1) == 0:
                return 1 + self.get_row_h_score(0)
            # We don't know what operations might be used
            else:
                return 2 + self.get_row_h_score(0)
def get_row_h_score(self, row_num=0, debug=False, heuristic_no=2):
    """Estimate the operation count needed to turn row ``row_num`` of this
    table into the corresponding row of ``FoofahNode.goal_node``.

    :param row_num: index of the row to score (defaults to the first row).
    :param debug: when True, also return an integer tag identifying which
        branch produced the score.
    :param heuristic_no: 2 selects the cell-multiset heuristic (default);
        1 selects an older edit-distance based heuristic.
    :returns: the heuristic score, or ``(score, source_tag)`` when
        ``debug`` is set.
    """
    H_DEBUG = self.h_debug
    start = timer()
    # Tag recording which code path produced the score (returned in debug mode).
    h_score_source = -1
    # An empty table can never reach the goal: infinite cost.
    if len(self.contents) == 0:
        self.h_score = float("inf")
        h_score_source = 0
        if debug:
            return self.h_score, h_score_source
        else:
            return self.h_score
    row = self.contents[row_num]
    targ_row = FoofahNode.goal_node.contents[row_num]
    if heuristic_no == 2:
        # Compare the two rows as multisets of cell values.
        cur_row_count = Counter(row)
        tar_row_count = Counter(targ_row)
        h_score = 0
        if cur_row_count == tar_row_count:
            # Same cells, possibly different order: count how many cells
            # are out of relative order (i.e. moves still needed).
            row_temp = list(row)
            targ_row_temp = list(targ_row)
            p = 0
            while p < len(targ_row_temp) - 1:
                if row_temp.index(targ_row_temp[p]) < row_temp.index(targ_row_temp[p + 1]):
                    row_temp.remove(targ_row_temp[p])
                    p += 1
                else:
                    break
            h_score = len(targ_row_temp) - p - 1
        # We are in a more complex situation where more fine grained analysis is needed.
        else:
            # Find intersection of two cell data
            cur_row_temp = list(row)
            tar_row_temp = list(targ_row)
            same_cell_data = set(cur_row_temp) & set(tar_row_temp)
            # Remove null
            if "" in same_cell_data:
                same_cell_data.remove("")
            # Figure out how many drops or copies are needed. We don't consider if move operations are needed at
            # this moment. We will posepone considering fill and divide.
            for item in same_cell_data:
                h_score += abs(cur_row_count[item] - tar_row_count[item])
            # Remove the cells that have already been considered
            cur_row_temp = [x for x in cur_row_temp if x not in same_cell_data]
            tar_row_temp = [x for x in tar_row_temp if x not in same_cell_data]
            # merge and join: current cells that appear as substrings of the
            # \x01-joined target row are candidates for join operations.
            temp_str = chr(1).join(tar_row_temp)
            merge_candidate = []
            for cell_data in cur_row_temp:
                if cell_data in temp_str:
                    merge_candidate.append(cell_data)
            if "" in merge_candidate: merge_candidate.remove("")
            merge_candidate_2 = set(merge_candidate)
            for a in merge_candidate:
                for b in merge_candidate:
                    if a != b:
                        # Candidates adjacent in the target (directly, or
                        # separated by one cell boundary) cost one join.
                        if a + b in temp_str:
                            h_score += 1
                            if a in merge_candidate_2: merge_candidate_2.remove(a)
                            if b in merge_candidate_2: merge_candidate_2.remove(b)
                        else:
                            if temp_str.index(a) + len(a) + 1 == temp_str.index(b):
                                h_score += 1
                                if a in merge_candidate_2: merge_candidate_2.remove(a)
                                if b in merge_candidate_2: merge_candidate_2.remove(b)
            cur_row_temp = [x for x in cur_row_temp if x not in merge_candidate_2]
            tar_row_temp = [x for x in tar_row_temp if x not in merge_candidate_2]
            # split: a current cell containing target cells as substrings
            # costs one split operation.
            cur_remove = set()
            tar_remove = set()
            for cur_data in cur_row_temp:
                if_split = False
                for tar_data in tar_row_temp:
                    if tar_data in cur_data:
                        if_split = True
                        tar_remove.add(tar_data)
                tar_row_temp = [x for x in tar_row_temp if x not in tar_remove]
                if if_split:
                    cur_remove.add(cur_data)
                    h_score += 1
            cur_row_temp = [x for x in cur_row_temp if x not in cur_remove]
            # For fill operation, it delete an empty cell, and add a new cell of random data
            # For divide, it simply add a new empty cell
            # fill & divide
            if cur_row_temp != tar_row_temp:
                h_score += 1
        self.h_score = h_score
        return h_score
    elif heuristic_no == 1:
        # Simple first try, see how many splits or joins it might take
        if self.h_score:
            self.h_score = 0
        # This is the root node. TODO: this is sort of hacky
        # NOTE(review): parent_row is not defined in this scope — this
        # legacy branch raises NameError if ever selected; confirm before
        # re-enabling heuristic_no=1.
        if parent_row is None:
            self.h_score = float('inf')
            self.h_score = 1.0
            h_score_source = 1
        # # Calculate fold and unfold
        # elif len(self.contents) != FooNode.goal_node.num_rows:
        #     self.h_score = 1
        #     target_1d = np.array(self.target)
        #     target_1d = target_1d.ravel()
        #     for item in targ_row:
        #         if item not in target_1d:
        #             self.h_score += 0.5
        # Calculate how many moves needed. This is calculated only when moves are the only operations left to be done.
        elif Counter(row) == Counter(targ_row):
            row_temp = list(row)
            targ_row_temp = list(targ_row)
            p = 0
            while p < len(targ_row_temp) - 1:
                if row_temp.index(targ_row_temp[p]) < row_temp.index(targ_row_temp[p + 1]):
                    row_temp.remove(targ_row_temp[p])
                    p += 1
                else:
                    break
            self.h_score = (len(targ_row_temp) - p - 1) / 2
            h_score_source = 2
        # Calculate how many copies and drops are needed. This is calculated only when copies and drops are the only operations to be done.
        elif (set(row) < set(targ_row) or set(targ_row) < set(row)) and Counter(row) != Counter(targ_row):
            cr = Counter(row)
            ct = Counter(targ_row)
            for item in set(row):
                self.h_score += abs(cr[item] - ct[item]) / 2
            h_score_source = 3
        # If it's probably a dropped column, use the difference in
        # the number of columns.
        elif (len(row) > len(targ_row) and
              is_subset(targ_row, row)):
            self.h_score = abs(len(row) - len(targ_row))
            h_score_source = 4
        # If we need to do some splits to get the target out...
        elif is_messy_subset(targ_row, row):
            self.h_score = count_matches(targ_row, row)
            h_score_source = 5
        else:
            content_str = chr(1).join(row)
            target_str = chr(1).join(targ_row)
            count = 0
            join_targs = defaultdict(list)
            in_join_targs = defaultdict(list)
            drop_cands = []
            dupes = set([])
            for c in row:
                if c == '':
                    count += 1
                else:
                    for t in targ_row:
                        # if this part of the current state is in the
                        # we may have a join.
                        if c != t and c in t:
                            num_in_targ = t.count(c)
                            num_seen = chr(1).join(join_targs[t]).count(c)
                            # if we have more in the target that we've seen
                            # yet, add this one. We need to do this in case
                            # rows have duplicates.
                            if num_in_targ > num_seen:
                                join_targs[t].append(c)
                                in_join_targs[c].append(t)
                    # This is an attempt to handle the case where there
                    # are duplicate columns in the input
                    contents_cnt = content_str.count(c)
                    targ_cnt = target_str.count(c)
                    if targ_cnt > 0 and contents_cnt - targ_cnt > 0:
                        if c not in dupes:
                            if H_DEBUG: print "Dupe drop:", c, 1
                            count += 1
                            dupes.add(c)
                    if c not in target_str and c not in targ_row:
                        drop_cands.append(c)
            # use this to keep track of what pieces we've already used for
            # joins, so they don't get double counted later.
            used_joins = set([])
            # This counts join ops. If 2 or more items are joins, there
            # are actually 1 fewer ops than items being joined
            for k, v in join_targs.iteritems():
                if len(v) > 1:
                    joined = chr(1).join(v)
                    if H_DEBUG: print "Joins:", k, v, editdistance.eval(joined, k)
                    count += editdistance.eval(joined, k)
                    used_joins.add(k)
                elif len(in_join_targs[v[0]]) == 1:  # no duplicate matches
                    e_dist = editdistance.eval(k, v[0])
                    count += e_dist  # TODO: this is not right
                    used_joins.add(k)
                    if H_DEBUG: print "Add chars:", k, v, count
            no_drops = set([])
            also_no_drops = set([])  # this is for CASE4
            min_edists = defaultdict(int)
            for t in targ_row:
                if t in row:
                    continue
                for d in drop_cands:
                    # Weighted Levenshtein distance: a replacement counts
                    # double (one delete plus one insert).
                    e_dist = 0
                    for eo in Levenshtein.editops(t, d):
                        if eo[0] == 'replace':
                            e_dist += 2
                        else:
                            e_dist += 1
                    if H_DEBUG: print "START:", t, "|", d, e_dist
                    if d not in no_drops and t in d:
                        if H_DEBUG: print "* CASE1:", t, d, e_dist, 1, count
                        count += 2
                        no_drops.add(d)
                    elif d not in no_drops and d in t and e_dist < len(t) and e_dist >= len(d):
                        if H_DEBUG: print "* CASE2:", t, "|", d, e_dist, 2, count
                        count += 2  # split and drop (or similar)
                        no_drops.add(d)
                    elif d not in no_drops and d in t and e_dist < len(t) and e_dist < len(d):
                        if H_DEBUG: print "* CASE3:", t, d, e_dist, e_dist, count
                        count += e_dist  # char. operations
                        no_drops.add(d)
                    elif e_dist >= len(t) and e_dist < len(d):
                        if H_DEBUG: print "* CASE4:", t, d, e_dist
                        count += 3  # include a split in here
                        also_no_drops.add(d)
                    elif e_dist < len(t) and e_dist < len(d):
                        if H_DEBUG: print "* CASE5:", t, d, e_dist
                        if min_edists[t] == 0 or min_edists[t] > e_dist:
                            min_edists[t] = e_dist
                        no_drops.add(d)
            for k, v in min_edists.iteritems():
                # don't double count ones we've already used in joins
                if k not in used_joins:
                    count += v
            no_drops.update(also_no_drops)
            # drops for all the candidates we didn't just account for
            drops_count = 0
            for d in drop_cands:
                if d not in no_drops and d not in dupes:
                    should_count = True
                    for dupe in dupes:
                        if dupe in d:
                            should_count = False
                            break
                    if should_count:
                        drops_count += 1
            count += drops_count
            if H_DEBUG:
                print "drops:", drops_count
            self.h_score = count
            h_score_source = 7
    if H_DEBUG:
        print "h_score_source:", h_score_source
    self.times['scores'].append(timer() - start)
    if debug:
        return self.h_score, h_score_source
    else:
        return self.h_score
def __hash__(self):
    # Hash on the full-table fingerprint so structurally identical nodes
    # land in the same bucket (pairs with __eq__ below).
    return self.f_hash
def __eq__(self, other):
    """Two nodes are equal if either fingerprint matches.

    Equality holds when the full-table hashes agree, or when the
    column-content hashes agree.
    """
    return self.f_hash == other.f_hash or self.col_hash == other.col_hash
def identical(self, other):
    """Strict identity check: only the full-table fingerprint counts."""
    return self.f_hash == other.f_hash
def __ne__(self, other):
    """Logical inverse of __eq__, with a guard for None comparands."""
    if not other and self:
        return True
    # Unequal only when neither fingerprint matches.
    return not (self.f_hash == other.f_hash or self.col_hash == other.col_hash)
def __str__(self):
    """Describe the operation that produced this node.

    Two-parameter operations also mention the column they acted on;
    everything else is just the operation name.
    """
    op_meta = self.operation[0]
    if op_meta.get('num_params') == 2:
        return op_meta['name'] + " on column " + str(self.operation[1])
    return op_meta['name']
def __cmp__(self, other):
    """Order primarily by f_score ascending; break ties on node_id.

    The tie-break compares node ids in reverse, so among equal scores
    the node with the larger id sorts first.
    """
    if self.f_score != other.f_score:
        return 1 if self.f_score > other.f_score else -1
    if other.node_id != self.node_id:
        return 1 if other.node_id > self.node_id else -1
    return 0
# class method
def get_cols_from_table(table):
    """Transpose a table into column tuples.

    Returns a pair: the raw column tuples, and the de-duplicated column
    values with empty strings removed (each as a tuple built from a set).
    """
    columns = []
    distinct_values = []
    for column in zip(*table):
        columns.append(tuple(column))
        # Drop empty cells before recording the distinct values.
        distinct_values.append(tuple(set(column) - {""}))
    return columns, distinct_values
def table_of_same_values(node_a, node_b):
    """True when both tables hold exactly the same set of cell values.

    The empty string is added to both sides first, so a difference in
    empty cells alone never distinguishes the tables.
    """
    values_a = set(node_a.prop_data) | {""}
    values_b = set(node_b.prop_data) | {""}
    return values_a == values_b
def table_values_is_subset(node_a, node_b):
    """True when node_a's cell values all appear in node_b.

    Empty strings are injected into both sides, so empty cells never
    break the subset relation.
    """
    values_a = set(node_a.prop_data) | {""}
    values_b = set(node_b.prop_data) | {""}
    return values_a <= values_b
| |
# coding=utf-8
#
# Copyright 2014 Red Hat, Inc.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver wrapping the Ironic API, such that Nova may provision
bare metal resources.
"""
import logging as py_logging
import time
from oslo.config import cfg
import six
from nova.compute import arch
from nova.compute import hvtype
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.ironic import client_wrapper
from nova.virt.ironic import ironic_states
from nova.virt.ironic import patcher
ironic = None
LOG = logging.getLogger(__name__)
# Configuration options for talking to the Ironic API service, registered
# under the [ironic] group.
opts = [
    cfg.IntOpt('api_version',
               default=1,
               help='Version of Ironic API service endpoint.'),
    cfg.StrOpt('api_endpoint',
               help='URL for Ironic API endpoint.'),
    cfg.StrOpt('admin_username',
               help='Ironic keystone admin name'),
    cfg.StrOpt('admin_password',
               help='Ironic keystone admin password.'),
    cfg.StrOpt('admin_auth_token',
               help='Ironic keystone auth token.'),
    cfg.StrOpt('admin_url',
               help='Keystone public API endpoint.'),
    cfg.StrOpt('client_log_level',
               help='Log level override for ironicclient. Set this in '
                    'order to override the global "default_log_levels", '
                    '"verbose", and "debug" settings.'),
    cfg.StrOpt('admin_tenant_name',
               help='Ironic keystone tenant name.'),
    cfg.IntOpt('api_max_retries',
               default=60,
               help=('How many retries when a request does conflict.')),
    cfg.IntOpt('api_retry_interval',
               default=2,
               help=('How often to retry in seconds when a request '
                     'does conflict')),
    cfg.BoolOpt('api_insecure',
                default=False,
                # Fixed help text: this option controls SSL verification
                # for requests made to the *ironic* API, not to nova.
                help='Allow to perform insecure SSL requests to ironic'),
]

ironic_group = cfg.OptGroup(name='ironic',
                            title='Ironic Options')

CONF = cfg.CONF
CONF.register_group(ironic_group)
CONF.register_opts(opts, ironic_group)

# Translation table from Ironic power states to Nova power states. Any
# state not listed here is mapped to NOSTATE by map_power_state() below.
_POWER_STATE_MAP = {
    ironic_states.POWER_ON: power_state.RUNNING,
    ironic_states.NOSTATE: power_state.NOSTATE,
    ironic_states.POWER_OFF: power_state.SHUTDOWN,
}
def map_power_state(state):
    """Translate an Ironic power state into a Nova power state.

    Unknown states are logged and reported as NOSTATE.
    """
    if state in _POWER_STATE_MAP:
        return _POWER_STATE_MAP[state]
    LOG.warning(_LW("Power state %s not found."), state)
    return power_state.NOSTATE
def _validate_instance_and_node(icli, instance):
    """Get the node associated with the instance.

    Check with the Ironic service that this instance is associated with a
    node, and return the node.

    :param icli: an Ironic client wrapper.
    :param instance: the instance object (its 'uuid' key is used).
    :raises: exception.InstanceNotFound if Ironic has no node for the
        instance's uuid.
    """
    try:
        # TODO(mrda): Bug ID 1365228 icli should be renamed ironicclient
        # throughout
        return icli.call("node.get_by_instance_uuid", instance['uuid'])
    except ironic.exc.NotFound:
        # Translate the Ironic-level miss into Nova's exception type.
        raise exception.InstanceNotFound(instance_id=instance['uuid'])
def _get_nodes_supported_instances(cpu_arch=None):
    """Return supported instances for a node.

    Without a cpu architecture the node supports nothing; otherwise a
    single (arch, BAREMETAL, HVM) triple is reported.
    """
    supported = []
    if cpu_arch:
        supported.append((cpu_arch, hvtype.BAREMETAL, vm_mode.HVM))
    return supported
def _log_ironic_polling(what, node, instance):
    """Emit a debug line describing why we are still polling this node."""

    def _quote(value):
        # Preserve None as-is; wrap everything else in double quotes so
        # empty strings remain visible in the log output.
        return None if value is None else '"%s"' % value

    LOG.debug('Still waiting for ironic node %(node)s to %(what)s: '
              'power_state=%(power_state)s, '
              'target_power_state=%(tgt_power_state)s, '
              'provision_state=%(prov_state)s, '
              'target_provision_state=%(tgt_prov_state)s',
              dict(what=what,
                   node=node.uuid,
                   power_state=_quote(node.power_state),
                   tgt_power_state=_quote(node.target_power_state),
                   prov_state=_quote(node.provision_state),
                   tgt_prov_state=_quote(node.target_provision_state)),
              instance=instance)
class IronicDriver(virt_driver.ComputeDriver):
"""Hypervisor driver for Ironic - bare metal provisioning."""
capabilities = {"has_imagecache": False,
"supports_recreate": False}
def __init__(self, virtapi, read_only=False):
    """Initialize the Ironic driver.

    :param virtapi: the nova virt API instance passed to the base driver.
    :param read_only: unused by this driver; kept for driver-interface
        compatibility.
    """
    super(IronicDriver, self).__init__(virtapi)
    # Import ironicclient lazily so this module can be loaded without it.
    global ironic
    if ironic is None:
        ironic = importutils.import_module('ironicclient')
    # NOTE(deva): work around a lack of symbols in the current version.
    if not hasattr(ironic, 'exc'):
        ironic.exc = importutils.import_module('ironicclient.exc')
    if not hasattr(ironic, 'client'):
        ironic.client = importutils.import_module(
            'ironicclient.client')

    self.firewall_driver = firewall.load_driver(
        default='nova.virt.firewall.NoopFirewallDriver')
    # Cache of Ironic nodes keyed by UUID, and the timestamp of the last
    # refresh (maintained by _refresh_cache()).
    self.node_cache = {}
    self.node_cache_time = 0

    # TODO(mrda): Bug ID 1365230 Logging configurability needs
    # to be addressed
    icli_log_level = CONF.ironic.client_log_level
    if icli_log_level:
        # Override the ironicclient logger independently of nova's
        # global logging configuration.
        level = py_logging.getLevelName(icli_log_level)
        logger = py_logging.getLogger('ironicclient')
        logger.setLevel(level)
def _node_resources_unavailable(self, node_obj):
    """Determine whether the node's resources are in an acceptable state.

    Determines whether the node's resources should be presented
    to Nova for use based on the current power and maintenance state.
    Returns True if unacceptable.
    """
    if node_obj.maintenance:
        return True
    return node_obj.power_state in (ironic_states.ERROR,
                                    ironic_states.NOSTATE)
def _node_resource(self, node):
    """Helper method to create resource dict from node stats.

    :param node: an Ironic node object.
    :returns: a dict in the shape the resource tracker expects, covering
        capacity, usage and the node's extra specs.
    """
    vcpus = int(node.properties.get('cpus', 0))
    memory_mb = int(node.properties.get('memory_mb', 0))
    local_gb = int(node.properties.get('local_gb', 0))
    raw_cpu_arch = node.properties.get('cpu_arch', None)
    try:
        cpu_arch = arch.canonicalize(raw_cpu_arch)
    except exception.InvalidArchitectureName:
        # Non-canonical arch names are tolerated; we just report no arch.
        cpu_arch = None
    if not cpu_arch:
        LOG.warn(_LW("cpu_arch not defined for node '%s'"), node.uuid)

    nodes_extra_specs = {}

    # NOTE(deva): In Havana and Icehouse, the flavor was required to link
    # to an arch-specific deploy kernel and ramdisk pair, and so the flavor
    # also had to have extra_specs['cpu_arch'], which was matched against
    # the ironic node.properties['cpu_arch'].
    # With Juno, the deploy image(s) may be referenced directly by the
    # node.driver_info, and a flavor no longer needs to contain any of
    # these three extra specs, though the cpu_arch may still be used
    # in a heterogeneous environment, if so desired.
    # NOTE(dprince): we use the raw cpu_arch here because extra_specs
    # filters aren't canonicalized
    nodes_extra_specs['cpu_arch'] = raw_cpu_arch

    # NOTE(gilliard): To assist with more precise scheduling, if the
    # node.properties contains a key 'capabilities', we expect the value
    # to be of the form "k1:v1,k2:v2,etc.." which we add directly as
    # key/value pairs into the node_extra_specs to be used by the
    # ComputeCapabilitiesFilter
    capabilities = node.properties.get('capabilities')
    if capabilities:
        for capability in str(capabilities).split(','):
            parts = capability.split(':')
            if len(parts) == 2 and parts[0] and parts[1]:
                nodes_extra_specs[parts[0]] = parts[1]
            else:
                LOG.warn(_LW("Ignoring malformed capability '%s'. "
                             "Format should be 'key:val'."), capability)

    vcpus_used = 0
    memory_mb_used = 0
    local_gb_used = 0

    if node.instance_uuid:
        # Node has an instance, report all resource as unavailable
        vcpus_used = vcpus
        memory_mb_used = memory_mb
        local_gb_used = local_gb
    elif self._node_resources_unavailable(node):
        # The node's current state is such that it should not present any
        # of its resources to Nova
        vcpus = 0
        memory_mb = 0
        local_gb = 0

    dic = {
        'node': str(node.uuid),
        'hypervisor_hostname': str(node.uuid),
        'hypervisor_type': self._get_hypervisor_type(),
        'hypervisor_version': self._get_hypervisor_version(),
        'cpu_info': 'baremetal cpu',
        'vcpus': vcpus,
        'vcpus_used': vcpus_used,
        'local_gb': local_gb,
        'local_gb_used': local_gb_used,
        'disk_total': local_gb,
        'disk_used': local_gb_used,
        'disk_available': local_gb - local_gb_used,
        'memory_mb': memory_mb,
        'memory_mb_used': memory_mb_used,
        'host_memory_total': memory_mb,
        'host_memory_free': memory_mb - memory_mb_used,
        'supported_instances': jsonutils.dumps(
            _get_nodes_supported_instances(cpu_arch)),
        'stats': jsonutils.dumps(nodes_extra_specs),
        'host': CONF.host,
    }
    # Extra specs are also merged at the top level of the resource dict.
    dic.update(nodes_extra_specs)
    return dic
def _start_firewall(self, instance, network_info):
    """Set up, prepare and apply firewall filtering for the instance."""
    for step in (self.firewall_driver.setup_basic_filtering,
                 self.firewall_driver.prepare_instance_filter,
                 self.firewall_driver.apply_instance_filter):
        step(instance, network_info)
def _stop_firewall(self, instance, network_info):
    # Remove the filtering rules installed by _start_firewall().
    self.firewall_driver.unfilter_instance(instance, network_info)
def _add_driver_fields(self, node, instance, image_meta, flavor,
                       preserve_ephemeral=None):
    """Patch the Ironic node with deploy parameters and claim it.

    Applies the driver-specific deploy patch produced by the patcher and
    sets /instance_uuid to associate the node with this instance.

    :raises: exception.InstanceDeployFailure when Ironic rejects the
        node update.
    """
    icli = client_wrapper.IronicClientWrapper()
    patch = patcher.create(node).get_deploy_patch(instance,
                                                  image_meta,
                                                  flavor,
                                                  preserve_ephemeral)
    # Associate the node with an instance
    patch.append({'path': '/instance_uuid', 'op': 'add',
                  'value': instance['uuid']})
    try:
        icli.call('node.update', node.uuid, patch)
    except ironic.exc.BadRequest:
        msg = (_("Failed to add deploy parameters on node %(node)s "
                 "when provisioning the instance %(instance)s")
               % {'node': node.uuid, 'instance': instance['uuid']})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
def _cleanup_deploy(self, context, node, instance, network_info):
    """Undo the deploy preparations for an instance.

    Reverts the driver-specific node patch, unassociates the node from
    the instance, then unplugs VIFs and tears down firewall rules.

    :raises: exception.InstanceTerminationFailure when Ironic rejects
        the cleanup patch.
    """
    icli = client_wrapper.IronicClientWrapper()
    # TODO(mrda): It would be better to use instance.get_flavor() here
    # but right now that doesn't include extra_specs which are required
    flavor = objects.Flavor.get_by_id(context,
                                      instance['instance_type_id'])
    patch = patcher.create(node).get_cleanup_patch(instance, network_info,
                                                   flavor)
    # Unassociate the node
    patch.append({'op': 'remove', 'path': '/instance_uuid'})
    try:
        icli.call('node.update', node.uuid, patch)
    except ironic.exc.BadRequest:
        LOG.error(_LE("Failed to clean up the parameters on node %(node)s "
                      "when unprovisioning the instance %(instance)s"),
                  {'node': node.uuid, 'instance': instance['uuid']})
        reason = (_("Fail to clean up node %s parameters") % node.uuid)
        raise exception.InstanceTerminationFailure(reason=reason)

    self._unplug_vifs(node, instance, network_info)
    self._stop_firewall(instance, network_info)
def _wait_for_active(self, icli, instance):
    """Wait for the node to be marked as ACTIVE in Ironic.

    Intended to run inside a FixedIntervalLoopingCall: raises
    LoopingCallDone on success, and InstanceNotFound /
    InstanceDeployFailure on terminal states.
    """
    node = _validate_instance_and_node(icli, instance)
    if node.provision_state == ironic_states.ACTIVE:
        # job is done
        LOG.debug("Ironic node %(node)s is now ACTIVE",
                  dict(node=node.uuid), instance=instance)
        raise loopingcall.LoopingCallDone()

    if node.target_provision_state == ironic_states.DELETED:
        # ironic is trying to delete it now
        raise exception.InstanceNotFound(instance_id=instance['uuid'])

    if node.provision_state == ironic_states.NOSTATE:
        # ironic already deleted it
        raise exception.InstanceNotFound(instance_id=instance['uuid'])

    if node.provision_state == ironic_states.DEPLOYFAIL:
        # ironic failed to deploy
        msg = (_("Failed to provision instance %(inst)s: %(reason)s")
               % {'inst': instance['uuid'], 'reason': node.last_error})
        raise exception.InstanceDeployFailure(msg)

    # Not done, not failed: log the current states and keep polling.
    _log_ironic_polling('become ACTIVE', node, instance)
def _wait_for_power_state(self, icli, instance, message):
    """Wait for the node to complete a power state change."""
    node = _validate_instance_and_node(icli, instance)
    if node.target_power_state != ironic_states.NOSTATE:
        # Transition still in progress: log and let the looping call
        # try again.
        _log_ironic_polling(message, node, instance)
        return
    raise loopingcall.LoopingCallDone()
def init_host(self, host):
    """Initialize anything that is necessary for the driver to function.

    :param host: the hostname of the compute host.
    """
    # Nothing to do: all per-request setup happens lazily.
    return
def _get_hypervisor_type(self):
    """Get hypervisor type.

    :returns: the fixed string 'ironic'.
    """
    return 'ironic'
def _get_hypervisor_version(self):
    """Returns the version of the Ironic API service endpoint.

    :returns: the configured [ironic]/api_version integer.
    """
    return CONF.ironic.api_version
def instance_exists(self, instance):
    """Checks the existence of an instance.

    This overrides the base method for efficiency: existence is decided
    by whether Ironic knows a node for the instance.

    :param instance: The instance object.
    :returns: True if the instance exists. False if not.
    """
    icli = client_wrapper.IronicClientWrapper()
    try:
        _validate_instance_and_node(icli, instance)
    except exception.InstanceNotFound:
        return False
    return True
def list_instances(self):
    """Return the names of all the instances provisioned.

    :returns: a list of instance names.
    """
    icli = client_wrapper.IronicClientWrapper()
    context = nova_context.get_admin_context()
    names = []
    for node in icli.call("node.list", associated=True):
        instance = objects.Instance.get_by_uuid(context,
                                                node.instance_uuid)
        names.append(instance.name)
    return names
def list_instance_uuids(self):
    """Return the UUIDs of all the instances provisioned.

    :returns: a list of instance UUIDs.
    """
    icli = client_wrapper.IronicClientWrapper()
    nodes = icli.call("node.list", associated=True)
    return [node.instance_uuid for node in nodes]
def node_is_available(self, nodename):
    """Confirms a Nova hypervisor node exists in the Ironic inventory.

    :param nodename: The UUID of the node.
    :returns: True if the node exists, False if not.
    """
    # Stale cache data is acceptable here: this method only needs to say
    # that existing nodes exist. A node removed from Ironic may linger
    # until the next get_available_nodes() refresh, which is harmless —
    # there is already a race window regardless.
    if not self.node_cache:
        self._refresh_cache()
    if nodename in self.node_cache:
        return True

    # Cache miss: ask Ironic directly. This path should be rare.
    icli = client_wrapper.IronicClientWrapper()
    try:
        icli.call("node.get", nodename)
    except ironic.exc.NotFound:
        return False
    return True
def _refresh_cache(self):
    """Rebuild the UUID -> node map and record the refresh time."""
    icli = client_wrapper.IronicClientWrapper()
    nodes = icli.call('node.list', detail=True)
    self.node_cache = dict((node.uuid, node) for node in nodes)
    self.node_cache_time = time.time()
def get_available_nodes(self, refresh=False):
    """Returns the UUIDs of all nodes in the Ironic inventory.

    :param refresh: Boolean value; If True run update first. Ignored by
        this driver.
    :returns: a list of UUIDs
    """
    # The cache is rebuilt on every call: the resource tracker's
    # periodic task needs fresh data but does not pass refresh=True.
    self._refresh_cache()
    uuids = list(self.node_cache)
    LOG.debug("Returning %(num_nodes)s available node(s)",
              dict(num_nodes=len(uuids)))
    return uuids
def get_available_resource(self, nodename):
    """Retrieve resource information.

    This method is called when nova-compute launches, and
    as part of a periodic task that records the results in the DB.

    :param nodename: the UUID of the node.
    :returns: a dictionary describing resources.
    """
    # Cached data is acceptable: this runs from a periodic task right
    # after get_available_nodes() has just refreshed the cache. It is
    # also reached from init_host(), so populate an empty cache first.
    if not self.node_cache:
        self._refresh_cache()

    cache_age = time.time() - self.node_cache_time
    node = self.node_cache.get(nodename)
    if node is not None:
        LOG.debug("Using cache for node %(node)s, age: %(age)s",
                  {'node': nodename, 'age': cache_age})
    else:
        LOG.debug("Node %(node)s not found in cache, age: %(age)s",
                  {'node': nodename, 'age': cache_age})
        icli = client_wrapper.IronicClientWrapper()
        node = icli.call("node.get", nodename)
    return self._node_resource(node)
def get_info(self, instance):
    """Get the current state and resource usage for this instance.

    If the instance is not found this method returns (a dictionary
    with) NOSTATE and all resources == 0.

    :param instance: the instance object.
    :returns: a dictionary containing:

        :state: the running state. One of :mod:`nova.compute.power_state`.
        :max_mem: (int) the maximum memory in KBytes allowed.
        :mem: (int) the memory in KBytes used by the domain.
        :num_cpu: (int) the number of CPUs.
        :cpu_time: (int) the CPU time used in nanoseconds. Always 0 for
                   this driver.
    """
    icli = client_wrapper.IronicClientWrapper()
    try:
        node = _validate_instance_and_node(icli, instance)
    except exception.InstanceNotFound:
        # Gone instances report an empty, stopped-looking result rather
        # than raising.
        return {'state': map_power_state(ironic_states.NOSTATE),
                'max_mem': 0,
                'mem': 0,
                'num_cpu': 0,
                'cpu_time': 0
                }

    # Node properties store memory in MiB; this API reports KiB.
    memory_kib = int(node.properties.get('memory_mb', 0)) * 1024
    if memory_kib == 0:
        LOG.warn(_LW("Warning, memory usage is 0 for "
                     "%(instance)s on baremetal node %(node)s."),
                 {'instance': instance['uuid'],
                  'node': instance['node']})

    num_cpu = node.properties.get('cpus', 0)
    if num_cpu == 0:
        LOG.warn(_LW("Warning, number of cpus is 0 for "
                     "%(instance)s on baremetal node %(node)s."),
                 {'instance': instance['uuid'],
                  'node': instance['node']})

    return {'state': map_power_state(node.power_state),
            'max_mem': memory_kib,
            'mem': memory_kib,
            'num_cpu': num_cpu,
            'cpu_time': 0
            }
def deallocate_networks_on_reschedule(self, instance):
    """Does the driver want networks deallocated on reschedule?

    :param instance: the instance object.
    :returns: Boolean value. If True deallocate networks on reschedule.
    """
    # NOTE(review): always True here — presumably because network
    # allocations are tied to a specific node's ports; confirm.
    return True
def macs_for_instance(self, instance):
    """List the MAC addresses of an instance.

    List of MAC addresses for the node which this instance is
    associated with.

    :param instance: the instance object.
    :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
        None means 'no constraints', a set means 'these and only these
        MAC addresses'.
    """
    icli = client_wrapper.IronicClientWrapper()
    try:
        node = icli.call("node.get", instance['node'])
    except ironic.exc.NotFound:
        # No backing node: no MAC constraint to report.
        return None
    ports = icli.call("node.list_ports", node.uuid)
    return {port.address for port in ports}
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Deploy an instance.

    :param context: The security context.
    :param instance: The instance object.
    :param image_meta: Image dict returned by nova.image.glance
        that defines the image from which to boot this instance.
    :param injected_files: User files to inject into instance. Ignored
        by this driver.
    :param admin_password: Administrator password to set in
        instance. Ignored by this driver.
    :param network_info: Instance network information.
    :param block_device_info: Instance block device
        information. Ignored by this driver.
    :raises: ironic.exc.BadRequest if the instance has no node uuid.
    :raises: exception.ValidationError if the node fails Ironic's
        pre-deploy validation.
    """
    # The compute manager is meant to know the node uuid, so missing uuid
    # is a significant issue. It may mean we've been passed the wrong data.
    node_uuid = instance.get('node')
    if not node_uuid:
        raise ironic.exc.BadRequest(
            _("Ironic node uuid not supplied to "
              "driver for instance %s.") % instance['uuid'])

    icli = client_wrapper.IronicClientWrapper()
    node = icli.call("node.get", node_uuid)
    flavor = objects.Flavor.get_by_id(context,
                                      instance['instance_type_id'])

    self._add_driver_fields(node, instance, image_meta, flavor)

    # NOTE(Shrews): The default ephemeral device needs to be set for
    # services (like cloud-init) that depend on it being returned by the
    # metadata server. Addresses bug https://launchpad.net/bugs/1324286.
    if flavor['ephemeral_gb']:
        instance.default_ephemeral_device = '/dev/sda1'
        instance.save()

    # validate we are ready to do the deploy
    validate_chk = icli.call("node.validate", node_uuid)
    if not validate_chk.deploy or not validate_chk.power:
        # something is wrong. undo what we have done
        self._cleanup_deploy(context, node, instance, network_info)
        raise exception.ValidationError(_(
            "Ironic node: %(id)s failed to validate."
            " (deploy: %(deploy)s, power: %(power)s)")
            % {'id': node.uuid,
               'deploy': validate_chk.deploy,
               'power': validate_chk.power})

    # prepare for the deploy
    try:
        self._plug_vifs(node, instance, network_info)
        self._start_firewall(instance, network_info)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error preparing deploy for instance "
                          "%(instance)s on baremetal node %(node)s."),
                      {'instance': instance['uuid'],
                       'node': node_uuid})
            self._cleanup_deploy(context, node, instance, network_info)

    # trigger the node deploy
    try:
        icli.call("node.set_provision_state", node_uuid,
                  ironic_states.ACTIVE)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            # Bug fix: previously the format string and its argument
            # dict were bundled into a tuple assigned to `msg` and the
            # tuple itself was logged, so %(inst)s/%(reason)s never got
            # interpolated. Pass them to LOG.error separately.
            LOG.error(_LE("Failed to request Ironic to provision instance "
                          "%(inst)s: %(reason)s"),
                      {'inst': instance['uuid'],
                       'reason': six.text_type(e)})
            self._cleanup_deploy(context, node, instance, network_info)

    timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
                                                 icli, instance)
    try:
        # Poll until the node reports ACTIVE or a terminal failure.
        timer.start(interval=CONF.ironic.api_retry_interval).wait()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error deploying instance %(instance)s on "
                          "baremetal node %(node)s."),
                      {'instance': instance['uuid'],
                       'node': node_uuid})
            self.destroy(context, instance, network_info)
def _unprovision(self, icli, instance, node):
    """This method is called from destroy() to unprovision
    already provisioned node after required checks.

    :raises: exception.NovaException when the node does not reach an
        unprovisioned state within CONF.ironic.api_max_retries polls.
    """
    try:
        icli.call("node.set_provision_state", node.uuid, "deleted")
    except Exception as e:
        # if the node is already in a deprovisioned state, continue
        # This should be fixed in Ironic.
        # TODO(deva): This exception should be added to
        #             python-ironicclient and matched directly,
        #             rather than via __name__.
        # Bug fix: exception *instances* have no __name__ attribute, so
        # getattr(e, '__name__', None) always returned None and every
        # exception was re-raised. Read the name off the exception's
        # class instead so InstanceDeployFailure is tolerated as
        # intended.
        if type(e).__name__ != 'InstanceDeployFailure':
            raise

    # using a dict because this is modified in the local method
    data = {'tries': 0}

    def _wait_for_provision_state():
        node = _validate_instance_and_node(icli, instance)
        if not node.provision_state:
            LOG.debug("Ironic node %(node)s is now unprovisioned",
                      dict(node=node.uuid), instance=instance)
            raise loopingcall.LoopingCallDone()

        if data['tries'] >= CONF.ironic.api_max_retries:
            msg = (_("Error destroying the instance on node %(node)s. "
                     "Provision state still '%(state)s'.")
                   % {'state': node.provision_state,
                      'node': node.uuid})
            LOG.error(msg)
            raise exception.NovaException(msg)
        else:
            data['tries'] += 1

        _log_ironic_polling('unprovision', node, instance)

    # wait for the state transition to finish
    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_provision_state)
    timer.start(interval=CONF.ironic.api_retry_interval).wait()
def destroy(self, context, instance, network_info,
block_device_info=None, destroy_disks=True, migrate_data=None):
"""Destroy the specified instance, if it can be found.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information.
:param block_device_info: Instance block device
information. Ignored by this driver.
:param destroy_disks: Indicates if disks should be
destroyed. Ignored by this driver.
:param migrate_data: implementation specific params.
Ignored by this driver.
"""
icli = client_wrapper.IronicClientWrapper()
try:
node = _validate_instance_and_node(icli, instance)
except exception.InstanceNotFound:
LOG.warning(_LW("Destroy called on non-existing instance %s."),
instance['uuid'])
# NOTE(deva): if nova.compute.ComputeManager._delete_instance()
# is called on a non-existing instance, the only way
# to delete it is to return from this method
# without raising any exceptions.
return
if node.provision_state in (ironic_states.ACTIVE,
ironic_states.DEPLOYFAIL,
ironic_states.ERROR,
ironic_states.DEPLOYWAIT):
self._unprovision(icli, instance, node)
self._cleanup_deploy(context, node, instance, network_info)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
NOTE: Ironic does not support soft-off, so this method
always performs a hard-reboot.
NOTE: Unlike the libvirt driver, this method does not delete
and recreate the instance; it preserves local state.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information. Ignored by
this driver.
:param reboot_type: Either a HARD or SOFT reboot. Ignored by
this driver.
:param block_device_info: Info pertaining to attached volumes.
Ignored by this driver.
:param bad_volumes_callback: Function to handle any bad volumes
encountered. Ignored by this driver.
"""
icli = client_wrapper.IronicClientWrapper()
node = _validate_instance_and_node(icli, instance)
icli.call("node.set_power_state", node.uuid, 'reboot')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state,
icli, instance, 'reboot')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
NOTE: Ironic does not support soft-off, so this method ignores
timeout and retry_interval parameters.
NOTE: Unlike the libvirt driver, this method does not delete
and recreate the instance; it preserves local state.
:param instance: The instance object.
:param timeout: time to wait for node to shutdown. Ignored by
this driver.
:param retry_interval: How often to signal node while waiting
for it to shutdown. Ignored by this driver.
"""
icli = client_wrapper.IronicClientWrapper()
node = _validate_instance_and_node(icli, instance)
icli.call("node.set_power_state", node.uuid, 'off')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state,
icli, instance, 'power off')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance.
NOTE: Unlike the libvirt driver, this method does not delete
and recreate the instance; it preserves local state.
:param context: The security context.
:param instance: The instance object.
:param network_info: Instance network information. Ignored by
this driver.
:param block_device_info: Instance block device
information. Ignored by this driver.
"""
icli = client_wrapper.IronicClientWrapper()
node = _validate_instance_and_node(icli, instance)
icli.call("node.set_power_state", node.uuid, 'on')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state,
icli, instance, 'power on')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def get_host_stats(self, refresh=False):
"""Return the currently known stats for all Ironic nodes.
:param refresh: Boolean value; If True run update first. Ignored by
this driver.
:returns: a list of dictionaries; each dictionary contains the
stats for a node.
"""
caps = []
icli = client_wrapper.IronicClientWrapper()
node_list = icli.call("node.list")
for node in node_list:
data = self._node_resource(node)
caps.append(data)
return caps
def refresh_security_group_rules(self, security_group_id):
"""Refresh security group rules from data store.
Invoked when security group rules are updated.
:param security_group_id: The security group id.
"""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""Refresh security group members from data store.
Invoked when instances are added/removed to a security group.
:param security_group_id: The security group id.
"""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_provider_fw_rules(self):
"""Triggers a firewall update based on database changes."""
self.firewall_driver.refresh_provider_fw_rules()
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules from data store.
Gets called when an instance gets added to or removed from
the security group the instance is a member of or if the
group gains or loses a rule.
:param instance: The instance object.
"""
self.firewall_driver.refresh_instance_security_rules(instance)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Set up filtering rules.
:param instance: The instance object.
:param network_info: Instance network information.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance.
:param instance: The instance object.
:param network_info: Instance network information.
"""
self.firewall_driver.unfilter_instance(instance, network_info)
def _plug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
# it wraps hasn't finished, so do this ahead of time so that we
# don't block while holding the logging lock.
network_info_str = str(network_info)
LOG.debug("plug: instance_uuid=%(uuid)s vif=%(network_info)s",
{'uuid': instance['uuid'],
'network_info': network_info_str})
# start by ensuring the ports are clear
self._unplug_vifs(node, instance, network_info)
icli = client_wrapper.IronicClientWrapper()
ports = icli.call("node.list_ports", node.uuid)
if len(network_info) > len(ports):
raise exception.NovaException(_(
"Ironic node: %(id)s virtual to physical interface count"
" missmatch"
" (Vif count: %(vif_count)d, Pif count: %(pif_count)d)")
% {'id': node.uuid,
'vif_count': len(network_info),
'pif_count': len(ports)})
if len(network_info) > 0:
# not needed if no vif are defined
for vif, pif in zip(network_info, ports):
# attach what neutron needs directly to the port
port_id = unicode(vif['id'])
patch = [{'op': 'add',
'path': '/extra/vif_port_id',
'value': port_id}]
icli.call("port.update", pif.uuid, patch)
def _unplug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
# it wraps hasn't finished, so do this ahead of time so that we
# don't block while holding the logging lock.
network_info_str = str(network_info)
LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(network_info)s",
{'uuid': instance['uuid'],
'network_info': network_info_str})
if network_info and len(network_info) > 0:
icli = client_wrapper.IronicClientWrapper()
ports = icli.call("node.list_ports", node.uuid)
# not needed if no vif are defined
for vif, pif in zip(network_info, ports):
# we can not attach a dict directly
patch = [{'op': 'remove', 'path': '/extra/vif_port_id'}]
try:
icli.call("port.update", pif.uuid, patch)
except ironic.exc.BadRequest:
pass
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks.
:param instance: The instance object.
:param network_info: Instance network information.
"""
icli = client_wrapper.IronicClientWrapper()
node = icli.call("node.get", instance['node'])
self._plug_vifs(node, instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks.
:param instance: The instance object.
:param network_info: Instance network information.
"""
icli = client_wrapper.IronicClientWrapper()
node = icli.call("node.get", instance['node'])
self._unplug_vifs(node, instance, network_info)
    def rebuild(self, context, instance, image_meta, injected_files,
                admin_password, bdms, detach_block_devices,
                attach_block_devices, network_info=None,
                recreate=False, block_device_info=None,
                preserve_ephemeral=False):
        """Rebuild/redeploy an instance.
        This version of rebuild() allows for supporting the option to
        preserve the ephemeral partition. We cannot call spawn() from
        here because it will attempt to set the instance_uuid value
        again, which is not allowed by the Ironic API. It also requires
        the instance to not have an 'active' provision state, but we
        cannot safely change that. Given that, we implement only the
        portions of spawn() we need within rebuild().
        :param context: The security context.
        :param instance: The instance object.
        :param image_meta: Image object returned by nova.image.glance
            that defines the image from which to boot this instance. Ignored
            by this driver.
        :param injected_files: User files to inject into instance. Ignored
            by this driver.
        :param admin_password: Administrator password to set in
            instance. Ignored by this driver.
        :param bdms: block-device-mappings to use for rebuild. Ignored
            by this driver.
        :param detach_block_devices: function to detach block devices. See
            nova.compute.manager.ComputeManager:_rebuild_default_impl for
            usage. Ignored by this driver.
        :param attach_block_devices: function to attach block devices. See
            nova.compute.manager.ComputeManager:_rebuild_default_impl for
            usage. Ignored by this driver.
        :param network_info: Instance network information. Ignored by
            this driver.
        :param recreate: Boolean value; if True the instance is
            recreated on a new hypervisor - all the cleanup of old state is
            skipped. Ignored by this driver.
        :param block_device_info: Instance block device
            information. Ignored by this driver.
        :param preserve_ephemeral: Boolean value; if True the ephemeral
            must be preserved on rebuild.
        :raises: InstanceDeployFailure if Ironic rejects the rebuild request.
        """
        # Move to REBUILD_SPAWNING; save() enforces that no other actor
        # already moved the instance out of REBUILDING.
        instance.task_state = task_states.REBUILD_SPAWNING
        instance.save(expected_task_state=[task_states.REBUILDING])
        node_uuid = instance.node
        icli = client_wrapper.IronicClientWrapper()
        node = icli.call("node.get", node_uuid)
        flavor = objects.Flavor.get_by_id(context,
                                          instance['instance_type_id'])
        # Re-stamp the node with instance/image/flavor driver fields,
        # honoring the preserve_ephemeral request, before redeploying.
        self._add_driver_fields(node, instance, image_meta, flavor,
                                preserve_ephemeral)
        # Trigger the node rebuild/redeploy.
        try:
            icli.call("node.set_provision_state",
                      node_uuid, ironic_states.REBUILD)
        except (exception.NovaException,         # Retry failed
                ironic.exc.InternalServerError,  # Validations
                ironic.exc.BadRequest) as e:     # Maintenance
            msg = (_("Failed to request Ironic to rebuild instance "
                     "%(inst)s: %(reason)s") % {'inst': instance['uuid'],
                                                'reason': six.text_type(e)})
            raise exception.InstanceDeployFailure(msg)
        # Although the target provision state is REBUILD, it will actually go
        # to ACTIVE once the redeploy is finished.
        timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
                                                     icli, instance)
        timer.start(interval=CONF.ironic.api_retry_interval).wait()
| |
import os
import re
import sys
import shutil
from datetime import datetime
from flask import g, url_for, make_response, request, send_file, json
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from pegasus.service import app, db
from pegasus.service.command import ClientCommand, CompoundCommand
from pegasus.service.api import *
# Allowed serialization formats, one list per catalog kind.
SC_FORMATS = ["XML","XML2"]   # site catalogs
TC_FORMATS = ["File","Text"]  # transformation catalogs
RC_FORMATS = ["File","Regex"] # replica catalogs
# Maps catalog type name to the list of formats it accepts.
FORMATS = {
    "replica": RC_FORMATS,
    "transformation": TC_FORMATS,
    "site": SC_FORMATS
}
def validate_catalog_name(name):
    """Validate a catalog name and return it unchanged.

    Raises APIError when the name is missing, is 100 characters or
    longer, contains a literal "..", or contains anything other than
    ASCII letters, digits and dots.
    """
    if name is None:
        raise APIError("Specify catalog name")
    length = len(name)
    if length >= 100:
        raise APIError("Catalog name too long: %d" % length)
    well_formed = re.match(r"\A[a-zA-Z0-9.]+\Z", name) is not None
    if ".." in name or not well_formed:
        raise APIError("Invalid catalog name: %s" % name)
    return name
def validate_catalog_format(catalog_type, format):
    """Return the canonical spelling of *format* for *catalog_type*.

    The format comparison is case-insensitive; the value returned is the
    canonical capitalization from FORMATS. Raises APIError for an
    unknown catalog type, a None format, or an unsupported format.
    """
    if catalog_type not in FORMATS:
        raise APIError("Invalid catalog type: %s" % catalog_type)
    canonical = dict((fmt.lower(), fmt) for fmt in FORMATS[catalog_type])
    if format is None:
        raise APIError("Invalid catalog format: None")
    key = format.lower()
    if key not in canonical:
        raise APIError("Invalid %s catalog format: %s" % (catalog_type, format))
    return canonical[key]
class CatalogMixin:
    """Shared behaviour for the catalog model classes.

    Provides validated setters for name/format, timestamp stamping, and
    persistence of the catalog file under the user's data directory.
    Expects the mixed-in class to define __catalog_type__ and a .user
    relationship.
    """
    def set_name(self, name):
        """Validate and store the catalog name."""
        self.name = validate_catalog_name(name)
    def set_created(self):
        """Stamp the creation time (UTC)."""
        self.created = datetime.utcnow()
    def set_updated(self):
        """Stamp the last-update time (UTC)."""
        self.updated = datetime.utcnow()
    def set_format(self, format):
        """Validate the format against this catalog type and store it."""
        self.format = validate_catalog_format(self.__catalog_type__, format)
    def get_catalog_file(self):
        """Return the on-disk path of this catalog's contents."""
        userdata = self.user.get_userdata_dir()
        return os.path.join(userdata, "catalogs", self.__catalog_type__, self.name)
    def save_catalog_file(self, file):
        """Replace the on-disk catalog contents with data from *file*."""
        target = self.get_catalog_file()
        parent = os.path.dirname(target)
        if os.path.exists(target):
            os.remove(target)
        if not os.path.exists(parent):
            os.makedirs(parent)
        out = open(target, "wb")
        try:
            shutil.copyfileobj(file, out)
        finally:
            out.close()
class ReplicaCatalog(CatalogMixin, db.Model):
    """Database record for a user's replica catalog; contents live on disk."""
    # NOTE(review): base-class order (CatalogMixin, db.Model) differs from
    # SiteCatalog/TransformationCatalog, which use (db.Model, CatalogMixin);
    # presumably unintentional -- confirm before unifying.
    __tablename__ = 'replica_catalog'
    __table_args__ = (
        db.UniqueConstraint('user_id', 'name'),  # one catalog name per user
        {'mysql_engine':'InnoDB'}
    )
    __catalog_type__ = 'replica'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False)
    format = db.Column(db.Enum(*RC_FORMATS), nullable=False)
    created = db.Column(db.DateTime, nullable=False)
    updated = db.Column(db.DateTime, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship("User")
    def __init__(self, user_id, name, format):
        """Validate name/format and stamp created/updated timestamps."""
        self.user_id = user_id
        self.set_name(name)
        self.set_format(format)
        self.set_created()
        self.set_updated()
class SiteCatalog(db.Model, CatalogMixin):
    """Database record for a user's site catalog; contents live on disk."""
    __tablename__ = 'site_catalog'
    __table_args__ = (
        db.UniqueConstraint('user_id', 'name'),  # one catalog name per user
        {'mysql_engine':'InnoDB'}
    )
    __catalog_type__ = 'site'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False)
    format = db.Column(db.Enum(*SC_FORMATS), nullable=False)
    created = db.Column(db.DateTime, nullable=False)
    updated = db.Column(db.DateTime, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship("User")
    def __init__(self, user_id, name, format):
        """Validate name/format and stamp created/updated timestamps."""
        self.user_id = user_id
        self.set_name(name)
        self.set_format(format)
        self.set_created()
        self.set_updated()
class TransformationCatalog(db.Model, CatalogMixin):
    """Database record for a user's transformation catalog; contents on disk."""
    __tablename__ = 'transformation_catalog'
    __table_args__ = (
        db.UniqueConstraint('user_id', 'name'),  # one catalog name per user
        {'mysql_engine':'InnoDB'}
    )
    __catalog_type__ = 'transformation'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), nullable=False)
    format = db.Column(db.Enum(*TC_FORMATS), nullable=False)
    created = db.Column(db.DateTime, nullable=False)
    updated = db.Column(db.DateTime, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship("User")
    def __init__(self, user_id, name, format):
        """Validate name/format and stamp created/updated timestamps."""
        self.user_id = user_id
        self.set_name(name)
        self.set_format(format)
        self.set_created()
        self.set_updated()
def catalog_object(catalog_type, c):
    """Serialize catalog *c* into a JSON-friendly dict with an href link."""
    href = url_for("route_get_catalog", catalog_type=catalog_type,
                   name=c.name, _external=True)
    return {
        "id": c.id,
        "name": c.name,
        "created": c.created,
        "updated": c.updated,
        "format": c.format,
        "href": href
    }
def get_catalog_model(catalog_type):
    """Map a catalog type string to its SQLAlchemy model class."""
    models = {
        "replica": ReplicaCatalog,
        "site": SiteCatalog,
        "transformation": TransformationCatalog
    }
    try:
        return models[catalog_type]
    except KeyError:
        raise APIError("Invalid catalog type: %s" % catalog_type, status_code=400)
def get_catalog(catalog_type, user_id, name):
    """Fetch a single catalog of the given type for a user.

    :raises APIError: 400 for an unknown catalog type, 404 when no
        catalog with that name exists for the user.
    """
    # Resolve the model outside the try block: get_catalog_model() raises
    # APIError, never NoResultFound, so keep the try body minimal.
    Catalog = get_catalog_model(catalog_type)
    try:
        return Catalog.query.filter_by(user_id=user_id, name=name).one()
    except NoResultFound:
        raise APIError("No such catalog: %s" % name, 404)
def list_catalogs(catalog_type, user_id):
    """Return all of the user's catalogs of the given type, oldest update first."""
    model = get_catalog_model(catalog_type)
    query = model.query.filter_by(user_id=user_id).order_by("updated")
    return query.all()
def save_catalog(catalog_type, user_id, name, format, file):
    """Create a catalog record, persist its file contents, and return it.

    Raises APIError when the (user, name) pair already exists.
    """
    Catalog = get_catalog_model(catalog_type)
    catalog = Catalog(user_id, name, format)
    try:
        db.session.add(catalog)
        db.session.flush()
    except IntegrityError:
        raise APIError("Duplicate catalog name")
    catalog.save_catalog_file(file)
    return catalog
@app.route("/catalogs", methods=["GET"])
def route_all_catalogs():
    """Return links to the per-type catalog listings."""
    catalog_types = ("site", "replica", "transformation")
    links = dict(
        (t, url_for("route_list_catalogs", catalog_type=t, _external=True))
        for t in catalog_types)
    return json_response(links)
@app.route("/catalogs/<string:catalog_type>", methods=["GET"])
def route_list_catalogs(catalog_type):
    """List the current user's catalogs of the given type."""
    catalogs = list_catalogs(catalog_type, g.user.id)
    return json_response([catalog_object(catalog_type, c) for c in catalogs])
@app.route("/catalogs/<string:catalog_type>", methods=["POST"])
def route_store_catalog(catalog_type):
    """Create a catalog from form fields 'name'/'format' and an uploaded file."""
    name = request.form.get("name", None)
    format = request.form.get("format", None)
    file = request.files.get("file", None)
    # Validate required fields in the same order they are documented.
    for value, label in ((name, "name"), (format, "format"), (file, "file")):
        if value is None:
            raise APIError("Specify %s" % label)
    save_catalog(catalog_type, g.user.id, name, format, file)
    db.session.commit()
    location = url_for("route_get_catalog", catalog_type=catalog_type,
                       name=name, _external=True)
    return json_created(location)
@app.route("/catalogs/<string:catalog_type>/<string:name>", methods=["GET"])
def route_get_catalog(catalog_type, name):
    """Send the raw on-disk file for the named catalog."""
    catalog = get_catalog(catalog_type, g.user.id, name)
    path = catalog.get_catalog_file()
    if not os.path.exists(path):
        raise APIError("No such catalog: %s" % name, 404)
    return send_file(path, mimetype="text/plain")
@app.route("/catalogs/<string:catalog_type>/<string:name>", methods=["DELETE"])
def route_delete_catalog(catalog_type, name):
    """Delete a catalog record and its backing file."""
    catalog = get_catalog(catalog_type, g.user.id, name)
    db.session.delete(catalog)
    # Flush first so we know the database delete will succeed
    # before the file is removed from disk.
    db.session.flush()
    path = catalog.get_catalog_file()
    if os.path.exists(path):
        os.remove(path)
    db.session.commit()
    return json_response({"message":"deleted"})
@app.route("/catalogs/<string:catalog_type>/<string:name>", methods=["PUT"])
def route_update_catalog(catalog_type, name):
    """Update a catalog's format and/or file contents."""
    catalog = get_catalog(catalog_type, g.user.id, name)
    catalog.set_updated()
    format = request.form.get("format", None)
    if format is not None:
        catalog.set_format(format)
    # Flush the database changes first so we know they will go
    # through before touching the file on disk.
    db.session.flush()
    file = request.files.get("file", None)
    if file is not None:
        # Replace the file contents.
        catalog.save_catalog_file(file)
    db.session.commit()
    return json_response(catalog_object(catalog_type, catalog))
def add_type_option(self):
    """Attach the -t/--type option to the command's option parser."""
    self.parser.add_option(
        "-t", "--type", action="store", dest="type", default=None,
        help="Catalog type (replica, site, transformation)")
def add_name_option(self):
    """Attach the -n/--name option to the command's option parser."""
    self.parser.add_option(
        "-n", "--name", action="store", dest="name", default=None,
        help="Catalog name")
def add_format_option(self):
    """Attach the -F/--format option to the command's option parser."""
    self.parser.add_option(
        "-F", "--format", action="store", dest="format", default=None,
        help="Catalog format (transformation: Text, File; replica: File, Regex; site: XML, XML2)")
def add_file_option(self):
    """Attach the -f/--file option to the command's option parser."""
    self.parser.add_option(
        "-f", "--file", action="store", dest="file", default=None,
        help="Catalog file")
class ListCommand(ClientCommand):
description = "List stored catalogs"
usage = "Usage: %prog list -t TYPE"
def __init__(self):
ClientCommand.__init__(self)
add_type_option(self)
def run(self):
if self.options.type is None:
self.parser.error("Specify -t/--type")
response = self.get("/catalogs/%s" % self.options.type)
result = response.json()
if response.status_code != 200:
print "ERROR:",result["message"]
exit(1)
fmt = "%-20s %-8s %-32s %-32s"
if len(result) > 0:
print fmt % ("NAME","FORMAT","CREATED","UPDATED")
for r in result:
print fmt % (r["name"], r["format"], r["created"], r["updated"])
class UploadCommand(ClientCommand):
description = "Upload a catalog to the server"
usage = "Usage: %prog upload -t TYPE -n NAME -F FORMAT -f FILE"
def __init__(self):
ClientCommand.__init__(self)
add_type_option(self)
add_name_option(self)
add_format_option(self)
add_file_option(self)
def run(self):
if len(self.args) > 0:
self.parser.error("Invalid argument")
o = self.options
if o.type is None:
self.parser.error("Specify -t/--type")
if o.name is None:
self.parser.error("Specify -n/--name")
if o.format is None:
self.parser.error("Specify -F/--format")
if o.file is None:
self.parser.error("Specify -f/--file")
data = {"name": o.name, "format": o.format}
files = {"file": open(o.file, "rb")}
response = self.post("/catalogs/%s" % o.type, data=data, files=files)
if response.status_code != 201:
result = response.json()
print "ERROR:",response.status_code,result["message"]
exit(1)
class UpdateCommand(ClientCommand):
description = "Update a catalog"
usage = "Usage: %prog update -t TYPE -n NAME -F FORMAT -f FILE"
def __init__(self):
ClientCommand.__init__(self)
add_type_option(self)
add_name_option(self)
add_format_option(self)
add_file_option(self)
def run(self):
if len(self.args) > 0:
self.parser.error("Invalid argument")
o = self.options
if o.type is None:
self.parser.error("Specify -t/--type")
if o.name is None:
self.parser.error("Specify -n/--name")
if o.format is None and o.file is None:
self.parser.error("Specify -F/--format and/or -f/--file")
elif len(self.args) > 4:
self.parser.error("Invalid argument")
if o.format:
data = {"format": o.format}
else:
data = {}
if o.file:
files = {"file": open(o.file, "rb")}
else:
files = {}
response = self.put("/catalogs/%s/%s" % (o.type, o.name), data=data, files=files)
if response.status_code != 200:
result = response.json()
print "ERROR:",result["message"]
exit(1)
class DeleteCommand(ClientCommand):
description = "Delete a catalog"
usage = "Usage: %prog delete -t TYPE -n NAME"
def __init__(self):
ClientCommand.__init__(self)
add_type_option(self)
add_name_option(self)
def run(self):
if len(self.args) > 0:
self.parser.error("Invalid argument")
o = self.options
if o.type is None:
self.parser.error("Specify -t/--type")
if o.name is None:
self.parser.error("Specify -n/--name")
response = self.delete("/catalogs/%s/%s" % (o.type, o.name))
result = response.json()
if response.status_code != 200:
print "ERROR:",result["message"]
exit(1)
class DownloadCommand(ClientCommand):
description = "Download a catalog"
usage = "Usage: %prog download -t TYPE -n NAME"
def __init__(self):
ClientCommand.__init__(self)
add_type_option(self)
add_name_option(self)
def run(self):
if len(self.args) > 0:
self.parser.error("Invalid argument")
o = self.options
if o.type is None:
self.parser.error("Specify -t/--type")
if o.name is None:
self.parser.error("Specify -n/--name")
response = self.get("/catalogs/%s/%s" % (o.type, o.name), stream=True)
if response.status_code != 200:
result = response.json()
print "ERROR:",result["message"]
exit(1)
for chunk in response:
sys.stdout.write(chunk)
class CatalogCommand(CompoundCommand):
    """Top-level CLI dispatcher mapping subcommand names to their classes."""
    description = "Client for catalog management"
    # (subcommand name, handler class) pairs consumed by CompoundCommand.
    commands = [
        ("list", ListCommand),
        ("upload", UploadCommand),
        ("download", DownloadCommand),
        ("update", UpdateCommand),
        ("delete", DeleteCommand)
    ]
def main():
    """The entry point for pegasus-service-catalogs."""
    command = CatalogCommand()
    command.main()
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""DIF Status Report Generator.
This tool generates a status report for the DIFs by cross referencing the git
commit history of each DIF with that of the HW it actuates to provide OpenTitan
developers with information about what DIFs require updating.
To display usage run:
./check_dif_statuses.py --help
"""
import argparse
import collections
import io
import itertools
import json
import logging
import re
import subprocess
import sys
from contextlib import redirect_stdout
from enum import Enum
from pathlib import Path
from typing import List
import enlighten
import gitfame
import hjson
import pydriller
from tabulate import tabulate
from termcolor import colored
import topgen.lib as lib
# Maintain a list of IPs that only exist in the top-level area.
#
# Note that there are several templated IPs that are auto-generated in the
# top-level area as well, but since the bulk of the code (including the
# template) lives in the hw/ip area, we do not need to consider them.
# These IPs are slowly being migrated to use the `ipgen` tooling, and are
# defined in the IPS_USING_IPGEN list in the make_new_dif.ip module imported
# above.
_TOP_LEVEL_IPS = {"ast", "sensor_ctrl"}
# Indicates that the DIF work has not yet started (rendered red in reports).
_NOT_STARTED = colored("NOT STARTED", "red")
# This file is $REPO_TOP/util/check_dif_statuses.py, so it takes two parent()
# calls to get back to the top.
REPO_TOP = Path(__file__).resolve().parent.parent
# Define the DIF library relative to REPO_TOP.
DIFS_RELATIVE_PATH = Path("sw/device/lib/dif")
class _OTComponent(Enum):
    """Type of OpenTitan component."""
    DIF = 1  # software Device Interface Function
    HW = 2   # hardware RTL the DIF actuates
class DIFStatus:
"""Holds all DIF status information for displaying.
Attributes:
dif_name (str): Full name of the DIF including the IP name.
ip (str): Name of the IP the DIF is associated with.
dif_path (Path): Path to the DIF code (relative to REPO_TOP).
hw_path (Path): Path to the HW RTL associated with this DIF.
dif_last_modified (datetime): Date and time the DIF was last modified.
hw_last_modified (datetime): Date and time the HW was last modified.
dif_main_contributors (List[str]): List of emails of DIF contributors.
hw_main_constributors (List[str]): List of emails of HW contributors.
lifecycle_state (str): Lifecycle state string (e.g., S0, S1, ...).
num_functions_defined (int): Number of API functions defined.
num_functions_implemented (int): Number of API functions implemented.
api_complete (bool): Indicates if DIF implements all defined functions.
funcs_unimplemented (Set[str]): Set of unimplemted DIF functions.
"""
def __init__(self, ipgen_ips, top_level, dif_name):
"""Mines metadata to populate this DIFStatus object.
Args:
ipgen_ips: List of IPs generated with the ipgen.py tool.
top_level: Name of the top level design.
dif_name: Full name of the DIF including the IP name.
Raises:
ValueError: Raised if DIF name does not start with "dif_".
"""
# Get DIF/IP names and path.
if not dif_name.startswith("dif_"):
raise ValueError("DIF name should start with \"dif_\".")
self.dif_name = dif_name
self.ip = self.dif_name[4:]
self.dif_path = DIFS_RELATIVE_PATH / dif_name
self.dif_autogen_path = (DIFS_RELATIVE_PATH /
f"autogen/{dif_name}_autogen")
# Check if header file exists - if not then its not even begun.
has_started = self.dif_path.with_suffix(".h").is_file()
# Get (relative) HW RTL path.
if self.ip in ipgen_ips:
self.hw_path = Path(f"hw/{top_level}/ip_autogen/{self.ip}")
elif self.ip in _TOP_LEVEL_IPS:
self.hw_path = Path(f"hw/{top_level}/ip/{self.ip}")
else:
self.hw_path = Path(f"hw/ip/{self.ip}")
# Indicates DIF API completeness.
self.num_functions_defined = -1
self.num_functions_implemented = -1
self.api_complete = False
# Determine last date HW was updated.
self.hw_last_modified = self._get_last_commit_date(
[self.hw_path / "rtl"], [""])
# Determine the main contributor of the HW.
self.hw_main_contributors = self._get_main_contributor_emails(
_OTComponent.HW)
if has_started:
# Determine last date DIF was updated.
self.dif_last_modified = self._get_last_commit_date(
[self.dif_path, self.dif_autogen_path], [".h", ".c"])
# Determine the main contributor of the DIF.
self.dif_main_contributors = self._get_main_contributor_emails(
_OTComponent.DIF)
# Determine lifecycle state
self.lifecycle_state = self._get_dif_lifecycle_state()
# Determine DIF API completeness.
self.funcs_unimplemented = self._get_funcs_unimplemented()
else:
# Set DIF status data to indicate it has not started.
self.dif_last_modified = "-"
self.dif_main_contributors = [_NOT_STARTED]
self.lifecycle_state = "-"
self.funcs_unimplemented = [_NOT_STARTED]
def _get_dif_lifecycle_state(self):
hjson_filename = self.hw_path / f"data/{self.ip}.prj.hjson"
with open(hjson_filename, "r") as life_f:
lifecycle_data = hjson.load(life_f)
# If there are multiple revisions, grab the latest.
if "revisions" in lifecycle_data:
lifecycle_data = lifecycle_data["revisions"][-1]
if "dif_stage" in lifecycle_data:
return lifecycle_data["dif_stage"]
return "-"
def _get_main_contributor_emails(self, component):
# Get contributor stats for HW or DIF (SW) and sort by LOC.
if component == _OTComponent.DIF:
stats = self._get_contributors(
[self.dif_path, self.dif_autogen_path], [".h", ".c"])
else:
stats = self._get_contributors([self.hw_path / "rtl"], [""])
sorted_stats = sorted(stats.items(), key=lambda x: x[1], reverse=True)
# If the second contributor has contributed at least 10% as much as the
# first contributor, include both second and first contributors.
contributor_1_email, contributor_1_loc = sorted_stats[0]
if len(sorted_stats) > 1:
contributor_2_email, contributor_2_loc = sorted_stats[1]
if (float(contributor_2_loc) / float(contributor_1_loc)) > 0.1:
return [contributor_1_email, contributor_2_email]
return [contributor_1_email]
def _get_contributors(self, file_paths, exts):
contributor_stats = collections.defaultdict(int)
for file_path, ext in itertools.product(file_paths, exts):
full_file_path = file_path.with_suffix(ext)
output = io.StringIO()
try:
# Use gitfame to fetch commit stats, captured from STDOUT.
with redirect_stdout(output):
gitfame.main(args=[
f"--incl={full_file_path}", "-s", "-e", "--log=ERROR",
"--format=json"
])
except FileNotFoundError:
logging.error(f"(contributors) file path ({full_file_path}) "
"does not exist.")
sys.exit(1)
gitfame_commit_stats = json.loads(output.getvalue())
for contributor_stat in gitfame_commit_stats["data"]:
contributor = contributor_stat[0]
loc = contributor_stat[1]
if loc == 0:
break
contributor_stats[contributor] += loc
return contributor_stats
def _get_last_commit_date(self, file_paths, exts):
last_dif_commit_date = None
for file_path, ext in itertools.product(file_paths, exts):
full_file_path = file_path.with_suffix(ext)
try:
repo = pydriller.Repository(
str(REPO_TOP), filepath=full_file_path).traverse_commits()
except FileNotFoundError:
logging.error(
f"(date) file path ({full_file_path}) does not exist.")
sys.exit(1)
for commit in repo:
if last_dif_commit_date is None:
last_dif_commit_date = commit.author_date
else:
last_dif_commit_date = max(last_dif_commit_date,
commit.author_date)
return last_dif_commit_date.strftime("%Y-%m-%d %H:%M:%S")
def _get_funcs_unimplemented(self):
defined_funcs = self._get_defined_funcs()
implemented_funcs = self._get_implemented_funcs()
self.num_functions_defined = len(defined_funcs)
self.num_functions_implemented = len(implemented_funcs)
self.api_complete = bool(defined_funcs and
defined_funcs == implemented_funcs)
if len(defined_funcs) < len(implemented_funcs):
logging.warning(
f"number of defined functions is less than implemented "
f"functions for {self.ip}. Results possibly invalid.")
print("Functions missing definitions:")
for impl_func in implemented_funcs:
if impl_func not in defined_funcs:
print(f"\t{impl_func}")
return defined_funcs - implemented_funcs
def _get_defined_funcs(self):
    """Collect function names declared in the manual and autogenerated headers."""
    headers = (self.dif_path.with_suffix(".h"),
               self.dif_autogen_path.with_suffix(".h"))
    declared = set()
    for header in headers:
        declared |= self._get_funcs(header)
    return declared
def _get_implemented_funcs(self):
    """Collect function names implemented in the .c files for this DIF."""
    autogen_impl = self.dif_autogen_path.with_suffix(".c")
    manual_impl = self.dif_path.with_suffix(".c")
    # The autogenerated implementation should always exist once the DIF has
    # been started.
    implemented = self._get_funcs(autogen_impl)
    # The hand-written implementation may not exist yet; a missing .c file
    # simply means none of those functions are implemented.
    if manual_impl.is_file():
        implemented |= self._get_funcs(manual_impl)
    return implemented
def _get_funcs(self, file_path):
    """Scan a C header/source file for non-static DIF function names.

    Matches lines of the form `dif_result_t dif_*(...)` and returns the
    set of captured function names.
    """
    pattern = re.compile(r"dif_result_t (dif_.*)\(.*")
    found = set()
    with open(file_path, "r") as src:
        for line in src:
            match = pattern.search(line)
            # Skip file-local (static) helpers; they are not API functions.
            if match is not None and not line.startswith("static"):
                found.add(match.group(1))
    return found
def print_status_table(dif_statuses: List[DIFStatus],
                       table_format: str) -> None:
    """Render a summary table of DIF development status to STDOUT.

    Args:
        dif_statuses: List of DIFStatus objects containing metadata about DIF
            development states.
        table_format: Format of output table to print. See tabulate module.

    Returns:
        None
    """
    headers = [
        "IP", "DIF Updated", "HW Updated", "DIF Contributor*",
        "HW Contributor*", "Functions\nDefined", "Functions\nImplemented",
        "Stage"
    ]
    rows = []
    for status in dif_statuses:
        # Show only the date portion (first 10 chars, YYYY-MM-DD) and
        # highlight in yellow when the HW changed more recently than the DIF.
        hw_date = status.hw_last_modified[:10]
        dif_date = status.dif_last_modified[:10]
        if status.hw_last_modified > status.dif_last_modified:
            hw_date = colored(hw_date, "yellow")
            dif_date = colored(dif_date, "yellow")
        # Function counts: green when the API is complete, red otherwise.
        count_color = "green" if status.api_complete else "red"
        defined_count = colored(status.num_functions_defined, count_color)
        implemented_count = colored(status.num_functions_implemented,
                                    count_color)
        # One contributor email per line within a cell.
        rows.append([
            status.ip, dif_date, hw_date,
            "\n".join(status.dif_main_contributors),
            "\n".join(status.hw_main_contributors), defined_count,
            implemented_count, status.lifecycle_state
        ])
    # Emit the table followed by the color legend.
    print("DIF Statuses:")
    print(tabulate(rows, headers, tablefmt=table_format))
    print("""*Only the top two contributors (by LOC) """
          """for each component are listed.""")
    print(colored("Yellow", "yellow"),
          "\t= HW has been updated since the DIF.")
    print(
        colored("Green", "green"),
        """\t= DIF API, as defined in the current header file, is complete. """
        """Note, the header file may lack necessary API functionality.""")
    print(colored("Red", "red"),
          ("\t= DIF API is incomplete, as defined in the header file or the "
           "work has not yet begun."))
def print_unimplemented_difs(dif_statuses: List[DIFStatus],
                             table_format: str) -> None:
    """Print a table listing the unimplemented functions of incomplete DIFs.

    Args:
        dif_statuses: List of DIFStatus objects containing metadata about DIF
            development states.
        table_format: Format of output table to print. See tabulate module.

    Returns:
        None
    """
    print("Unimplemented Functions:")
    headers = ["IP", "Function"]
    # Only DIFs whose API is incomplete contribute a row; the cell holds one
    # function name per line.
    rows = [[status.ip, "\n".join(status.funcs_unimplemented)]
            for status in dif_statuses if not status.api_complete]
    print(tabulate(rows, headers, tablefmt=table_format))
def main(argv):
    """Entry point: scrape and print DIF statuses for a top-level design.

    Args:
        argv: Command-line arguments (excluding the program name).
    """
    # Process args and set logging level.
    # TODO: parallelize data scraping so its much faster
    parser = argparse.ArgumentParser(
        prog="check_dif_statuses",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "--show-unimplemented",
        action="store_true",
        help="""Show unimplemented functions for each incomplete DIF.""")
    parser.add_argument("--table-format",
                        type=str,
                        choices=["grid", "github", "pipe"],
                        default="grid",
                        help="""Format to print status tables in.""")
    parser.add_argument(
        "top_hjson",
        help="""Path to the top-level HJSON configuration file relative to
        REPO_TOP.""")
    args = parser.parse_args(argv)
    logging.basicConfig(level=logging.WARNING)
    # Make sure to call this script from REPO_TOP.
    if Path.cwd() != REPO_TOP:
        logging.error(f"Must call script from \"$REPO_TOP\": {REPO_TOP}")
        sys.exit(1)
    # Get the list of IP blocks by invoking the topgen tool.
    topgen_tool = REPO_TOP / "util/topgen.py"
    top_hjson = REPO_TOP / args.top_hjson
    # The top level's name is the HJSON file's stem (e.g. "top_earlgrey").
    top_level = top_hjson.stem
    top_hjson_text = top_hjson.read_text()
    topcfg = hjson.loads(top_hjson_text, use_decimal=True)
    ipgen_ips = lib.get_ipgen_modules(topcfg)
    # yapf: disable
    topgen_process = subprocess.run([topgen_tool, "-t", top_hjson,
                                     "--get_blocks", "-o", REPO_TOP],
                                    universal_newlines=True,
                                    stdout=subprocess.PIPE,
                                    check=True)
    # yapf: enable
    # All DIF names are prefixed with `dif_`.
    difs = {f"dif_{dif.strip()}" for dif in topgen_process.stdout.split()}
    # Get DIF statuses (while displaying a progress bar).
    dif_statuses = []
    progress_bar = enlighten.Counter(total=len(difs),
                                     desc="Analyzing statuses of DIFs ...",
                                     unit="DIFs")
    for dif in difs:
        dif_statuses.append(DIFStatus(ipgen_ips, top_level, dif))
        progress_bar.update()
    # Sort by IP name so the output order is stable across runs.
    dif_statuses.sort(key=lambda x: x.ip)
    # Build table and print it to STDOUT.
    print_status_table(dif_statuses, args.table_format)
    if args.show_unimplemented:
        print_unimplemented_difs(dif_statuses, args.table_format)


if __name__ == "__main__":
    main(sys.argv[1:])
| |
import itertools
import pytest
import logging
from cassandra.query import dict_factory
from dtest import RUN_STATIC_UPGRADE_MATRIX, Tester
from thrift_bindings.thrift010 import Cassandra
from thrift_bindings.thrift010.Cassandra import (Column, ColumnDef,
ColumnParent, ConsistencyLevel,
SlicePredicate, SliceRange)
from thrift_test import _i64, get_thrift_client
from tools.assertions import assert_length_equal, assert_lists_of_dicts_equal
from tools.misc import wait_for_agreement, add_skip
from .upgrade_base import UpgradeTester
from .upgrade_manifest import build_upgrade_pairs
since = pytest.mark.since
logger = logging.getLogger(__name__)
def _create_dense_super_cf(thrift, name):
    """Create a dense super column family via thrift and wait for schema agreement."""
    cf_def = Cassandra.CfDef('ks', name, column_type='Super',
                             key_validation_class='AsciiType',      # pk
                             comparator_type='AsciiType',           # ck
                             default_validation_class='AsciiType',  # SC value
                             subcomparator_type='LongType')         # SC key
    thrift.system_add_column_family(cf_def)
    wait_for_agreement(thrift)
def _create_sparse_super_cf(thrift, name):
    """Create a sparse super column family with two declared LongType columns."""
    declared_columns = [ColumnDef('col1'.encode(), 'LongType', None, None),
                        ColumnDef('col2'.encode(), 'LongType', None, None)]
    cf_def = Cassandra.CfDef('ks', name, column_type='Super',
                             column_metadata=declared_columns,
                             key_validation_class='AsciiType',
                             comparator_type='AsciiType',
                             subcomparator_type='AsciiType')
    thrift.system_add_column_family(cf_def)
    wait_for_agreement(thrift)
def unpack(lst):
    """Flatten one level of nesting in a list of row dicts.

    Any value that is itself a mapping must be stored under the empty-string
    key (asserted); its entries are merged into the row. All other key/value
    pairs are copied through unchanged.
    """
    unpacked = []
    for row in lst:
        flat = {}
        for key, value in row.items():
            if hasattr(value, "items"):
                # Nested mappings only ever appear under the '' key.
                assert key == ''
                flat.update(value)
            else:
                flat[key] = value
        unpacked.append(flat)
    return unpacked
def add_value(list):
    """Extend expected rows with the dynamic value column seen on 4.0+.

    Rows keyed 'k1' (under either 'key' or 'renamed_key') gain
    value1=_i64(100); all other rows gain value2=_i64(100). Used by
    _validate_sparse_cql to adjust expectations after DROP COMPACT STORAGE.
    """
    for row in list:
        row_key = row.get('key', None)
        if row_key is None:
            # The key column may have been renamed by the test.
            row_key = row.get('renamed_key')
        if row_key == 'k1':
            row['value1'] = _i64(100)
        else:
            row['value2'] = _i64(100)
def _validate_sparse_cql(cursor, cf='sparse_super_1', column1='column1', col1='col1', col2='col2', key='key', is_version_4_or_greater=False):
    """Validate CQL reads of a sparse super CF against the fixed test data.

    Column-name parameters allow validating tables whose columns were renamed.
    On 4.0+ the expected rows gain the dynamic value columns (see add_value).
    """
    cursor.execute('use ks')

    def expect(pairs):
        # Build expected rows in the same key order the queries return them.
        rows = [{key: pk, column1: ck, col1: 200, col2: 300}
                for pk, ck in pairs]
        if is_version_4_or_greater:
            add_value(rows)
        return rows

    result = unpack(list(cursor.execute("SELECT * FROM {}".format(cf))))
    assert_lists_of_dicts_equal(
        result,
        expect([('k1', 'key1'), ('k1', 'key2'), ('k2', 'key1'),
                ('k2', 'key2')]))

    result = unpack(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1'".format(cf, key))))
    assert_lists_of_dicts_equal(result,
                                expect([('k1', 'key1'), ('k1', 'key2')]))

    result = unpack(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k2' AND {} = 'key1'".format(cf, key, column1))))
    assert_lists_of_dicts_equal(result, expect([('k2', 'key1')]))
def _validate_sparse_thrift(client, cf='sparse_super_1'):
    """Validate thrift reads of a sparse super CF against the fixed test data.

    Partition 'k1' must hold supercolumns key1/key2, each with col1=200,
    col2=300 and the dynamic value1=100 column.
    """
    try:
        client.transport.open()
    except Exception:
        # Bare `except:` would also swallow SystemExit/KeyboardInterrupt.
        # The transport may already be open; any real connection problem
        # will surface in the calls below.
        pass
    client.set_keyspace('ks')
    result = client.get_slice('k1'.encode(), ColumnParent(cf), SlicePredicate(slice_range=SliceRange(''.encode(), ''.encode(), False, 5)), ConsistencyLevel.ONE)
    assert_length_equal(result, 2)
    assert result[0].super_column.name == 'key1'.encode()
    assert result[1].super_column.name == 'key2'.encode()
    for cosc in result:
        # Subcolumns come back sorted by name: col1, col2, value1.
        assert cosc.super_column.columns[0].name == 'col1'.encode()
        assert cosc.super_column.columns[0].value == _i64(200)
        assert cosc.super_column.columns[1].name == 'col2'.encode()
        assert cosc.super_column.columns[1].value == _i64(300)
        assert cosc.super_column.columns[2].name == 'value1'.encode()
        assert cosc.super_column.columns[2].value == _i64(100)
def _validate_dense_cql(cursor, cf='dense_super_1', key='key', column1='column1', column2='column2', value='value', is_version_4_or_greater=False):
    """Validate CQL reads of a dense super CF against the fixed test data.

    Column-name parameters allow validating tables whose columns were renamed.
    On 4.0+ (after DROP COMPACT STORAGE) each expected row exposes the
    supercolumn map entry (column2 -> value) instead of the flat
    column2/value columns.
    """
    cursor.execute('use ks')

    def to_v4(rows):
        # Rewrite expected rows in place for the 4.0+ representation:
        # the (column2, value) pair becomes a single map-entry column.
        # Replaces four copies of the same three loops in the original.
        for row in rows:
            row[row[column2]] = row[value]
            del row[value]
            del row[column2]

    expected = [{key: 'k1', column1: 'key1', column2: 100, value: 'value1'},
                {key: 'k1', column1: 'key2', column2: 100, value: 'value1'},
                {key: 'k2', column1: 'key1', column2: 200, value: 'value2'},
                {key: 'k2', column1: 'key2', column2: 200, value: 'value2'}]
    if is_version_4_or_greater:
        to_v4(expected)
    result = unpack(list(cursor.execute("SELECT * FROM {}".format(cf))))
    assert_lists_of_dicts_equal(result, expected)

    expected = [{key: 'k1', column1: 'key1', column2: 100, value: 'value1'},
                {key: 'k1', column1: 'key2', column2: 100, value: 'value1'}]
    if is_version_4_or_greater:
        to_v4(expected)
    result = unpack(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1'".format(cf, key))))
    assert_lists_of_dicts_equal(result, expected)

    expected = [{key: 'k1', column1: 'key1', column2: 100, value: 'value1'}]
    if is_version_4_or_greater:
        to_v4(expected)
    result = unpack(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1' AND {} = 'key1'".format(cf, key, column1))))
    assert_lists_of_dicts_equal(result, expected)

    if is_version_4_or_greater:
        # The flat column2 no longer exists; filter on the map key instead.
        # (Original passed an unused 4th argument to .format; removed.)
        result = unpack(list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1' AND {} = 'key1' AND \"\" CONTAINS KEY 100 ALLOW FILTERING".format(cf, key, column1))))
    else:
        result = list(cursor.execute("SELECT * FROM {} WHERE {} = 'k1' AND {} = 'key1' AND {} = 100".format(cf, key, column1, column2)))
    expected = [{key: 'k1', column1: 'key1', column2: 100, value: 'value1'}]
    if is_version_4_or_greater:
        to_v4(expected)
    assert_lists_of_dicts_equal(result, expected)
def _validate_dense_thrift(client, cf='dense_super_1'):
    """Validate thrift reads of a dense super CF against the fixed test data.

    Partition 'k1' must hold supercolumns key1/key2, each with the single
    subcolumn 100 -> 'value1'.
    """
    try:
        client.transport.open()
    except Exception:
        # Bare `except:` would also swallow SystemExit/KeyboardInterrupt.
        # The transport may already be open; any real connection problem
        # will surface in the calls below.
        pass
    client.set_keyspace('ks')
    result = client.get_slice('k1'.encode(), ColumnParent(cf), SlicePredicate(slice_range=SliceRange(''.encode(), ''.encode(), False, 5)), ConsistencyLevel.ONE)
    assert_length_equal(result, 2)
    assert result[0].super_column.name == 'key1'.encode()
    assert result[1].super_column.name == 'key2'.encode()
    print(result[0])
    print(result[1])
    for cosc in result:
        assert cosc.super_column.columns[0].name == _i64(100)
        assert cosc.super_column.columns[0].value == 'value1'.encode()
@pytest.mark.upgrade_test
class TestUpgradeSuperColumnsThrough(Tester):
    """Upgrade clusters holding super column families across major versions,
    validating the data over both thrift and CQL at each step."""

    def upgrade_to_version(self, tag, nodes=None):
        """Drain, stop, re-version and restart the given nodes (default: all)."""
        logger.debug('Upgrading to ' + tag)
        if nodes is None:
            nodes = self.cluster.nodelist()
        for node in nodes:
            logger.debug('Shutting down node: ' + node.name)
            node.drain()
            node.watch_log_for("DRAINED")
            node.stop(wait_other_notice=False)
        # Update Cassandra Directory
        for node in nodes:
            node.set_install_dir(version=tag)
            node.set_configuration_options(values={'start_rpc': 'true'})
            logger.debug("Set new cassandra dir for %s: %s" % (node.name, node.get_install_dir()))
        self.cluster.set_install_dir(version=tag)
        self.fixture_dtest_setup.reinitialize_cluster_for_different_version()
        # reinitialize may reset per-node config; re-enable thrift RPC.
        for node in nodes:
            node.set_configuration_options(values={'start_rpc': 'true'})
        # Restart nodes on new version
        for node in nodes:
            logger.debug('Starting %s on new version (%s)' % (node.name, tag))
            # Setup log4j / logback again (necessary moving from 2.0 -> 2.1):
            node.set_log_level("INFO")
            node.start(wait_other_notice=True, wait_for_binary_proto=True)
            node.nodetool('upgradesstables -a')

    def prepare(self, num_nodes=1, cassandra_version="github:apache/cassandra-2.2"):
        """Start a fresh cluster of num_nodes on the requested version."""
        cluster = self.cluster
        # Forcing cluster version on purpose
        cluster.set_install_dir(version=cassandra_version)
        self.fixture_dtest_setup.reinitialize_cluster_for_different_version()
        cluster.populate(num_nodes)
        for node in self.cluster.nodelist():
            node.set_configuration_options(values={'start_rpc': 'true'})
        cluster.start()
        return cluster

    def test_dense_supercolumn_3_0_created(self):
        """A dense super CF created on 3.0 must survive upgrading to the
        current version, over thrift (pre-4.0) and CQL."""
        cluster = self.prepare(cassandra_version='github:apache/cassandra-3.0')
        node = self.cluster.nodelist()[0]
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        cursor.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('ks')
        _create_dense_super_cf(client, 'dense_super_1')
        # Two supercolumns (key1/key2) per partition, one subcolumn each.
        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('dense_super_1', 'key{}'.format(i).encode()), Column(_i64(100), 'value1'.encode(), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('dense_super_1', 'key{}'.format(i).encode()), Column(_i64(200), 'value2'.encode(), 0), ConsistencyLevel.ONE)
        _validate_dense_thrift(client, cf='dense_super_1')
        self.set_node_to_current_version(node)
        #4.0 doesn't support compact storage
        if node.get_cassandra_version() >= '4':
            cursor.execute("ALTER TABLE ks.dense_super_1 DROP COMPACT STORAGE;")
        node.stop()
        if node.get_cassandra_version() < '4':
            node.set_configuration_options(values={'start_rpc': 'true'})
        node.start()
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        # Thrift only exists before 4.0.
        if node.get_cassandra_version() < '4':
            client = get_thrift_client(host, port)
            _validate_dense_thrift(client, cf='dense_super_1')
        _validate_dense_cql(cursor, cf='dense_super_1', is_version_4_or_greater=node.get_cassandra_version() >= '4')

    def test_dense_supercolumn(self):
        """A dense super CF created on 2.2 must survive upgrading through 3.0
        to the current version."""
        cluster = self.prepare()
        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        cursor.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('ks')
        _create_dense_super_cf(client, 'dense_super_1')
        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('dense_super_1', 'key{}'.format(i).encode()), Column(_i64(100), 'value1'.encode(), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('dense_super_1', 'key{}'.format(i).encode()), Column(_i64(200), 'value2'.encode(), 0), ConsistencyLevel.ONE)
        _validate_dense_thrift(client, cf='dense_super_1')
        _validate_dense_cql(cursor, cf='dense_super_1')
        # First hop: 2.2 -> 3.0.
        self.upgrade_to_version('github:apache/cassandra-3.0')
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        client = get_thrift_client(host, port)
        _validate_dense_thrift(client, cf='dense_super_1')
        # Second hop: 3.0 -> current.
        self.set_node_to_current_version(node)
        #4.0 doesn't support compact storage
        if node.get_cassandra_version() >= '4':
            cursor.execute("ALTER TABLE ks.dense_super_1 DROP COMPACT STORAGE;")
        node.stop()
        if node.get_cassandra_version() < '4':
            node.set_configuration_options(values={'start_rpc': 'true'})
        node.start()
        if node.get_cassandra_version() < '4':
            client = get_thrift_client(host, port)
            _validate_dense_thrift(client, cf='dense_super_1')
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        _validate_dense_cql(cursor, cf='dense_super_1', is_version_4_or_greater=node.get_cassandra_version() >= '4')

    def test_sparse_supercolumn(self):
        """A sparse super CF created on 2.2 must survive upgrading through 3.0
        to the current version."""
        cluster = self.prepare()
        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        cursor.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy','replication_factor': '1' };")
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('ks')
        _create_sparse_super_cf(client, 'sparse_super_2')
        # Each supercolumn gets the two declared columns plus a dynamic one.
        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("value1".encode(), _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k1'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col1".encode(), _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k1'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col2".encode(), _i64(300), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("value2".encode(), _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col1".encode(), _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col2".encode(), _i64(300), 0), ConsistencyLevel.ONE)
        _validate_sparse_thrift(client, cf='sparse_super_2')
        _validate_sparse_cql(cursor, cf='sparse_super_2')
        # First hop: 2.2 -> 3.0.
        self.upgrade_to_version('github:apache/cassandra-3.0')
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        client = get_thrift_client(host, port)
        _validate_sparse_thrift(client, cf='sparse_super_2')
        # Second hop: 3.0 -> current.
        self.set_node_to_current_version(node)
        is_version_4_or_greater = node.get_cassandra_version() >= '4'
        #4.0 doesn't support compact storage
        if is_version_4_or_greater:
            cursor.execute("ALTER TABLE ks.sparse_super_2 DROP COMPACT STORAGE;")
        node.stop()
        if not is_version_4_or_greater:
            node.set_configuration_options(values={'start_rpc': 'true'})
        node.start()
        if not is_version_4_or_greater:
            client = get_thrift_client(host, port)
            _validate_sparse_thrift(client, cf='sparse_super_2')
        cursor = self.patient_cql_connection(node, row_factory=dict_factory)
        _validate_sparse_cql(cursor, cf='sparse_super_2', is_version_4_or_greater=is_version_4_or_greater)
@pytest.mark.upgrade_test
@since('2.1', max_version='3.99')
class TestThrift(UpgradeTester):
    """
    Verify dense and sparse supercolumn functionality with and without renamed columns
    in 3.X after upgrading from 2.x.

    @jira_ticket CASSANDRA-12373
    """

    def test_dense_supercolumn(self):
        """Dense super CF data written over thrift survives the upgrade."""
        cursor = self.prepare(nodes=2, rf=2, row_factory=dict_factory)
        cluster = self.cluster
        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('ks')
        _create_dense_super_cf(client, 'dense_super_1')
        # Two supercolumns (key1/key2) per partition, one subcolumn each.
        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('dense_super_1', 'key{}'.format(i).encode()), Column(_i64(100), 'value1'.encode(), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('dense_super_1', 'key{}'.format(i).encode()), Column(_i64(200), 'value2'.encode(), 0), ConsistencyLevel.ONE)
        _validate_dense_cql(cursor)
        _validate_dense_thrift(client)
        version_string = self.upgrade_version_string()
        is_version_4_or_greater = version_string == 'trunk' or version_string >= '4.0'
        #4.0 doesn't support compact storage
        if is_version_4_or_greater:
            cursor.execute("ALTER TABLE ks.dense_super_1 DROP COMPACT STORAGE;")
        # Re-validate against both the old and the upgraded node.
        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            if not is_version_4_or_greater:
                client = get_thrift_client(host, port)
                _validate_dense_thrift(client)
            _validate_dense_cql(cursor, is_version_4_or_greater=is_version_4_or_greater)

    def test_dense_supercolumn_with_renames(self):
        """Dense super CF with renamed key/clustering/value columns survives
        the upgrade."""
        cursor = self.prepare(row_factory=dict_factory)
        cluster = self.cluster
        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('ks')
        _create_dense_super_cf(client, 'dense_super_2')
        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('dense_super_2', 'key{}'.format(i).encode()), Column(_i64(100), 'value1'.encode(), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('dense_super_2', 'key{}'.format(i).encode()), Column(_i64(200), 'value2'.encode(), 0), ConsistencyLevel.ONE)
        # Rename every CQL-visible column of the compact table.
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME key TO renamed_key")
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME column1 TO renamed_column1")
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME column2 TO renamed_column2")
        cursor.execute("ALTER TABLE ks.dense_super_2 RENAME value TO renamed_value")
        _validate_dense_cql(cursor, cf='dense_super_2', key='renamed_key', column1='renamed_column1', column2='renamed_column2', value='renamed_value')
        _validate_dense_thrift(client, cf='dense_super_2')
        version_string = self.upgrade_version_string()
        is_version_4_or_greater = version_string == 'trunk' or version_string >= '4.0'
        #4.0 doesn't support compact storage
        if is_version_4_or_greater:
            cursor.execute("ALTER TABLE ks.dense_super_2 DROP COMPACT STORAGE;")
        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            if not is_version_4_or_greater:
                client = get_thrift_client(host, port)
                _validate_dense_thrift(client, cf='dense_super_2')
            _validate_dense_cql(cursor, cf='dense_super_2', key='renamed_key', column1='renamed_column1', column2='renamed_column2', value='renamed_value', is_version_4_or_greater=is_version_4_or_greater)

    def test_sparse_supercolumn_with_renames(self):
        """Sparse super CF with renamed key/clustering columns survives the
        upgrade."""
        cursor = self.prepare(row_factory=dict_factory)
        cluster = self.cluster
        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('ks')
        _create_sparse_super_cf(client, 'sparse_super_1')
        # Rename before inserting data; thrift writes are unaffected.
        cursor.execute("ALTER TABLE ks.sparse_super_1 RENAME key TO renamed_key")
        cursor.execute("ALTER TABLE ks.sparse_super_1 RENAME column1 TO renamed_column1")
        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("value1".encode(), _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k1'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("col1".encode(), _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k1'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("col2".encode(), _i64(300), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("value2".encode(), _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("col1".encode(), _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_1', 'key{}'.format(i).encode()), Column("col2".encode(), _i64(300), 0), ConsistencyLevel.ONE)
        _validate_sparse_thrift(client)
        _validate_sparse_cql(cursor, column1='renamed_column1', key='renamed_key')
        version_string = self.upgrade_version_string()
        is_version_4_or_greater = version_string == 'trunk' or version_string >= '4.0'
        #4.0 doesn't support compact storage
        if is_version_4_or_greater:
            cursor.execute("ALTER TABLE ks.sparse_super_1 DROP COMPACT STORAGE;")
        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            if not is_version_4_or_greater:
                client = get_thrift_client(host, port)
                _validate_sparse_thrift(client)
            _validate_sparse_cql(cursor, column1='renamed_column1', key='renamed_key', is_version_4_or_greater=is_version_4_or_greater)

    def test_sparse_supercolumn(self):
        """Sparse super CF data written over thrift survives the upgrade."""
        cursor = self.prepare(row_factory=dict_factory)
        cluster = self.cluster
        node = self.cluster.nodelist()[0]
        node.nodetool("enablethrift")
        host, port = node.network_interfaces['thrift']
        client = get_thrift_client(host, port)
        client.transport.open()
        client.set_keyspace('ks')
        _create_sparse_super_cf(client, 'sparse_super_2')
        for i in range(1, 3):
            client.insert('k1'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("value1".encode(), _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k1'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col1".encode(), _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k1'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col2".encode(), _i64(300), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("value2".encode(), _i64(100), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col1".encode(), _i64(200), 0), ConsistencyLevel.ONE)
            client.insert('k2'.encode(), ColumnParent('sparse_super_2', 'key{}'.format(i).encode()), Column("col2".encode(), _i64(300), 0), ConsistencyLevel.ONE)
        _validate_sparse_thrift(client, cf='sparse_super_2')
        _validate_sparse_cql(cursor, cf='sparse_super_2')
        version_string = self.upgrade_version_string()
        is_version_4_or_greater = version_string == 'trunk' or version_string >= '4.0'
        #4.0 doesn't support compact storage
        if is_version_4_or_greater:
            cursor.execute("ALTER TABLE ks.sparse_super_2 DROP COMPACT STORAGE;")
        for is_upgraded, cursor in self.do_upgrade(cursor, row_factory=dict_factory, use_thrift=True):
            logger.debug("Querying {} node".format("upgraded" if is_upgraded else "old"))
            if not is_version_4_or_greater:
                client = get_thrift_client(host, port)
                _validate_sparse_thrift(client, cf='sparse_super_2')
            _validate_sparse_cql(cursor, cf='sparse_super_2', is_version_4_or_greater=is_version_4_or_greater)
# Generate one TestThrift subclass per (topology, upgrade path) combination
# so pytest collects the full upgrade matrix.
topology_specs = [
    {'NODES': 3,
     'RF': 3,
     'CL': ConsistencyLevel.ALL},
    {'NODES': 2,
     'RF': 1},
]
specs = [dict(s, UPGRADE_PATH=p, __test__=True)
         for s, p in itertools.product(topology_specs, build_upgrade_pairs())]
for spec in specs:
    suffix = 'Nodes{num_nodes}RF{rf}_{pathname}'.format(num_nodes=spec['NODES'],
                                                        rf=spec['RF'],
                                                        pathname=spec['UPGRADE_PATH'].name)
    gen_class_name = TestThrift.__name__ + suffix
    # Guard against two specs producing the same class name.
    assert gen_class_name not in globals()
    upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or spec['UPGRADE_PATH'].upgrade_meta.matches_current_env_version_family
    cls = type(gen_class_name, (TestThrift,), spec)
    if not upgrade_applies_to_env:
        # Keep the class importable but mark it skipped in this environment.
        add_skip(cls, 'test not applicable to env.')
    globals()[gen_class_name] = cls
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import hashlib
import json
from oslo_log import log as logging
import six
from trove.common import cfg
from trove.common.i18n import _
from trove.common.remote import create_swift_client
from trove.common.strategies.storage import base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CHUNK_SIZE = CONF.backup_chunk_size
MAX_FILE_SIZE = CONF.backup_segment_max_size
BACKUP_CONTAINER = CONF.backup_swift_container
class DownloadError(Exception):
    """Raised when the Swift download command fails."""
class SwiftDownloadIntegrityError(Exception):
    """Raised when downloaded Swift data fails its integrity (checksum) check."""
class StreamReader(object):
    """Wrap the stream from the backup process and chunk it into segments.

    read() hands out at most max_file_size bytes per segment, returning an
    empty (falsy) sentinel at each segment boundary and at end of stream so
    the uploader can start a new Swift object per segment.
    """

    def __init__(self, stream, filename, max_file_size=MAX_FILE_SIZE):
        # Source of backup data (read in CHUNK_SIZE pieces).
        self.stream = stream
        # Name used to derive per-segment object names.
        self.filename = filename
        self.container = BACKUP_CONTAINER
        # Upper bound on bytes handed out per segment.
        self.max_file_size = max_file_size
        # Bytes handed out for the current segment so far.
        self.segment_length = 0
        self.process = None
        # Zero-based index of the segment currently being read.
        self.file_number = 0
        self.end_of_file = False
        self.end_of_segment = False
        # Rolling MD5 of the current segment; compared against the Swift
        # ETag after upload (integrity check, not a security use).
        self.segment_checksum = hashlib.md5()

    @property
    def base_filename(self):
        """Filename with extensions removed."""
        return self.filename.split('.')[0]

    @property
    def segment(self):
        # Current segment object name, e.g. "<base>_00000003".
        return '%s_%08d' % (self.base_filename, self.file_number)

    @property
    def first_segment(self):
        # Name of the first segment of this backup.
        return '%s_%08d' % (self.base_filename, 0)

    @property
    def segment_path(self):
        # "<container>/<segment>" path, as referenced by the SLO manifest.
        return '%s/%s' % (self.container, self.segment)

    def read(self, chunk_size=CHUNK_SIZE):
        """Read up to chunk_size bytes for the current segment.

        Returns '' (falsy) at a segment boundary or at end of stream;
        callers distinguish the two via end_of_segment / end_of_file.
        NOTE(review): the sentinel is a str even if the stream yields bytes
        on Python 3 — confirm callers only test truthiness/length.
        """
        if self.end_of_segment:
            # A new segment starts: reset the per-segment accounting.
            self.segment_length = 0
            self.segment_checksum = hashlib.md5()
            self.end_of_segment = False

        # Upload to a new file if we are starting or too large
        if self.segment_length > (self.max_file_size - chunk_size):
            self.file_number += 1
            self.end_of_segment = True
            return ''

        chunk = self.stream.read(chunk_size)
        if not chunk:
            self.end_of_file = True
            return ''
        self.segment_checksum.update(chunk)
        self.segment_length += len(chunk)
        return chunk
class SwiftStorage(base.Storage):
    """Implementation of Storage Strategy for Swift.

    Backups are uploaded to <BACKUP_CONTAINER> as one or more segment
    objects; multi-segment backups are tied together with a Static Large
    Object (SLO) manifest.
    """
    __strategy_name__ = 'swift'

    def __init__(self, *args, **kwargs):
        super(SwiftStorage, self).__init__(*args, **kwargs)
        # Swift client built from the request context.
        self.connection = create_swift_client(self.context)

    def save(self, filename, stream, metadata=None):
        """Persist information from the stream to swift.

        The file is saved to the location <BACKUP_CONTAINER>/<filename>.
        It will be a Swift Static Large Object (SLO).
        The filename is defined on the backup runner manifest property
        which is typically in the format '<backup_id>.<ext>.gz'

        :param filename: name of the manifest object in Swift.
        :param stream: readable backup stream; must also expose a
                       metadata() method (see StreamReader usage below).
        :param metadata: optional dict merged into the object headers.
        :returns: tuple (success, note, checksum, location).
        """
        LOG.info(_('Saving %(filename)s to %(container)s in swift.'),
                 {'filename': filename, 'container': BACKUP_CONTAINER})
        # Create the container if it doesn't already exist
        LOG.debug('Creating container %s.', BACKUP_CONTAINER)
        self.connection.put_container(BACKUP_CONTAINER)
        # Swift Checksum is the checksum of the concatenated segment checksums
        swift_checksum = hashlib.md5()
        # Wrap the output of the backup process to segment it for swift
        stream_reader = StreamReader(stream, filename, MAX_FILE_SIZE)
        LOG.debug('Using segment size %s', stream_reader.max_file_size)
        url = self.connection.url
        # Full location where the backup manifest is stored
        location = "%s/%s/%s" % (url, BACKUP_CONTAINER, filename)
        # Information about each segment upload job
        segment_results = []
        # Read from the stream and write to the container in swift
        while not stream_reader.end_of_file:
            LOG.debug('Saving segment %s.', stream_reader.segment)
            path = stream_reader.segment_path
            etag = self.connection.put_object(BACKUP_CONTAINER,
                                              stream_reader.segment,
                                              stream_reader)
            segment_checksum = stream_reader.segment_checksum.hexdigest()
            # Check each segment MD5 hash against swift etag
            # Raise an error and mark backup as failed
            if etag != segment_checksum:
                LOG.error(_("Error saving data segment to swift. "
                            "ETAG: %(tag)s Segment MD5: %(checksum)s."),
                          {'tag': etag, 'checksum': segment_checksum})
                return False, "Error saving data to Swift!", None, location
            segment_results.append({
                'path': path,
                'etag': etag,
                'size_bytes': stream_reader.segment_length
            })
            if six.PY3:
                swift_checksum.update(segment_checksum.encode())
            else:
                swift_checksum.update(segment_checksum)
        # All segments uploaded.
        num_segments = len(segment_results)
        LOG.debug('File uploaded in %s segments.', num_segments)
        # An SLO will be generated if the backup was more than one segment in
        # length.
        large_object = num_segments > 1
        # Meta data is stored as headers
        if metadata is None:
            metadata = {}
        metadata.update(stream.metadata())
        headers = {}
        for key, value in metadata.items():
            headers[self._set_attr(key)] = value
        LOG.debug('Metadata headers: %s', str(headers))
        if large_object:
            LOG.info(_('Creating the manifest file.'))
            manifest_data = json.dumps(segment_results)
            LOG.debug('Manifest contents: %s', manifest_data)
            # The etag returned from the manifest PUT is the checksum of the
            # manifest object (which is empty); this is not the checksum we
            # want.
            self.connection.put_object(BACKUP_CONTAINER,
                                       filename,
                                       manifest_data,
                                       query_string='multipart-manifest=put')
            # Validation checksum is the Swift Checksum
            final_swift_checksum = swift_checksum.hexdigest()
        else:
            LOG.info(_('Backup fits in a single segment. Moving segment '
                       '%(segment)s to %(filename)s.'),
                     {'segment': stream_reader.first_segment,
                      'filename': filename})
            segment_result = segment_results[0]
            # Just rename it via a special put copy.
            headers['X-Copy-From'] = segment_result['path']
            self.connection.put_object(BACKUP_CONTAINER,
                                       filename, '',
                                       headers=headers)
            # Delete the old segment file that was copied
            LOG.debug('Deleting the old segment file %s.',
                      stream_reader.first_segment)
            self.connection.delete_object(BACKUP_CONTAINER,
                                          stream_reader.first_segment)
            final_swift_checksum = segment_result['etag']
        # Validate the object by comparing checksums
        # Get the checksum according to Swift
        resp = self.connection.head_object(BACKUP_CONTAINER, filename)
        # swift returns etag in double quotes
        # e.g. '"dc3b0827f276d8d78312992cc60c2c3f"'
        etag = resp['etag'].strip('"')
        # Raise an error and mark backup as failed
        if etag != final_swift_checksum:
            LOG.error(
                _("Error saving data to swift. Manifest "
                  "ETAG: %(tag)s Swift MD5: %(checksum)s"),
                {'tag': etag, 'checksum': final_swift_checksum})
            return False, "Error saving data to Swift!", None, location
        return (True, "Successfully saved data to Swift!",
                final_swift_checksum, location)

    def _explodeLocation(self, location):
        """Split a '<storage_url>/<container>/<filename>' location string."""
        storage_url = "/".join(location.split('/')[:-2])
        container = location.split('/')[-2]
        filename = location.split('/')[-1]
        return storage_url, container, filename

    def _verify_checksum(self, etag, checksum):
        """Raise SwiftDownloadIntegrityError when etag != expected checksum."""
        etag_checksum = etag.strip('"')
        if etag_checksum != checksum:
            msg = (_("Original checksum: %(original)s does not match"
                     " the current checksum: %(current)s") %
                   {'original': etag_checksum, 'current': checksum})
            LOG.error(msg)
            raise SwiftDownloadIntegrityError(msg)
        return True

    def load(self, location, backup_checksum):
        """Restore a backup from the input stream to the restore_location."""
        storage_url, container, filename = self._explodeLocation(location)
        headers, info = self.connection.get_object(container, filename,
                                                   resp_chunk_size=CHUNK_SIZE)
        if CONF.verify_swift_checksum_on_restore:
            self._verify_checksum(headers.get('etag', ''), backup_checksum)
        return info

    def _get_attr(self, original):
        """Get a friendly name from an object header key."""
        key = original.replace('-', '_')
        key = key.replace('x_object_meta_', '')
        return key

    def _set_attr(self, original):
        """Return a swift friendly header key."""
        key = original.replace('_', '-')
        return 'X-Object-Meta-%s' % key

    def load_metadata(self, location, backup_checksum):
        """Load metadata from swift."""
        storage_url, container, filename = self._explodeLocation(location)
        headers = self.connection.head_object(container, filename)
        if CONF.verify_swift_checksum_on_restore:
            self._verify_checksum(headers.get('etag', ''), backup_checksum)
        _meta = {}
        for key, value in headers.items():
            if key.startswith('x-object-meta'):
                _meta[self._get_attr(key)] = value
        return _meta

    def save_metadata(self, location, metadata=None):
        """Save metadata to a swift object.

        Fix: the default argument was previously a mutable dict ({}),
        which is shared across calls; use None and substitute a fresh
        dict (consistent with save()).
        """
        if metadata is None:
            metadata = {}
        storage_url, container, filename = self._explodeLocation(location)
        headers = {}
        for key, value in metadata.items():
            headers[self._set_attr(key)] = value
        LOG.info(_("Writing metadata: %s"), str(headers))
        self.connection.post_object(container, filename, headers=headers)
| |
from twisted.trial import unittest
import os
from twisted.internet import defer, reactor, error
from telephus.protocol import ManagedCassandraClientFactory
from telephus.client import CassandraClient
from telephus.cassandra import ttypes
# Number of TCP connections opened to the Cassandra node under test.
CONNS = 5
# Cassandra endpoint; host can be overridden via the CASSANDRA_HOST env var.
HOST = os.environ.get('CASSANDRA_HOST', 'localhost')
PORT = 9160
# Main test keyspace (created in setUp, dropped in tearDown) and a
# transient keyspace created/destroyed inside individual tests.
KEYSPACE = 'TelephusTests'
T_KEYSPACE = 'TelephusTests2'
# Column families used by the tests; the T_* names are transient CFs.
CF = 'Standard1'
SCF = 'Super1'
COUNTER_CF = 'Counter1'
SUPERCOUNTER_CF = 'SuperCounter1'
IDX_CF = 'IdxTestCF'
T_CF = 'TransientCF'
T_SCF = 'TransientSCF'
# Column / super-column names used by the data tests.
COLUMN = 'foo'
COLUMN2 = 'foo2'
SCOLUMN = 'bar'
# RF for SimpleStrategy keyspaces should be set on the 'replication_factor'
# attribute of KsDefs below this version
KS_RF_ATTRIBUTE = (19, 4, 0)
# Thrift API version from which counter columns are available.
COUNTERS_SUPPORTED_API = (19, 10, 0)
# until Cassandra supports these again..
DO_SYSTEM_RENAMING = False
class CassandraClientTest(unittest.TestCase):
    """Integration tests for CassandraClient.

    Requires a live Cassandra node at HOST:PORT; each test runs inside
    Twisted trial with inlineCallbacks, and setUp/tearDown create and
    drop the KEYSPACE keyspace respectively.
    """
    @defer.inlineCallbacks
    def setUp(self):
        """Open CONNS connections, detect the server's thrift API version,
        and create the test keyspace with its column families.
        """
        self.cmanager = ManagedCassandraClientFactory(keyspace='system')
        self.client = CassandraClient(self.cmanager)
        for i in xrange(CONNS):
            reactor.connectTCP(HOST, PORT, self.cmanager)
        yield self.cmanager.deferred
        remote_ver = yield self.client.describe_version()
        # Version tuple drives which optional features (counters, RF
        # attribute placement) the tests exercise.
        self.version = tuple(map(int, remote_ver.split('.')))
        self.my_keyspace = ttypes.KsDef(
            name=KEYSPACE,
            strategy_class='org.apache.cassandra.locator.SimpleStrategy',
            strategy_options={},
            cf_defs=[
                ttypes.CfDef(
                    keyspace=KEYSPACE,
                    name=CF,
                    column_type='Standard'
                ),
                ttypes.CfDef(
                    keyspace=KEYSPACE,
                    name=SCF,
                    column_type='Super'
                ),
                ttypes.CfDef(
                    keyspace=KEYSPACE,
                    name=IDX_CF,
                    column_type='Standard',
                    comparator_type='org.apache.cassandra.db.marshal.UTF8Type',
                    column_metadata=[
                        ttypes.ColumnDef(
                            name='col1',
                            validation_class='org.apache.cassandra.db.marshal.UTF8Type',
                            index_type=ttypes.IndexType.KEYS,
                            index_name='idxCol1')
                    ],
                    default_validation_class='org.apache.cassandra.db.marshal.BytesType'
                ),
            ]
        )
        # Older API versions carry RF on the KsDef itself; newer ones put
        # it in strategy_options.
        if self.version <= KS_RF_ATTRIBUTE:
            self.my_keyspace.replication_factor = 1
        else:
            self.my_keyspace.strategy_options['replication_factor'] = '1'
        if self.version >= COUNTERS_SUPPORTED_API:
            self.my_keyspace.cf_defs.extend([
                ttypes.CfDef(
                    keyspace=KEYSPACE,
                    name=COUNTER_CF,
                    column_type='Standard',
                    default_validation_class='org.apache.cassandra.db.marshal.CounterColumnType'
                ),
                ttypes.CfDef(
                    keyspace=KEYSPACE,
                    name=SUPERCOUNTER_CF,
                    column_type='Super',
                    default_validation_class='org.apache.cassandra.db.marshal.CounterColumnType'
                ),
            ])
        yield self.client.system_add_keyspace(self.my_keyspace)
        yield self.client.set_keyspace(KEYSPACE)
    @defer.inlineCallbacks
    def tearDown(self):
        """Drop the test keyspace and tear down all pending reactor state."""
        yield self.client.system_drop_keyspace(self.my_keyspace.name)
        self.cmanager.shutdown()
        for c in reactor.getDelayedCalls():
            c.cancel()
        reactor.removeAll()
    @defer.inlineCallbacks
    def test_insert_get(self):
        """insert/get round-trips for standard and super columns."""
        yield self.client.insert('test', CF, 'testval', column=COLUMN)
        yield self.client.insert('test2', CF, 'testval2', column=COLUMN)
        yield self.client.insert('test', SCF, 'superval', column=COLUMN, super_column=SCOLUMN)
        yield self.client.insert('test2', SCF, 'superval2', column=COLUMN,
                                 super_column=SCOLUMN)
        res = yield self.client.get('test', CF, column=COLUMN)
        self.assertEqual(res.column.value, 'testval')
        res = yield self.client.get('test2', CF, column=COLUMN)
        self.assertEqual(res.column.value, 'testval2')
        res = yield self.client.get('test', SCF, column=COLUMN, super_column=SCOLUMN)
        self.assertEqual(res.column.value, 'superval')
        res = yield self.client.get('test2', SCF, column=COLUMN, super_column=SCOLUMN)
        self.assertEqual(res.column.value, 'superval2')
    @defer.inlineCallbacks
    def test_batch_insert_get_slice_and_count(self):
        """batch_insert then verify via get_slice and get_count."""
        yield self.client.batch_insert('test', CF,
                                       {COLUMN: 'test', COLUMN2: 'test2'})
        yield self.client.batch_insert('test', SCF,
                                       {SCOLUMN: {COLUMN: 'test', COLUMN2: 'test2'}})
        res = yield self.client.get_slice('test', CF, names=(COLUMN, COLUMN2))
        self.assertEqual(res[0].column.value, 'test')
        self.assertEqual(res[1].column.value, 'test2')
        res = yield self.client.get_slice('test', SCF, names=(COLUMN, COLUMN2),
                                          super_column=SCOLUMN)
        self.assertEqual(res[0].column.value, 'test')
        self.assertEqual(res[1].column.value, 'test2')
        res = yield self.client.get_count('test', CF)
        self.assertEqual(res, 2)
    @defer.inlineCallbacks
    def test_batch_mutate_and_remove(self):
        """batch_mutate across two keys/CFs, then batch_remove both."""
        yield self.client.batch_mutate({'test': {CF: {COLUMN: 'test', COLUMN2: 'test2'}, SCF: { SCOLUMN: { COLUMN: 'test', COLUMN2: 'test2'} } }, 'test2': {CF: {COLUMN: 'test', COLUMN2: 'test2'}, SCF: { SCOLUMN: { COLUMN: 'test', COLUMN2: 'test2'} } } })
        res = yield self.client.get_slice('test', CF, names=(COLUMN, COLUMN2))
        self.assertEqual(res[0].column.value, 'test')
        self.assertEqual(res[1].column.value, 'test2')
        res = yield self.client.get_slice('test2', CF, names=(COLUMN, COLUMN2))
        self.assertEqual(res[0].column.value, 'test')
        self.assertEqual(res[1].column.value, 'test2')
        res = yield self.client.get_slice('test', SCF, names=(COLUMN, COLUMN2),
                                          super_column=SCOLUMN)
        self.assertEqual(res[0].column.value, 'test')
        self.assertEqual(res[1].column.value, 'test2')
        res = yield self.client.get_slice('test2', SCF, names=(COLUMN, COLUMN2),
                                          super_column=SCOLUMN)
        self.assertEqual(res[0].column.value, 'test')
        self.assertEqual(res[1].column.value, 'test2')
        yield self.client.batch_remove({CF: ['test', 'test2']}, names=['test', 'test2'])
        yield self.client.batch_remove({SCF: ['test', 'test2']}, names=['test', 'test2'], supercolumn=SCOLUMN)
    @defer.inlineCallbacks
    def test_batch_mutate_with_deletion(self):
        """A None value in a batch_mutate mapping deletes that column."""
        yield self.client.batch_mutate({'test': {CF: {COLUMN: 'test', COLUMN2: 'test2'}}})
        res = yield self.client.get_slice('test', CF, names=(COLUMN, COLUMN2))
        self.assertEqual(res[0].column.value, 'test')
        self.assertEqual(res[1].column.value, 'test2')
        yield self.client.batch_mutate({'test': {CF: {COLUMN: None, COLUMN2: 'test3'}}})
        res = yield self.client.get_slice('test', CF, names=(COLUMN, COLUMN2))
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0].column.value, 'test3')
    @defer.inlineCallbacks
    def test_multiget_slice_remove(self):
        """multiget/multiget_slice over two keys, then remove and re-check."""
        yield self.client.insert('test', CF, 'testval', column=COLUMN)
        yield self.client.insert('test', CF, 'testval', column=COLUMN2)
        yield self.client.insert('test2', CF, 'testval2', column=COLUMN)
        res = yield self.client.multiget(['test', 'test2'], CF, column=COLUMN)
        self.assertEqual(res['test'][0].column.value, 'testval')
        self.assertEqual(res['test2'][0].column.value, 'testval2')
        res = yield self.client.multiget_slice(['test', 'test2'], CF)
        self.assertEqual(res['test'][0].column.value, 'testval')
        self.assertEqual(res['test'][1].column.value, 'testval')
        self.assertEqual(res['test2'][0].column.value, 'testval2')
        yield self.client.remove('test', CF, column=COLUMN)
        yield self.client.remove('test2', CF, column=COLUMN)
        res = yield self.client.multiget(['test', 'test2'], CF, column=COLUMN)
        self.assertEqual(len(res['test']), 0)
        self.assertEqual(len(res['test2']), 0)
    @defer.inlineCallbacks
    def test_range_slices(self):
        """get_range_slices over the full token range returns both keys."""
        yield self.client.insert('test', CF, 'testval', column=COLUMN)
        yield self.client.insert('test', CF, 'testval', column=COLUMN2)
        yield self.client.insert('test2', CF, 'testval2', column=COLUMN)
        ks = yield self.client.get_range_slices(CF, start='', finish='')
        keys = [k.key for k in ks]
        for key in ['test', 'test2']:
            self.assertIn(key, keys)
    @defer.inlineCallbacks
    def test_indexed_slices(self):
        """Secondary-index query on IDX_CF returns the matching row."""
        yield self.client.insert('test1', IDX_CF, 'one', column='col1')
        yield self.client.insert('test2', IDX_CF, 'two', column='col1')
        yield self.client.insert('test3', IDX_CF, 'three', column='col1')
        expressions = [ttypes.IndexExpression('col1', ttypes.IndexOperator.EQ, 'two')]
        res = yield self.client.get_indexed_slices(IDX_CF, expressions, start_key='')
        self.assertEquals(res[0].columns[0].column.value,'two')
    @defer.inlineCallbacks
    def test_counter_add(self):
        """add() increments standard and super counter columns."""
        if self.version < COUNTERS_SUPPORTED_API:
            raise unittest.SkipTest('Counters are not supported before 0.8')
        # test standard column counter
        yield self.client.add('test', COUNTER_CF, 1, column='col')
        res = yield self.client.get('test', COUNTER_CF, column='col')
        self.assertEquals(res.counter_column.value, 1)
        yield self.client.add('test', COUNTER_CF, 1, column='col')
        res = yield self.client.get('test', COUNTER_CF, column='col')
        self.assertEquals(res.counter_column.value, 2)
        # test super column counters
        yield self.client.add('test', SUPERCOUNTER_CF, 1, column='col', super_column='scol')
        res = yield self.client.get('test', SUPERCOUNTER_CF, column='col', super_column='scol')
        self.assertEquals(res.counter_column.value, 1)
        yield self.client.add('test', SUPERCOUNTER_CF, 1, column='col', super_column='scol')
        res = yield self.client.get('test', SUPERCOUNTER_CF, column='col', super_column='scol')
        self.assertEquals(res.counter_column.value, 2)
    @defer.inlineCallbacks
    def test_counter_remove(self):
        """remove_counter() deletes counters; subsequent get raises NotFound."""
        if self.version < COUNTERS_SUPPORTED_API:
            raise unittest.SkipTest('Counters are not supported before 0.8')
        # test standard column counter
        yield self.client.add('test', COUNTER_CF, 1, column='col')
        res = yield self.client.get('test', COUNTER_CF, column='col')
        self.assertEquals(res.counter_column.value, 1)
        yield self.client.remove_counter('test', COUNTER_CF, column='col')
        yield self.assertFailure(self.client.get('test', COUNTER_CF, column='col'),
                                 ttypes.NotFoundException)
        # test super column counters
        yield self.client.add('test', SUPERCOUNTER_CF, 1, column='col', super_column='scol')
        res = yield self.client.get('test', SUPERCOUNTER_CF, column='col', super_column='scol')
        self.assertEquals(res.counter_column.value, 1)
        yield self.client.remove_counter('test', SUPERCOUNTER_CF,
                                         column='col', super_column='scol')
        yield self.assertFailure(self.client.get('test', SUPERCOUNTER_CF,
                                                 column='col', super_column='scol'),
                                 ttypes.NotFoundException)
    def sleep(self, secs):
        """Return a deferred that fires after secs seconds (reactor timer)."""
        d = defer.Deferred()
        reactor.callLater(secs, d.callback, None)
        return d
    @defer.inlineCallbacks
    def test_ttls(self):
        """Columns written with ttl=1 expire and subsequently raise NotFound."""
        yield self.client.insert('test_ttls', CF, 'testval', column=COLUMN, ttl=1)
        res = yield self.client.get('test_ttls', CF, column=COLUMN)
        self.assertEqual(res.column.value, 'testval')
        yield self.sleep(2)
        yield self.assertFailure(self.client.get('test_ttls', CF, column=COLUMN), ttypes.NotFoundException)
        yield self.client.batch_insert('test_ttls', CF, {COLUMN:'testval'}, ttl=1)
        res = yield self.client.get('test_ttls', CF, column=COLUMN)
        self.assertEqual(res.column.value, 'testval')
        yield self.sleep(2)
        yield self.assertFailure(self.client.get('test_ttls', CF, column=COLUMN), ttypes.NotFoundException)
        yield self.client.batch_mutate({'test_ttls': {CF: {COLUMN: 'testval'}}}, ttl=1)
        res = yield self.client.get('test_ttls', CF, column=COLUMN)
        self.assertEqual(res.column.value, 'testval')
        yield self.sleep(2)
        yield self.assertFailure(self.client.get('test_ttls', CF, column=COLUMN), ttypes.NotFoundException)
    def compare_keyspaces(self, ks1, ks2):
        """Assert two KsDefs are equivalent, normalising where the
        replication factor lives across API versions.
        """
        self.assertEqual(ks1.name, ks2.name)
        self.assertEqual(ks1.strategy_class, ks2.strategy_class)
        self.assertEqual(ks1.cf_defs, ks2.cf_defs)
        def get_rf(ksdef):
            # RF may be an attribute or a strategy option depending on
            # server version; prefer the strategy option when present.
            rf = ksdef.replication_factor
            if ksdef.strategy_options and \
                    'replication_factor' in ksdef.strategy_options:
                rf = int(ksdef.strategy_options['replication_factor'])
            return rf
        def strat_opts_no_rf(ksdef):
            # Strategy options with replication_factor masked out.
            if not ksdef.strategy_options:
                return {}
            opts = ksdef.strategy_options.copy()
            if 'replication_factor' in ksdef.strategy_options:
                del opts['replication_factor']
            return opts
        self.assertEqual(get_rf(ks1), get_rf(ks2))
        self.assertEqual(strat_opts_no_rf(ks1), strat_opts_no_rf(ks2))
    @defer.inlineCallbacks
    def test_keyspace_manipulation(self):
        """Create, describe, (optionally rename,) and drop a keyspace."""
        try:
            yield self.client.system_drop_keyspace(T_KEYSPACE)
        except ttypes.InvalidRequestException:
            # Keyspace did not exist; that is fine for setup.
            pass
        ksdef = ttypes.KsDef(name=T_KEYSPACE, strategy_class='org.apache.cassandra.locator.SimpleStrategy', strategy_options={}, cf_defs=[])
        if self.version <= KS_RF_ATTRIBUTE:
            ksdef.replication_factor = 1
        else:
            ksdef.strategy_options['replication_factor'] = '1'
        yield self.client.system_add_keyspace(ksdef)
        ks2 = yield self.client.describe_keyspace(T_KEYSPACE)
        self.compare_keyspaces(ksdef, ks2)
        if DO_SYSTEM_RENAMING:
            newname = T_KEYSPACE + '2'
            yield self.client.system_rename_keyspace(T_KEYSPACE, newname)
            ks2 = yield self.client.describe_keyspace(newname)
            ksdef.name = newname
            self.compare_keyspaces(ksdef, ks2)
        yield self.client.system_drop_keyspace(ksdef.name)
        yield self.assertFailure(self.client.describe_keyspace(T_KEYSPACE), ttypes.NotFoundException)
        if DO_SYSTEM_RENAMING:
            yield self.assertFailure(self.client.describe_keyspace(ksdef.name), ttypes.NotFoundException)
    @defer.inlineCallbacks
    def test_column_family_manipulation(self):
        """Create a CF with explicit attributes, verify, and drop it."""
        # CfDef attributes present in all supported c*/thrift-api versions
        common_attrs = (
            ('column_type', 'Standard'),
            ('comparator_type', 'org.apache.cassandra.db.marshal.BytesType'),
            ('comment', 'foo'),
            ('read_repair_chance', 1.0),
            ('column_metadata', []),
            ('gc_grace_seconds', 86400),
            ('default_validation_class', 'org.apache.cassandra.db.marshal.BytesType'),
            ('min_compaction_threshold', 5),
            ('max_compaction_threshold', 31),
        )
        cfdef = ttypes.CfDef(KEYSPACE, T_CF)
        for attr, val in common_attrs:
            setattr(cfdef, attr, val)
        yield self.client.system_add_column_family(cfdef)
        ksdef = yield self.client.describe_keyspace(KEYSPACE)
        cfdefs = [c for c in ksdef.cf_defs if c.name == T_CF]
        self.assertEqual(len(cfdefs), 1)
        cfdef2 = cfdefs[0]
        for attr, val in common_attrs:
            val1 = getattr(cfdef, attr)
            val2 = getattr(cfdef2, attr)
            self.assertEqual(val1, val2, 'attribute %s mismatch: %r != %r' % (attr, val1, val2))
        if DO_SYSTEM_RENAMING:
            newname = T_CF + '2'
            yield self.client.system_rename_column_family(T_CF, newname)
            ksdef = yield self.client.describe_keyspace(KEYSPACE)
            cfdef2 = [c for c in ksdef.cf_defs if c.name == newname][0]
            self.assertNotIn(T_CF, [c.name for c in ksdef.cf_defs])
            cfdef.name = newname
            self.assertEqual(cfdef, cfdef2)
        yield self.client.system_drop_column_family(cfdef.name)
        ksdef = yield self.client.describe_keyspace(KEYSPACE)
        self.assertNotIn(cfdef.name, [c.name for c in ksdef.cf_defs])
    @defer.inlineCallbacks
    def test_describes(self):
        """Smoke-test the describe_* introspection calls."""
        name = yield self.client.describe_cluster_name()
        self.assertIsInstance(name, str)
        self.assertNotEqual(name, '')
        partitioner = yield self.client.describe_partitioner()
        self.assert_(partitioner.startswith('org.apache.cassandra.'),
                     msg='partitioner is %r' % partitioner)
        snitch = yield self.client.describe_snitch()
        self.assert_(snitch.startswith('org.apache.cassandra.'),
                     msg='snitch is %r' % snitch)
        version = yield self.client.describe_version()
        self.assertIsInstance(version, str)
        self.assertIn('.', version)
        schemavers = yield self.client.describe_schema_versions()
        self.assertIsInstance(schemavers, dict)
        self.assertNotEqual(schemavers, {})
        ring = yield self.client.describe_ring(KEYSPACE)
        self.assertIsInstance(ring, list)
        self.assertNotEqual(ring, [])
        for r in ring:
            self.assertIsInstance(r.start_token, str)
            self.assertIsInstance(r.end_token, str)
            self.assertIsInstance(r.endpoints, list)
            self.assertNotEqual(r.endpoints, [])
            for ep in r.endpoints:
                self.assertIsInstance(ep, str)
    @defer.inlineCallbacks
    def test_errback(self):
        """A get on a removed row errbacks; the failure is swallowed here."""
        yield self.client.remove('poiqwe', CF)
        try:
            yield self.client.get('poiqwe', CF, column='foo')
        except Exception, e:
            pass
    @defer.inlineCallbacks
    def test_bad_params(self):
        """Send an invalid key type on every connection (currently skipped)."""
        # This test seems to kill the thrift connection, so we're skipping it for now
        for x in xrange(CONNS+1):
            try:
                # pass an int where a string is required
                yield self.client.get(12345, CF, column='foo')
            except Exception, e:
                pass
    test_bad_params.skip = "Disabled pending further investigation..."
class ManagedCassandraClientFactoryTest(unittest.TestCase):
    """Connection-management tests for ManagedCassandraClientFactory."""
    @defer.inlineCallbacks
    def test_initial_connection_failure(self):
        """The factory deferred errbacks (DNS or timeout) when the first
        connection attempt targets an unresolvable host.
        """
        cmanager = ManagedCassandraClientFactory()
        client = CassandraClient(cmanager)
        d = cmanager.deferred
        reactor.connectTCP('nonexistent.foobarexample.com', PORT, cmanager)
        yield self.failUnlessFailure(d, error.DNSLookupError, error.TimeoutError)
        cmanager.shutdown()
| |
# # testAPI_automate
# Created by Davy Yue 2017-06-26
import itertools
import string
import os
from xml.etree import cElementTree as ET
import networkx as nx
from mdtraj.core.element import Element
from foyer.smarts_graph import SMARTSGraph
from foyer.smarts import SMARTS as SMARTSParser
import parmed as pmd
from periodic import element
import numpy as np
import matplotlib.pyplot as plt
# get_ipython().magic('matplotlib inline') # unknown error need to check this
import time
from msibi import MSIBI, State, Pair, mie
import mdtraj as md
def read_search_mapping(search_mapping_filename, user_mapping_filename, topology):
    """Read the mapping files and collect SMARTS matches in the topology.

    Parameters
    ----------
    search_mapping_filename : str
        Name of xml file containing ordered search parameters
    user_mapping_filename : str
        Name of xml file containing molecules in the system
    topology : mdTraj.Topology
        Topology object (to be expanded)

    Returns
    -------
    matches : list of list
        For each search string (in priority order), the list of atom-index
        matches found in the topology.
    """
    # TODO: read the ordered search strings from search_mapping_filename;
    # they are hard-coded until the xml schema is settled.
    # root = ET.fromstring(open(search_mapping_filename).read())
    searchlist = []  # list containing all search values ordered by priority
    searchlist.append("C")
    searchlist.append("CC")
    searchlist.append("CCC")
    # for value in root.findall('value'):
    #     searchlist.append(value.attrib['searchstr'])
    print("{0:s}: {1}".format("Search String", searchlist))
    # BUG FIX: this parse was commented out, leaving 'root' undefined and
    # raising NameError in the loop below.
    root = ET.fromstring(open(user_mapping_filename).read())
    molecules = []
    for molecule in root.findall('molecule'):
        molecules.append(molecule.attrib['mol_str'])  # smarts string for molecule
    print("{0:s}: {1}".format("Molecules", molecules))
    parser = SMARTSParser()
    matches = []
    for searchstr in searchlist:
        print(searchstr)
        graph = SMARTSGraph(searchstr, parser=parser)
        i = graph.find_matches(topology)
        matches.append(list(i))
    print(matches)
    return matches
def recursive_MatchMaker():
    """Placeholder for the recursive bead-matching logic.

    Returns
    -------
    int
        Always 0 until the matching algorithm is implemented.
    """
    return 0
# SMARTS string
# Current supported SMARTS string: https://github.com/mosdef-hub/foyer/issues/63
def read_user_mapping(user_mapping_filename):
    """Read the user mapping xml file describing the system's molecules.

    Parameters
    ----------
    user_mapping_filename : str
        Name of xml file containing molecules in the system

    Returns
    -------
    n_unitsPerSection : int
        Number of units (carbon centers) per section being combined
    molecule_name : str
        'molecule_name' attribute of the first <molecule> element
    element_names : list of str
        Element names used for atom definition (currently just 'carbon')
    """
    root = ET.fromstring(open(user_mapping_filename).read())
    # Get molecule_name
    molecule = root.find('molecule')
    molecule_name = molecule.attrib["molecule_name"]
    # Get element_names, n_unitsPerSection
    element_names = []  # only need for atom definition
    element_names.append('carbon')
    n_unitsPerSection = 0  # number units per section being combined
    for section in root.findall('molecule'):
        # allow for different number of units in each section per bead
        # Element identification using Periodic python package
        # https://github.com/luisnaranjo733/periodic
        # use foyer here to find indices
        # BUG FIX: previously read 'molecule' (always the first element)
        # instead of the loop variable 'section'.
        n_unitsPerSection = section.attrib['mol_str'].count("C")
        # need to modify for different center elements
        # counts number of carbon center atoms since each section is organic
        print(n_unitsPerSection)
        # check later for more different elements with loop
    return n_unitsPerSection, molecule_name, element_names
# read_user_mapping(user_mapping_filename='propane_user_mapping.xml')
def read_system_info(struct_filename):
    """Return the total particle count from a hoomdxml structure file.

    Parameters
    ----------
    struct_filename: str
        Name of hoomdxml file containing molecule structures
    """
    with open(struct_filename) as xml_file:
        document = ET.fromstring(xml_file.read())
    # The particle count lives on <configuration><position num="...">.
    position_node = document.find('configuration').find('position')
    n_unitsTotal = int(position_node.attrib['num'])
    print(n_unitsTotal)
    return n_unitsTotal
# read_system_info(struct_filename='start_aa.hoomdxml')
def create_system_mapping(element_names, n_beads_TOTAL, t):
    """Create a system mapping
    Parameters
    ----------
    element_names : (???)
        (???)
    n_beads_TOTAL : int
        Number of beads in the system
    t : mdTraj.Trajectory
        Initial trajectory object generated from structure and trajectory files

    Returns
    -------
    cg_xyz : numpy.ndarray
        Per-frame center-of-mass coordinates of each coarse-grained bead
    cg_top : mdTraj.Topology
        Topology with one atom per coarse-grained bead
    """
    # SLOWEST PART OF CODE IS THIS FUNCTION
    # Initialize atoms with elements
    ## for loop to traverse element_names array for elements
    ## need to expand from just carbon to more/different elements
    ## maybe use elements from periodic package
    # NOTE(review): assumes every atom name is a valid element symbol —
    # confirm against the input xml.
    for atom in t.top.atoms: #possible other function
        atom.element = Element.getBySymbol(atom.name) # check element
        #need for the xml file to have element symbol as type
    # Map the beads accordingly
    cg_idx = 0
    start_idx = 0
    # Hard-coded 3-atoms-to-1-bead (propane) mapping; should eventually be
    # derived from the search and user mapping files.
    propane_map = {0: [0, 1, 2]} ## mapping definition needs to be created
    # from search and user files
    ## TEST CODE
    ######################################################################
    ######################################################################
    ## TEST CODE
    system_mapping = {}
    for n in range(n_beads_TOTAL): # what does sections mean in this particular context
        for bead, atoms in propane_map.items():
            # Shift the template atom indices to this bead's atom range.
            system_mapping[cg_idx] = [x + start_idx for x in atoms]
            start_idx += len(atoms) # understand this part
            cg_idx += 1
    # Apply mapping for XYZ coordinates
    cg_xyz = np.empty((t.n_frames, len(system_mapping), 3))
    for cg_bead, aa_indices in system_mapping.items():
        # Each bead sits at the center of mass of its atoms, per frame.
        cg_xyz[:, cg_bead, :] = md.compute_center_of_mass(t.atom_slice(aa_indices))
    # Apply mapping for Topology object
    cg_top = md.Topology()
    for cg_bead in system_mapping.keys(): #i got the keys keys keys
        cg_top.add_atom('carbon', element.virtual_site, cg_top.add_residue('A',
                        cg_top.add_chain()))
        ## Check element and name for items 'A'
        ## Possible interface with mbuild for better UI and aesthetics
    return cg_xyz, cg_top
def compute_files(cg_xyz, cg_top, t, molecule_name, element_names):
    """Compute the trajectory and rdf files
    Parameters
    ----------
    cg_xyz : ?????
        Coarse-grained xyz coordinates of all the beads
    cg_top : mdTraj.Topology
        Coarse-grained topology object for all the beads
    t : mdTraj.Trajectory (???? unsure if needed)
        Initial trajectory object generated from structure and trajectory files
    molecule_name : str
        Name of the molecule(s) in the system
    element_names : (???)
        (???)

    Side effects: writes data/cg_traj_<molecule_name>.dcd and
    data/rdfs_aa.txt under the current working directory, then plots.
    """
    # Create Trajectory object and save to .dcd file
    cg_traj = md.Trajectory(cg_xyz, cg_top, time=None,
                            unitcell_lengths=t.unitcell_lengths,
                            unitcell_angles=t.unitcell_angles)
    ## need better file naming convention and rule guideline
    filepath = os.path.join(os.getcwd(), 'data/cg_traj_{0:s}.dcd'.format(molecule_name))
    cg_traj.save_dcd(filepath) ## need check statements to prevent file overwrite
    ## rename old/new files accordingly
    # Create rdfs file from pairs
    ## might need for loop if more elements (later implementation)
    ## need some way to recognize pairs of units - not just carbon elements if expanded
    pairs = cg_traj.top.select_pairs(selection1='name {0:s}'.format(element_names[0]), ## Check element
                                     selection2='name {0:s}'.format(element_names[0])) ## Check element
    r, g_r = md.compute_rdf(cg_traj, pairs=pairs,
                            r_range=(0, 1.2), bin_width=0.005)
    ## identify end of range with data pairs
    ## maybe something with read-file function - compare data values next to each other
    ## See where data drop-off occurs and plot respectively
    ## maybe use slope - negative less than some number, set as cutoff
    ## record cutoff point somewhere for debugging purposes
    filepath = os.path.join(os.getcwd(), 'data/rdfs_aa.txt')
    np.savetxt(filepath, np.transpose([r, g_r])) # need check statements to prevent file overwrite
    print("Saved rdfs to file")
    plot_output(r, g_r, molecule_name)
def plot_output(x, y, molecule_name):
    """Plot g(r) versus r and save the figure as a pdf in data/.

    (Fix: the previous docstring was copy-pasted from the search-mapping
    reader and described the wrong function.)

    Parameters
    ----------
    x : numpy.ndarray, dtype=float
        All radius values generated by the rdf computation
    y : numpy.ndarray, dtype=float
        All g(r) values generated by the rdf computation
    molecule_name : str
        Name of the molecule(s) in the system
    """
    ## modify figsize according to drop-off point
    plt.figure(num=None, figsize=(16, 12), dpi=80, facecolor='w', edgecolor='k')
    plt.plot(x, y, label="CG {0:s}".format(molecule_name)) ## Check label use string format method
    ## look up more aesthetic matplotlib functions
    plt.title('Propane CG Graph at {0:s}'.format(
        time.strftime("%m/%d/%Y %I:%M:%S")))
    plt.xlabel("r")
    plt.ylabel("g(r)")
    plt.legend()
    plt.savefig(os.path.join(os.getcwd(),
                             'data/trajAPI_plot_{0:s}.pdf'.format(molecule_name)))
    print("Figure should be saved to data folder")
def convert_Traj_RDF():
    """Convert the trajectory and structure files into the rdf"""
    ## add parameters to function calls - maybe add other functions
    ## other potential functions:
    ##     - check_file_overwrite()
    ##     - check_g_r_dropoff() - integrate with plot function
    ##     - manage_filetypes() - read in files (maybe for mdtraj flexibility)
    # All inputs are expected under ./data relative to the working directory.
    traj_filename = os.path.join(os.getcwd(),
                                 'data/traj_unwrapped.dcd')
    struct_filename = os.path.join(os.getcwd(),
                                   'data/start_aa.hoomdxml')
    search_mapping_filename = os.path.join(os.getcwd(),
                                           'data/propane_search_mapping.xml')
    user_mapping_filename = os.path.join(os.getcwd(),
                                         'data/propane_user_mapping.xml')
    t = md.load(traj_filename, top=struct_filename)
    print("Loaded struct & traj files")
    # Use only the first molecule to derive the SMARTS matching topology.
    molecules = t.top.find_molecules()
    first_mol_indices = [atom.index for atom in list(molecules[0])]
    first_molecule = t.top.subset(first_mol_indices) # topology for first molecule
    # NOTE(review): assumes atom names are valid element symbols — confirm.
    for atom in first_molecule.atoms: #possible other function
        atom.element = Element.getBySymbol(atom.name)
    topology = first_molecule.to_openmm(traj=None) # openmm topology accepted by foyer
    # import pdb; pdb.set_trace()
    read_search_mapping(search_mapping_filename,
                        user_mapping_filename, topology)
    # The remaining pipeline is disabled while the mapping code is in flux:
    # n_units_TOTAL = read_system_info(struct_filename)
    # print("Read in system info from struct file")
    # n_unitsPerSection, molecule_name, element_names = read_user_mapping(user_mapping_filename)
    # print("Read in user_mapping file")
    # n_beads_TOTAL = n_units_TOTAL // n_unitsPerSection
    # cg_xyz, cg_top = create_system_mapping(element_names, n_beads_TOTAL, t)
    # print("Created system mapping")
    # compute_files(cg_xyz, cg_top, t, molecule_name, element_names)
# Execute functions
## maybe initialize element_names array from read-in file with bonds
## bonds recorded in structure indicate elements involved in rdf
# NOTE(review): runs at import time; consider an `if __name__ == "__main__":`
# guard so importing this module does not trigger the conversion.
convert_Traj_RDF()
| |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git-archive substitution keywords for this tree.

    In a git-archive tarball the $Format$ markers below have been expanded
    by git; in a plain source checkout they remain literal.
    """
    # setup.py/versioneer.py will grep for the variable names, so each
    # assignment must stay on a line of its own.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    return {"refnames": git_refnames, "full": git_full}
class VersioneerConfig:
    """Bag of configuration attributes; get_config() fills them in."""
    pass
def get_config():
    """Return the static configuration baked in by 'setup.py versioneer'."""
    cfg = VersioneerConfig()
    # these values were filled in when 'setup.py versioneer' created
    # _version.py
    for attr, value in (("VCS", "git"),
                        ("style", "pep440"),
                        ("tag_prefix", ""),
                        ("parentdir_prefix", "ed25519-"),
                        ("versionfile_source", "src/ed25519/_version.py"),
                        ("verbose", False)):
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Raised when a particular version-discovery strategy does not apply."""
    pass
# Registry of per-VCS handler functions, keyed as HANDLERS[vcs][method].
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator that records a function in HANDLERS[vcs][method]."""
    def decorate(f):
        # create the per-VCS table on first use, then record the handler
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first available executable from *commands* with *args*.

    Returns the stripped stdout text, or None when no executable could be
    found or the command exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # this candidate doesn't exist; try the next one
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None
        break
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive the version from the name of the directory containing *root*.

    Source tarballs conventionally unpack into a directory named
    '<project>-<version>'; strip the prefix and use the remainder.
    Raises NotThisMethod when the directory name doesn't match.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False, "error": None}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Scrape the git_refnames/git_full keyword values out of _version.py.

    The code embedded in _version.py can just fetch these values directly,
    but when used from setup.py we don't want to import _version.py, so we
    extract them with a regexp instead.  Returns a possibly-empty dict;
    a missing or unreadable file is treated as "no keywords".
    """
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if a read raises;
        # the original open()/close() pair leaked it on any mid-read error.
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Derive a version dict from expanded git-archive keywords.

    Raises NotThisMethod when the keywords are absent or unexpanded
    (i.e. this is not a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set(name.strip() for name in refnames.strip("()").split(","))
    # git >= 1.8.3 labels tags as "tag: foo-1.0" rather than bare "foo-1.0";
    # prefer entries carrying that marker.
    TAG = "tag: "
    tags = set(name[len(TAG):] for name in refs if name.startswith(TAG))
    if not tags:
        # Older git (or genuinely no tags).  Fall back to a heuristic:
        # assume version tags contain a digit, which filters out common
        # branch names like "release", "stabilization", "HEAD", "master".
        tags = set(name for name in refs if re.search(r'\d', name))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting prefers e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % version)
        return {"version": version,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None}
    # no suitable tags: version is unknown, but the full hex is still useful
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Run git inside *root* and return the raw version 'pieces' dict.

    Produced keys: "long" (full hex), "short", "error", "dirty", and --
    when parsing succeeds -- "closest-tag" and "distance".  Raises
    NotThisMethod when *root* is not a git checkout or git cannot be run.
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        # shell=False in run_command, so name the Windows wrappers explicitly
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
    # if there are no tags, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long"],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def plus_or_dot(pieces):
    """Return the separator for a PEP 440 local-version segment.

    PEP 440 allows only one '+' in a version; if the closest tag already
    contains one, subsequent segments are joined with '.'.

    pieces["closest-tag"] may legitimately be None (no tag found, as set
    by git_pieces_from_vcs); the original `pieces.get("closest-tag", "")`
    returned that None and `"+" in None` raised TypeError.  Coalesce None
    to "" so an absent tag and a None tag behave identically.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build TAG[+DISTANCE.gHEX[.dirty]] -- a PEP 440 post-release local
    version identifier.

    A tagged-but-dirty build yields TAG+0.gHEX.dirty.  With no tags at
    all (git_describe was bare HEX): 0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
        return rendered
    # untagged tree
    rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
    if pieces["dirty"]:
        rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """Build TAG[.post.devDISTANCE] -- no -dirty marker.

    With no tags at all: 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """Build TAG[.postDISTANCE[.dev0]+gHEX].

    ".dev0" marks a dirty tree; note it sorts backwards (a dirty tree
    appears "older" than the corresponding clean one), but you shouldn't
    be releasing software with -dirty anyway.  With no tags at all:
    0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # untagged tree
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces) + "g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """Build TAG[.postDISTANCE[.dev0]]; ".dev0" marks a dirty tree.

    With no tags at all: 0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
        return rendered
    # untagged tree
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """Mimic 'git describe --tags --dirty --always' output:
    TAG[-DISTANCE-gHEX][-dirty].

    With no tags at all: HEX[-dirty] (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        rendered = pieces["short"]
    return rendered + "-dirty" if pieces["dirty"] else rendered
def render_git_describe_long(pieces):
    """Mimic 'git describe --tags --dirty --always --long':
    TAG-DISTANCE-gHEX[-dirty], with the distance/hash unconditional.

    With no tags at all: HEX[-dirty] (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render *pieces* into the final version dict using *style*.

    Returns {"version", "full-revisionid", "dirty", "error"}.  An error
    recorded in *pieces* short-circuits to an "unknown" version; an
    unrecognised style raises ValueError.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}
    if not style or style == "default":
        style = "pep440"  # the default
    known_styles = ("pep440", "pep440-pre", "pep440-post", "pep440-old",
                    "git-describe", "git-describe-long")
    if style not in known_styles:
        raise ValueError("unknown style '%s'" % style)
    # dispatch table instead of an if/elif chain
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
def get_versions():
    """Compute this package's version dict, trying each strategy in turn.

    Order: expanded git-archive keywords, then 'git describe' run in the
    source tree, then the parent-directory name.  Falls back to
    "0+unknown" with an explanatory error string.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # frozen/embedded interpreters may not define __file__
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
| |
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
# Experiment name: this script's filename without directory or extension.
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
# Base directory under which each run's output path is built (see main()).
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
# Plot-saving cadence installed by the set_save_plot_interval callback.
SAVE_PLOT_INTERVAL = 250
# Passed as 'gradient_steps' to the BLSTM layer configs in exp_a
# (presumably a truncated-backprop length -- confirm against neuralnilm).
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
"""
def set_save_plot_interval(net, epoch):
    # Layer-change callback (referenced from exp_a's layer_changes):
    # reset the net's plot-saving cadence to the module-wide default.
    # `epoch` is unused here -- presumably required by the callback
    # signature; confirm against neuralnilm's layer-change machinery.
    net.save_plot_interval = SAVE_PLOT_INTERVAL
def exp_a(name):
    """Build the Net for experiment 'a' (name becomes its experiment_name).

    Starts as a two-hidden-layer dense network; `layer_changes` then swaps
    the top layers for BLSTM layers at later points (the integer keys look
    like epoch counts -- TODO confirm against neuralnilm's Net).
    """
    # like 134a but linear outputs and MSE
    source = RealApplianceSource(
        filename='/data/dk3810/ukdale.h5',
        appliances=[
            ['fridge freezer', 'fridge', 'freezer'],
            'hair straighteners',
            'television',
            'dish washer',
            ['washer dryer', 'washing machine']
        ],
        max_appliance_powers=[300, 500, 200, 2500, 2400],
        on_power_thresholds=[5, 5, 5, 5, 5],
        max_input_power=5900,
        min_on_durations=[60, 60, 60, 1800, 1800],
        min_off_durations=[12, 12, 12, 1800, 600],
        window=("2013-06-01", "2014-07-01"),
        seq_length=1500,
        output_one_appliance=False,
        boolean_targets=False,
        train_buildings=[1],
        validation_buildings=[1],
        skip_probability=0.7,
        n_seq_per_batch=10
    )
    net = Net(
        experiment_name=name,
        source=source,
        save_plot_interval=5000,
        loss_function=mse,
        updates=partial(nesterov_momentum, learning_rate=1.0),
        # initial architecture: sigmoid dense layer + linear output layer
        layers_config=[
            {
                'type': DenseLayer,
                'num_units': 50,
                'nonlinearity': sigmoid,
                'W': Uniform(1),
                'b': Uniform(1)
            },
            {
                'type': DenseLayer,
                'num_units': source.n_outputs,
                'nonlinearity': None
            }
        ],
        layer_changes={
            # replace the top two layers with a fresh dense stack
            50001: {
                'remove_from': -2,
                'new_layers':
                [
                    {
                        'type': DenseLayer,
                        'num_units': 50,
                        'nonlinearity': sigmoid,
                        'W': Uniform(1),
                        'b': Uniform(1)
                    },
                    {
                        'type': DenseLayer,
                        'num_units': source.n_outputs,
                        'nonlinearity': None
                    }
                ]
            },
            # swap in a 40-unit BLSTM and reset the plot interval
            100001: {
                'remove_from': -2,
                'callback': set_save_plot_interval,
                'new_layers':
                [
                    {
                        'type': BLSTMLayer,
                        'num_units': 40,
                        # 'W_in_to_cell': Uniform(5),
                        'gradient_steps': GRADIENT_STEPS,
                        'peepholes': False
                    },
                    {
                        'type': DenseLayer,
                        'num_units': source.n_outputs,
                        'nonlinearity': None
                    }
                ]
            },
            # widen to an 80-unit BLSTM
            100501: {
                'remove_from': -3,
                'new_layers':
                [
                    {
                        'type': BLSTMLayer,
                        'num_units': 80,
                        # 'W_in_to_cell': Uniform(5),
                        'gradient_steps': GRADIENT_STEPS,
                        'peepholes': False
                    },
                    {
                        'type': DenseLayer,
                        'num_units': source.n_outputs,
                        'nonlinearity': None
                    }
                ]
            }
        }
    )
    return net
def init_experiment(experiment):
    """Build the network for experiment suffix *experiment* (e.g. 'a').

    Looks up the module-level factory ``exp_<experiment>`` and calls it
    with the full experiment name (NAME + suffix).  Returns the Net.
    """
    full_exp_name = NAME + experiment
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    # Dispatch via an explicit globals() lookup instead of eval():
    # identical behaviour for valid suffixes, without the
    # arbitrary-code-execution surface of eval'ing a formatted string.
    # (An unknown suffix now raises KeyError rather than NameError.)
    net = globals()['exp_{:s}'.format(experiment)](full_exp_name)
    return net
def main():
    """Run every configured experiment variant in sequence.

    Ctrl-C stops the whole sweep; a TrainingError aborts only the
    current variant and is reported before moving on.
    """
    for suffix in list('a'):
        out_dir = os.path.join(PATH, NAME + suffix)
        try:
            run_experiment(init_experiment(suffix), out_dir, epochs=101001)
        except KeyboardInterrupt:
            break
        except TrainingError as e:
            print("EXCEPTION:", e)


if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.