gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python
#
# Electrum - lightweight NavCoin client
# Copyright (C) 2013 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import navcoin
from navcoin import *
from i18n import _
from transaction import Transaction, is_extended_pubkey
from util import print_msg, InvalidPassword
class Account(object):
    """Base class for a deterministic wallet account.

    Holds two parallel chains of public keys (receiving and change); the
    corresponding addresses are derived in memory and never persisted.
    Subclasses supply the actual key derivation via derive_pubkeys().
    """

    def __init__(self, v):
        # v is the storage dict; only the pubkeys are persisted.
        self.receiving_pubkeys = v.get('receiving', [])
        self.change_pubkeys = v.get('change', [])
        # addresses will not be stored on disk; rebuild them from pubkeys.
        # Fix: use list comprehensions instead of map() — these results are
        # later appended to (create_new_address) and sliced (get_addresses),
        # which a Python 3 map object does not support.
        self.receiving_addresses = [self.pubkeys_to_address(p) for p in self.receiving_pubkeys]
        self.change_addresses = [self.pubkeys_to_address(p) for p in self.change_pubkeys]

    def dump(self):
        """Return the serializable account state (pubkeys only)."""
        return {'receiving': self.receiving_pubkeys, 'change': self.change_pubkeys}

    def get_pubkey(self, for_change, n):
        """Return the n-th pubkey of the change or receiving chain."""
        pubkeys_list = self.change_pubkeys if for_change else self.receiving_pubkeys
        return pubkeys_list[n]

    def get_address(self, for_change, n):
        """Return the n-th address of the change or receiving chain."""
        addr_list = self.change_addresses if for_change else self.receiving_addresses
        return addr_list[n]

    def get_pubkeys(self, for_change, n):
        # Single-signer account: one pubkey per address.
        return [self.get_pubkey(for_change, n)]

    def get_addresses(self, for_change):
        """Return a copy of the requested address chain."""
        addr_list = self.change_addresses if for_change else self.receiving_addresses
        return addr_list[:]

    def derive_pubkeys(self, for_change, n):
        # Implemented by subclasses (BIP32 / old-style derivation).
        pass

    def create_new_address(self, for_change):
        """Derive, record and return the next address of the chain."""
        pubkeys_list = self.change_pubkeys if for_change else self.receiving_pubkeys
        addr_list = self.change_addresses if for_change else self.receiving_addresses
        n = len(pubkeys_list)
        pubkeys = self.derive_pubkeys(for_change, n)
        address = self.pubkeys_to_address(pubkeys)
        pubkeys_list.append(pubkeys)
        addr_list.append(address)
        print_msg(address)
        return address

    def pubkeys_to_address(self, pubkey):
        """Convert a hex pubkey into an address (overridden for multisig)."""
        return public_key_to_bc_address(pubkey.decode('hex'))

    def has_change(self):
        return True

    def get_name(self, k):
        return _('Main account')

    def redeem_script(self, for_change, n):
        # No redeem script for single-signature accounts.
        return None

    def is_used(self, wallet):
        """True if any receiving address has transaction history."""
        addresses = self.get_addresses(False)
        return any(wallet.address_is_old(a, -1) for a in addresses)

    def synchronize_sequence(self, wallet, for_change):
        """Extend the chain until the trailing gap-limit window is unused."""
        limit = wallet.gap_limit_for_change if for_change else wallet.gap_limit
        while True:
            addresses = self.get_addresses(for_change)
            if len(addresses) < limit:
                address = self.create_new_address(for_change)
                wallet.add_address(address)
                continue
            # Fix: the original compared map(...) against limit*[False],
            # which is always False under Python 3 (map object != list).
            # Stop once none of the last `limit` addresses has history.
            if not any(wallet.address_is_old(a) for a in addresses[-limit:]):
                break
            else:
                address = self.create_new_address(for_change)
                wallet.add_address(address)

    def synchronize(self, wallet):
        """Synchronize both the receiving and the change chains."""
        self.synchronize_sequence(wallet, False)
        self.synchronize_sequence(wallet, True)
class ImportedAccount(Account):
    """Account made of individually imported keypairs; nothing is derived.

    self.keypairs maps address -> (pubkey_hex, encrypted_privkey).
    """

    def __init__(self, d):
        self.keypairs = d['imported']

    def synchronize(self, wallet):
        # Imported keys are a fixed set; there is nothing to generate.
        return

    def get_addresses(self, for_change):
        # Imported accounts have no change chain.
        return [] if for_change else sorted(self.keypairs.keys())

    def get_pubkey(self, *sequence):
        for_change, i = sequence
        assert for_change == 0
        addr = self.get_addresses(0)[i]
        return self.keypairs[addr][0]

    def get_xpubkeys(self, for_change, n):
        # No extended keys here; plain pubkeys stand in.
        return self.get_pubkeys(for_change, n)

    def get_private_key(self, sequence, wallet, password):
        """Decrypt and return the private key for the given sequence.

        Raises InvalidPassword if decryption yields a key that does not
        reproduce the stored address (this doubles as the password check).
        """
        from wallet import pw_decode
        for_change, i = sequence
        assert for_change == 0
        address = self.get_addresses(0)[i]
        pk = pw_decode(self.keypairs[address][1], password)
        # this checks the password
        if address != address_from_private_key(pk):
            raise InvalidPassword()
        return [pk]

    def has_change(self):
        return False

    def add(self, address, pubkey, privkey, password):
        """Store a new keypair, encrypting the private key."""
        from wallet import pw_encode
        self.keypairs[address] = (pubkey, pw_encode(privkey, password))

    def remove(self, address):
        self.keypairs.pop(address)

    def dump(self):
        return {'imported': self.keypairs}

    def get_name(self, k):
        return _('Imported keys')

    def update_password(self, old_password, new_password):
        """Re-encrypt every stored private key under the new password."""
        # Fix: pw_decode/pw_encode were used here without being imported,
        # which raised NameError at runtime — the sibling methods import
        # them locally from wallet, so do the same here.
        from wallet import pw_decode, pw_encode
        for k, v in self.keypairs.items():
            pubkey, a = v
            b = pw_decode(a, old_password)
            c = pw_encode(b, new_password)
            self.keypairs[k] = (pubkey, c)
class OldAccount(Account):
    """ Privatekey(type,n) = Master_private_key + H(n|S|type) """
    # Legacy (pre-BIP32) Electrum derivation: every child key is the master
    # key offset by a scalar computed from the sequence number and the mpk.

    def __init__(self, v):
        Account.__init__(self, v)
        # mpk is stored on disk as hex; keep the raw bytes in memory.
        self.mpk = v['mpk'].decode('hex')

    @classmethod
    def mpk_from_seed(klass, seed):
        """Derive the hex-encoded master public key from a seed string."""
        secexp = klass.stretch_key(seed)
        master_private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
        master_public_key = master_private_key.get_verifying_key().to_string().encode('hex')
        return master_public_key

    @classmethod
    def stretch_key(self,seed):
        """Strengthen the seed with 100000 rounds of SHA-256 and return it as a number."""
        oldseed = seed
        for i in range(100000):
            seed = hashlib.sha256(seed + oldseed).digest()
        return string_to_number( seed )

    @classmethod
    def get_sequence(self, mpk, for_change, n):
        # Scalar offset for child (for_change, n): Hash("n:for_change:" + mpk).
        return string_to_number( Hash( "%d:%d:"%(n,for_change) + mpk ) )

    def get_address(self, for_change, n):
        """Derive the n-th address directly from the derived pubkey."""
        pubkey = self.get_pubkey(for_change, n)
        address = public_key_to_bc_address( pubkey.decode('hex') )
        return address

    @classmethod
    def get_pubkey_from_mpk(self, mpk, for_change, n):
        """Return the uncompressed hex pubkey: master point + z*G."""
        z = self.get_sequence(mpk, for_change, n)
        master_public_key = ecdsa.VerifyingKey.from_string(mpk, curve = SECP256k1)
        pubkey_point = master_public_key.pubkey.point + z*SECP256k1.generator
        public_key2 = ecdsa.VerifyingKey.from_public_point(pubkey_point, curve = SECP256k1)
        # '04' prefix marks an uncompressed SEC-encoded public key.
        return '04' + public_key2.to_string().encode('hex')

    def derive_pubkeys(self, for_change, n):
        return self.get_pubkey_from_mpk(self.mpk, for_change, n)

    def get_private_key_from_stretched_exponent(self, for_change, n, secexp):
        """Return the serialized (WIF) private key for child (for_change, n)."""
        order = generator_secp256k1.order()
        # Child secret = (master secret + sequence scalar) mod curve order.
        secexp = ( secexp + self.get_sequence(self.mpk, for_change, n) ) % order
        pk = number_to_string( secexp, generator_secp256k1.order() )
        compressed = False
        return SecretToASecret( pk, compressed )

    def get_private_key(self, sequence, wallet, password):
        """Recompute the child private key from the wallet seed."""
        seed = wallet.get_seed(password)
        self.check_seed(seed)
        for_change, n = sequence
        secexp = self.stretch_key(seed)
        pk = self.get_private_key_from_stretched_exponent(for_change, n, secexp)
        return [pk]

    def check_seed(self, seed):
        """Raise InvalidPassword unless the seed reproduces the stored mpk."""
        secexp = self.stretch_key(seed)
        master_private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
        master_public_key = master_private_key.get_verifying_key().to_string()
        if master_public_key != self.mpk:
            print_error('invalid password (mpk)', self.mpk.encode('hex'), master_public_key.encode('hex'))
            raise InvalidPassword()
        return True

    def get_master_pubkeys(self):
        return [self.mpk.encode('hex')]

    def get_type(self):
        return _('Old Electrum format')

    def get_xpubkeys(self, for_change, n):
        # Extended-pubkey wire form: 'fe' marker + hex mpk + two 2-byte
        # little-endian sequence components (for_change, n).
        s = ''.join(map(lambda x: navcoin.int_to_hex(x,2), (for_change, n)))
        mpk = self.mpk.encode('hex')
        x_pubkey = 'fe' + mpk + s
        return [ x_pubkey ]

    @classmethod
    def parse_xpubkey(self, x_pubkey):
        """Inverse of get_xpubkeys: return (hex mpk, [for_change, n])."""
        assert is_extended_pubkey(x_pubkey)
        pk = x_pubkey[2:]
        mpk = pk[0:128]
        dd = pk[128:]
        s = []
        while dd:
            # Each component is 2 bytes = 4 hex chars, little-endian.
            n = int(navcoin.rev_hex(dd[0:4]), 16)
            dd = dd[4:]
            s.append(n)
        assert len(s) == 2
        return mpk, s
class BIP32_Account(Account):
    """Account whose keys are derived from a single BIP32 master xpub.

    The receiving/change branch xpubs (xpub/0 and xpub/1) are derived
    lazily and cached in memory; only self.xpub is persisted.
    """

    def __init__(self, v):
        Account.__init__(self, v)
        self.xpub = v['xpub']
        # Branch xpubs are derived on first use (see derive_pubkeys).
        self.xpub_receive = None
        self.xpub_change = None

    def dump(self):
        """Serialize pubkeys (via the base class) plus the master xpub."""
        d = Account.dump(self)
        d['xpub'] = self.xpub
        return d

    def first_address(self):
        """Return (address, pubkeys) for the first receiving address."""
        pubkeys = self.derive_pubkeys(0, 0)
        addr = self.pubkeys_to_address(pubkeys)
        return addr, pubkeys

    def get_master_pubkeys(self):
        return [self.xpub]

    @classmethod
    def derive_pubkey_from_xpub(self, xpub, for_change, n):
        """Derive the hex pubkey at path xpub/for_change/n (public CKD)."""
        _, _, _, c, cK = deserialize_xkey(xpub)
        for i in [for_change, n]:
            cK, c = CKD_pub(cK, c, i)
        return cK.encode('hex')

    def get_pubkey_from_xpub(self, xpub, for_change, n):
        """Return the already-derived pubkey that belongs to the given xpub."""
        xpubs = self.get_master_pubkeys()
        i = xpubs.index(xpub)
        pubkeys = self.get_pubkeys(for_change, n)
        return pubkeys[i]

    def derive_pubkeys(self, for_change, n):
        # Derive (and cache) the branch xpub, then the n-th child pubkey.
        xpub = self.xpub_change if for_change else self.xpub_receive
        if xpub is None:
            xpub = bip32_public_derivation(self.xpub, "", "/%d"%for_change)
            if for_change:
                self.xpub_change = xpub
            else:
                self.xpub_receive = xpub
        _, _, _, c, cK = deserialize_xkey(xpub)
        cK, c = CKD_pub(cK, c, n)
        result = cK.encode('hex')
        return result

    def get_private_key(self, sequence, wallet, password):
        """Derive the private key(s) for `sequence` from every master key
        of the wallet whose xpriv is available."""
        out = []
        xpubs = self.get_master_pubkeys()
        # Fix: .iteritems() is Python-2-only; .items() behaves identically
        # here and keeps the module portable.
        roots = [k for k, v in wallet.master_public_keys.items() if v in xpubs]
        for root in roots:
            xpriv = wallet.get_master_private_key(root, password)
            if not xpriv:
                continue
            _, _, _, c, k = deserialize_xkey(xpriv)
            pk = bip32_private_key(sequence, k, c)
            out.append(pk)
        return out

    def get_type(self):
        return _('Standard 1 of 1')

    def get_xpubkeys(self, for_change, n):
        # unsorted
        # Wire form: 'ff' marker + Base58Check-decoded xpub (hex) + two
        # 2-byte little-endian sequence components.
        s = ''.join(map(lambda x: navcoin.int_to_hex(x,2), (for_change,n)))
        xpubs = self.get_master_pubkeys()
        return map(lambda xpub: 'ff' + navcoin.DecodeBase58Check(xpub).encode('hex') + s, xpubs)

    @classmethod
    def parse_xpubkey(self, pubkey):
        """Inverse of get_xpubkeys: return (xkey, [for_change, n])."""
        assert is_extended_pubkey(pubkey)
        pk = pubkey.decode('hex')
        pk = pk[1:]
        xkey = navcoin.EncodeBase58Check(pk[0:78])
        dd = pk[78:]
        s = []
        while dd:
            # Each component is 2 raw bytes, little-endian.
            n = int(navcoin.rev_hex(dd[0:2].encode('hex')), 16)
            dd = dd[2:]
            s.append(n)
        assert len(s) == 2
        return xkey, s

    def get_name(self, k):
        return "Main account" if k == '0' else "Account " + k
class Multisig_Account(BIP32_Account):
    """An m-of-n multisig account built from a list of cosigner xpubs."""

    def __init__(self, v):
        # Required signature count; the historical default is 2-of-n.
        self.m = v.get('m', 2)
        Account.__init__(self, v)
        self.xpub_list = v['xpubs']

    def dump(self):
        serialized = Account.dump(self)
        serialized['xpubs'] = self.xpub_list
        serialized['m'] = self.m
        return serialized

    def get_pubkeys(self, for_change, n):
        # Each stored entry is already the full cosigner pubkey list.
        return self.get_pubkey(for_change, n)

    def derive_pubkeys(self, for_change, n):
        # Derive the child pubkey under every cosigner's master xpub.
        return [self.derive_pubkey_from_xpub(master, for_change, n)
                for master in self.get_master_pubkeys()]

    def redeem_script(self, for_change, n):
        cosigner_pubkeys = self.get_pubkeys(for_change, n)
        return Transaction.multisig_script(sorted(cosigner_pubkeys), self.m)

    def pubkeys_to_address(self, pubkeys):
        # P2SH-style address from the hash of the multisig redeem script.
        redeem_script = Transaction.multisig_script(sorted(pubkeys), self.m)
        return hash_160_to_bc_address(hash_160(redeem_script.decode('hex')), 28)

    def get_address(self, for_change, n):
        return self.pubkeys_to_address(self.get_pubkeys(for_change, n))

    def get_master_pubkeys(self):
        return self.xpub_list

    def get_type(self):
        return _('Multisig %d of %d'%(self.m, len(self.xpub_list)))
| |
from datetime import date
from pathlib import Path
from io import StringIO
from unittest import mock
import logging
import pytest
import responses
from yaml import YAMLError
from cumulusci.core.exceptions import BulkDataException
from cumulusci.tasks.bulkdata.mapping_parser import (
MappingLookup,
MappingStep,
parse_from_yaml,
validate_and_inject_mapping,
ValidationError,
CaseInsensitiveDict,
)
from cumulusci.tasks.bulkdata.step import DataOperationType
from cumulusci.tests.util import DummyOrgConfig, mock_describe_calls
from cumulusci.tasks.bulkdata.step import DataApi
class TestMappingParser:
def test_simple_parse(self):
    """A well-formed v2 mapping file parses to a truthy mapping."""
    base_path = Path(__file__).parent / "mapping_v2.yml"
    assert parse_from_yaml(base_path)
def test_after(self):
    """Lookups with an 'after' clause are parsed and expose that clause."""
    base_path = Path(__file__).parent / "mapping_after.yml"
    result = parse_from_yaml(base_path)
    step = result["Insert Accounts"]
    lookups = step["lookups"]
    assert lookups
    assert "after" in lookups["ParentId"]
    # Collect the distinct 'after' step names across all lookups.
    after_list = {
        lookup["after"] for lookup in lookups.values() if "after" in lookup
    }
    assert after_list
def test_deprecation(self, caplog):
    """Parsing a mapping that uses record_type logs a deprecation warning."""
    base_path = Path(__file__).parent / "mapping_v2.yml"
    caplog.set_level(logging.WARNING)
    parse_from_yaml(base_path)
    assert "record_type" in caplog.text
def test_bad_mapping_syntax(self):
    """Corrupting the YAML punctuation must surface as a YAMLError."""
    source = (Path(__file__).parent / "mapping_v2.yml").read_text()
    mangled = source.replace(":", ": abcd")
    with pytest.raises(YAMLError):
        parse_from_yaml(StringIO(mangled))
def test_bad_mapping_grammar(self):
    """Valid YAML with an unknown key must fail schema validation."""
    source = (Path(__file__).parent / "mapping_v2.yml").read_text()
    mangled = source.replace("record_type", "xyzzy")
    with pytest.raises(ValidationError):
        parse_from_yaml(StringIO(mangled))
def test_bad_mapping_id_mode(self):
    """Mapping the Id field in a v2 file must fail validation."""
    source = (Path(__file__).parent / "mapping_v2.yml").read_text()
    mangled = source.replace("Name: name", "Id: sf_id")
    with pytest.raises(ValidationError):
        parse_from_yaml(StringIO(mangled))
def test_bad_mapping_oid_as_pk(self):
    """Injecting oid_as_pk into a v1 mapping must fail validation."""
    base_path = Path(__file__).parent / "mapping_v1.yml"
    with open(base_path, "r") as f:
        # NOTE(review): the trailing backtick in the replacement looks
        # accidental; the test still passes because the value fails to
        # validate either way — confirm intent before cleaning it up.
        data = f.read().replace("api: bulk", "oid_as_pk: True`")
    with pytest.raises(ValidationError):
        parse_from_yaml(StringIO(data))
def test_bad_mapping_batch_size(self):
    """An out-of-range batch_size must fail validation."""
    source = (Path(__file__).parent / "mapping_v2.yml").read_text()
    mangled = source.replace("record_type: HH_Account", "batch_size: 500")
    with pytest.raises(ValidationError):
        parse_from_yaml(StringIO(mangled))
def test_default_table_to_sobject_name(self):
    """When no table is given, it defaults to the sObject name."""
    source = (Path(__file__).parent / "mapping_v3.yml").read_text()
    parsed = parse_from_yaml(StringIO(source))
    assert parsed["Insert Accounts"].table == "Account"
def test_fields_list_to_dict(self):
    """A YAML list of field names is normalized to an identity dict."""
    base_path = Path(__file__).parent / "mapping_v3.yml"
    with open(base_path, "r") as f:
        data = f.read()
    ms = parse_from_yaml(StringIO(data))
    assert ms["Insert Accounts"].fields == {"Name": "Name"}
    assert ms["Insert Contacts"].fields == {
        "FirstName": "FirstName",
        "LastName": "LastName",
        "Email": "Email",
    }
def test_fields_default_not_present(self):
    """A step that omits `fields` gets an empty field dict."""
    source = (Path(__file__).parent / "mapping_v3.yml").read_text()
    parsed = parse_from_yaml(StringIO(source))
    assert parsed["Insert Junction Objects"].fields == {}
def test_fields_default_null(self):
    """A step with an explicit null `fields` also gets an empty dict."""
    source = (Path(__file__).parent / "mapping_v3.yml").read_text()
    parsed = parse_from_yaml(StringIO(source))
    assert parsed["Insert Other Junction Objects"].fields == {}
def test_load_from_bytes_stream(self):
    """parse_from_yaml accepts a binary file object as well as text."""
    base_path = Path(__file__).parent / "mapping_v2.yml"
    with base_path.open("rb") as stream:
        assert parse_from_yaml(stream)
def test_get_complete_field_map(self):
    """get_complete_field_map merges fields and lookups, optionally
    prepending the Id -> sf_id mapping."""
    m = MappingStep(
        sf_object="Account",
        fields=["Name", "AccountSite"],
        lookups={"ParentId": MappingLookup(table="Account")},
    )
    assert m.get_complete_field_map() == {
        "Name": "Name",
        "AccountSite": "AccountSite",
        "ParentId": "ParentId",
    }
    assert m.get_complete_field_map(include_id=True) == {
        "Id": "sf_id",
        "Name": "Name",
        "AccountSite": "AccountSite",
        "ParentId": "ParentId",
    }
def test_get_relative_date_context(self):
    """get_relative_date_context returns (date-field indices,
    datetime-field indices, today) based on the org's describe."""
    mapping = MappingStep(
        sf_object="Account",
        fields=["Some_Date__c", "Some_Datetime__c"],
        anchor_date="2020-07-01",
    )
    org_config = mock.Mock()
    org_config.salesforce_client.Account.describe.return_value = {
        "fields": [
            {"name": "Some_Date__c", "type": "date"},
            {"name": "Some_Datetime__c", "type": "datetime"},
            # Non-temporal fields must be ignored.
            {"name": "Some_Bool__c", "type": "boolean"},
        ]
    }
    assert mapping.get_relative_date_context(org_config) == ([0], [1], date.today())
# Start of FLS/Namespace Injection Unit Tests
def test_is_injectable(self):
    """Only un-namespaced custom names are candidates for injection."""
    # Custom field without a namespace: injectable.
    assert MappingStep._is_injectable("Test__c")
    # Already namespaced custom field: not injectable.
    assert not MappingStep._is_injectable("npsp__Test__c")
    # Standard object: not injectable.
    assert not MappingStep._is_injectable("Account")
def test_get_permission_type(self):
    """The required describe permission follows the step's action,
    except QUERY which always maps to 'queryable'."""
    ms = MappingStep(
        sf_object="Account", fields=["Name"], action=DataOperationType.INSERT
    )
    assert ms._get_permission_type(DataOperationType.INSERT) == "createable"
    assert ms._get_permission_type(DataOperationType.QUERY) == "queryable"
    ms = MappingStep(
        sf_object="Account", fields=["Name"], action=DataOperationType.UPDATE
    )
    # For an UPDATE step, even an INSERT operation checks 'updateable'.
    assert ms._get_permission_type(DataOperationType.INSERT) == "updateable"
def test_check_field_permission(self):
    """_check_field_permission honors the step action's permission and
    rejects fields missing from the describe."""
    ms = MappingStep(
        sf_object="Account", fields=["Name"], action=DataOperationType.INSERT
    )
    assert ms._check_field_permission(
        {"Name": {"createable": True}}, "Name", DataOperationType.INSERT
    )
    assert ms._check_field_permission(
        {"Name": {"createable": True}}, "Name", DataOperationType.QUERY
    )
    ms = MappingStep(
        sf_object="Account", fields=["Name"], action=DataOperationType.UPDATE
    )
    # Permission flag is False -> rejected.
    assert not ms._check_field_permission(
        {"Name": {"updateable": False}}, "Name", DataOperationType.INSERT
    )
    # Field absent from the describe -> rejected.
    assert not ms._check_field_permission(
        {"Name": {"updateable": False}}, "Website", DataOperationType.INSERT
    )
def test_validate_field_dict__fls_checks(self):
    """_validate_field_dict passes only when every mapped field is
    permitted for the operation."""
    ms = MappingStep(
        sf_object="Account",
        fields=["Id", "Name", "Website"],
        action=DataOperationType.INSERT,
    )
    assert ms._validate_field_dict(
        describe=CaseInsensitiveDict(
            {"Name": {"createable": True}, "Website": {"createable": True}}
        ),
        field_dict=ms.fields_,
        inject=None,
        strip=None,
        drop_missing=False,
        data_operation_type=DataOperationType.INSERT,
    )
    # Website is not createable -> validation fails.
    assert not ms._validate_field_dict(
        describe=CaseInsensitiveDict(
            {"Name": {"createable": True}, "Website": {"createable": False}}
        ),
        field_dict=ms.fields_,
        inject=None,
        strip=None,
        drop_missing=False,
        data_operation_type=DataOperationType.INSERT,
    )
def test_validate_field_dict__injection(self):
    """When only the namespaced form exists in the describe, the field
    key is rewritten to the injected (namespaced) name."""
    ms = MappingStep(
        sf_object="Account",
        fields=["Id", "Name", "Test__c"],
        action=DataOperationType.INSERT,
    )
    assert ms._validate_field_dict(
        describe=CaseInsensitiveDict(
            {"Name": {"createable": True}, "npsp__Test__c": {"createable": True}}
        ),
        field_dict=ms.fields_,
        inject=lambda field: f"npsp__{field}",
        strip=None,
        drop_missing=False,
        data_operation_type=DataOperationType.INSERT,
    )
    assert ms.fields_ == {"Id": "Id", "Name": "Name", "npsp__Test__c": "Test__c"}
def test_validate_field_dict__injection_duplicate_fields(self):
    """When both the plain and namespaced field exist, the original
    (un-namespaced) name wins and no injection occurs."""
    ms = MappingStep(
        sf_object="Account",
        fields=["Id", "Name", "Test__c"],
        action=DataOperationType.INSERT,
    )
    assert ms._validate_field_dict(
        describe=CaseInsensitiveDict(
            {
                "Name": {"createable": True},
                "npsp__Test__c": {"createable": True},
                "Test__c": {"createable": True},
            }
        ),
        field_dict=ms.fields_,
        inject=lambda field: f"npsp__{field}",
        strip=None,
        drop_missing=False,
        data_operation_type=DataOperationType.INSERT,
    )
    assert ms.fields_ == {"Id": "Id", "Name": "Name", "Test__c": "Test__c"}
def test_validate_field_dict__drop_missing(self):
    """With drop_missing=True, non-permitted fields are removed from the
    mapping instead of failing validation."""
    ms = MappingStep(
        sf_object="Account",
        fields=["Id", "Name", "Website"],
        action=DataOperationType.INSERT,
    )
    assert ms._validate_field_dict(
        describe=CaseInsensitiveDict(
            {"Name": {"createable": True}, "Website": {"createable": False}}
        ),
        field_dict=ms.fields_,
        inject=None,
        strip=None,
        drop_missing=True,
        data_operation_type=DataOperationType.INSERT,
    )
    # Website was dropped; Id is always retained.
    assert ms.fields_ == {"Id": "Id", "Name": "Name"}
def test_validate_sobject(self):
    """_validate_sobject checks the permission matching the step action
    (or 'queryable' for QUERY operations)."""
    ms = MappingStep(
        sf_object="Account", fields=["Name"], action=DataOperationType.INSERT
    )
    assert ms._validate_sobject(
        CaseInsensitiveDict({"Account": {"createable": True}}),
        None,
        DataOperationType.INSERT,
    )
    assert ms._validate_sobject(
        CaseInsensitiveDict({"Account": {"queryable": True}}),
        None,
        DataOperationType.QUERY,
    )
    ms = MappingStep(
        sf_object="Account", fields=["Name"], action=DataOperationType.UPDATE
    )
    # UPDATE step requires 'updateable', which is False here.
    assert not ms._validate_sobject(
        CaseInsensitiveDict({"Account": {"updateable": False}}),
        None,
        DataOperationType.INSERT,
    )
def test_validate_sobject__injection(self):
    """When only the namespaced sObject exists, sf_object is rewritten
    to the injected name."""
    ms = MappingStep(
        sf_object="Test__c", fields=["Name"], action=DataOperationType.INSERT
    )
    assert ms._validate_sobject(
        CaseInsensitiveDict({"npsp__Test__c": {"createable": True}}),
        lambda obj: f"npsp__{obj}",
        DataOperationType.INSERT,
    )
    assert ms.sf_object == "npsp__Test__c"
def test_validate_sobject__injection_duplicate(self):
    """When both plain and namespaced sObjects exist, the original name
    is kept (no injection)."""
    ms = MappingStep(
        sf_object="Test__c", fields=["Name"], action=DataOperationType.INSERT
    )
    assert ms._validate_sobject(
        CaseInsensitiveDict(
            {"npsp__Test__c": {"createable": True}, "Test__c": {"createable": True}}
        ),
        lambda obj: f"npsp__{obj}",
        DataOperationType.INSERT,
    )
    assert ms.sf_object == "Test__c"
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_sobject",
    return_value=True,
)
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_field_dict",
    return_value=True,
)
def test_validate_and_inject_namespace__injection_fields(
    self, mock_field, mock_sobject
):
    """validate_and_inject_namespace passes the org describes and the
    namespace-injection callables down to the validators (fields pass)."""
    ms = parse_from_yaml(
        StringIO(
            """Insert Accounts:
                sf_object: Account
                table: Account
                fields:
                    - Test__c"""
        )
    )["Insert Accounts"]
    org_config = mock.Mock()
    org_config.salesforce_client.describe.return_value = {
        "sobjects": [{"name": "Account", "createable": True}]
    }
    org_config.salesforce_client.Account.describe.return_value = {
        "fields": [{"name": "ns__Test__c", "createable": True}]
    }
    assert ms.validate_and_inject_namespace(
        org_config, "ns", DataOperationType.INSERT, inject_namespaces=True
    )
    ms._validate_sobject.assert_called_once_with(
        CaseInsensitiveDict({"Account": {"name": "Account", "createable": True}}),
        mock.ANY,  # This is a function def
        DataOperationType.INSERT,
    )
    # Two calls: first for plain fields, then for lookups.
    ms._validate_field_dict.assert_has_calls(
        [
            mock.call(
                CaseInsensitiveDict(
                    {"ns__Test__c": {"name": "ns__Test__c", "createable": True}}
                ),
                ms.fields,
                mock.ANY,  # local function def
                mock.ANY,  # local function def
                False,
                DataOperationType.INSERT,
            ),
            mock.call(
                {"ns__Test__c": {"name": "ns__Test__c", "createable": True}},
                ms.lookups,
                mock.ANY,  # local function def
                mock.ANY,  # local function def
                False,
                DataOperationType.INSERT,
            ),
        ]
    )
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_sobject",
    return_value=True,
)
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_field_dict",
    return_value=True,
)
def test_validate_and_inject_namespace__injection_lookups(
    self, mock_field, mock_sobject
):
    """Lookups are validated with the same describe and injection
    callables as plain fields."""
    ms = parse_from_yaml(
        StringIO(
            """Insert Accounts:
                sf_object: Account
                table: Account
                fields:
                    - Name
                lookups:
                    Lookup__c:
                        table: Stuff"""
        )
    )["Insert Accounts"]
    org_config = mock.Mock()
    org_config.salesforce_client.describe.return_value = {
        "sobjects": [{"name": "Account", "createable": True}]
    }
    org_config.salesforce_client.Account.describe.return_value = {
        "fields": [
            {"name": "Name", "createable": True},
            {"name": "ns__Lookup__c", "updateable": False, "createable": True},
        ]
    }
    assert ms.validate_and_inject_namespace(
        org_config, "ns", DataOperationType.INSERT, inject_namespaces=True
    )
    ms._validate_sobject.assert_called_once_with(
        CaseInsensitiveDict({"Account": {"name": "Account", "createable": True}}),
        mock.ANY,  # local function def
        DataOperationType.INSERT,
    )
    # Same describe dict is passed for the fields call and the lookups call.
    ms._validate_field_dict.assert_has_calls(
        [
            mock.call(
                {
                    "Name": {"name": "Name", "createable": True},
                    "ns__Lookup__c": {
                        "name": "ns__Lookup__c",
                        "updateable": False,
                        "createable": True,
                    },
                },
                ms.fields,
                mock.ANY,  # local function def.
                mock.ANY,  # local function def.
                False,
                DataOperationType.INSERT,
            ),
            mock.call(
                {
                    "Name": {"name": "Name", "createable": True},
                    "ns__Lookup__c": {
                        "name": "ns__Lookup__c",
                        "updateable": False,
                        "createable": True,
                    },
                },
                ms.lookups,
                mock.ANY,  # local function def.
                mock.ANY,  # local function def.
                False,
                DataOperationType.INSERT,
            ),
        ]
    )
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_sobject",
    return_value=True,
)
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_field_dict",
    return_value=False,
)
def test_validate_and_inject_namespace__fls(self, mock_field, mock_sobject):
    """Without inject_namespaces, no injection callables are passed
    (both inject and strip are None)."""
    ms = MappingStep(
        sf_object="Test__c", fields=["Field__c"], action=DataOperationType.INSERT
    )
    org_config = mock.Mock()
    org_config.salesforce_client.describe.return_value = {
        "sobjects": [{"name": "Test__c", "createable": True}]
    }
    org_config.salesforce_client.Test__c.describe.return_value = {
        "fields": [{"name": "Field__c", "createable": True}]
    }
    assert ms.validate_and_inject_namespace(
        org_config, "ns", DataOperationType.INSERT
    )
    ms._validate_sobject.assert_called_once_with(
        CaseInsensitiveDict({"Test__c": {"name": "Test__c", "createable": True}}),
        None,
        DataOperationType.INSERT,
    )
    ms._validate_field_dict.assert_has_calls(
        [
            mock.call(
                {"Field__c": {"name": "Field__c", "createable": True}},
                {"Field__c": "Field__c"},
                None,
                None,
                False,
                DataOperationType.INSERT,
            ),
            mock.call(
                {"Field__c": {"name": "Field__c", "createable": True}},
                {},
                None,
                None,
                False,
                DataOperationType.INSERT,
            ),
        ]
    )
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_sobject",
    return_value=False,
)
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_field_dict",
    return_value=True,
)
def test_validate_and_inject_namespace__fls_sobject_failure(
    self, mock_field, mock_sobject
):
    """If the sObject fails validation, field validation is skipped and
    the whole check fails."""
    ms = MappingStep(
        sf_object="Test__c", fields=["Name"], action=DataOperationType.INSERT
    )
    org_config = mock.Mock()
    org_config.salesforce_client.describe.return_value = {
        "sobjects": [{"name": "Test__c", "createable": False}]
    }
    org_config.salesforce_client.Test__c.describe.return_value = {
        "fields": [{"name": "Name", "createable": True}]
    }
    assert not ms.validate_and_inject_namespace(
        org_config, "ns", DataOperationType.INSERT
    )
    ms._validate_sobject.assert_called_once_with(
        {"Test__c": {"name": "Test__c", "createable": False}},
        None,
        DataOperationType.INSERT,
    )
    ms._validate_field_dict.assert_not_called()
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_sobject",
    return_value=True,
)
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_field_dict",
    return_value=False,
)
def test_validate_and_inject_namespace__fls_fields_failure(
    self, mock_field, mock_sobject
):
    """If field validation fails, the overall check fails after a single
    field-dict call."""
    ms = MappingStep(
        sf_object="Test__c", fields=["Name"], action=DataOperationType.INSERT
    )
    org_config = mock.Mock()
    org_config.salesforce_client.describe.return_value = {
        "sobjects": [{"name": "Test__c", "createable": True}]
    }
    org_config.salesforce_client.Test__c.describe.return_value = {
        "fields": [{"name": "Name", "createable": False}]
    }
    assert not ms.validate_and_inject_namespace(
        org_config, "ns", DataOperationType.INSERT
    )
    ms._validate_sobject.assert_called_once_with(
        {"Test__c": {"name": "Test__c", "createable": True}},
        None,
        DataOperationType.INSERT,
    )
    ms._validate_field_dict.assert_has_calls(
        [
            mock.call(
                {"Name": {"name": "Name", "createable": False}},
                {"Name": "Name"},
                None,
                None,
                False,
                DataOperationType.INSERT,
            )
        ]
    )
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_sobject",
    return_value=True,
)
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_field_dict",
    side_effect=[True, False],
)
def test_validate_and_inject_namespace__fls_lookups_failure(
    self, mock_field, mock_sobject
):
    """If fields pass but the lookups call fails, the check fails."""
    ms = parse_from_yaml(
        StringIO(
            """Insert Accounts:
                sf_object: Account
                table: Account
                fields:
                    - Name
                lookups:
                    Lookup__c:
                        table: Stuff"""
        )
    )["Insert Accounts"]
    org_config = mock.Mock()
    org_config.salesforce_client.describe.return_value = {
        "sobjects": [{"name": "Account", "createable": True}]
    }
    org_config.salesforce_client.Account.describe.return_value = {
        "fields": [
            {"name": "Name", "createable": True},
            {"name": "Lookup__c", "updateable": True, "createable": False},
        ]
    }
    assert not ms.validate_and_inject_namespace(
        org_config, "ns", DataOperationType.INSERT
    )
    ms._validate_sobject.assert_called_once_with(
        {"Account": {"name": "Account", "createable": True}},
        None,
        DataOperationType.INSERT,
    )
    ms._validate_field_dict.assert_has_calls(
        [
            mock.call(
                {
                    "Name": {"name": "Name", "createable": True},
                    "Lookup__c": {
                        "name": "Lookup__c",
                        "updateable": True,
                        "createable": False,
                    },
                },
                {"Name": "Name"},
                None,
                None,
                False,
                DataOperationType.INSERT,
            ),
            mock.call(
                {
                    "Name": {"name": "Name", "createable": True},
                    "Lookup__c": {
                        "name": "Lookup__c",
                        "updateable": True,
                        "createable": False,
                    },
                },
                ms.lookups,
                None,
                None,
                False,
                DataOperationType.INSERT,
            ),
        ]
    )
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_sobject",
    return_value=True,
)
@mock.patch(
    "cumulusci.tasks.bulkdata.mapping_parser.MappingStep._validate_field_dict",
    side_effect=[True, False],
)
def test_validate_and_inject_namespace__fls_lookups_update_failure(
    self, mock_field, mock_sobject
):
    """An 'after' lookup requires update permission; if the lookups
    validation fails, the check fails."""
    ms = parse_from_yaml(
        StringIO(
            """Insert Accounts:
                sf_object: Account
                table: Account
                fields:
                    - Name
                lookups:
                    Lookup__c:
                        table: Stuff
                        after: Insert Stuff"""
        )
    )["Insert Accounts"]
    org_config = mock.Mock()
    org_config.salesforce_client.describe.return_value = {
        "sobjects": [{"name": "Account", "createable": True}]
    }
    org_config.salesforce_client.Account.describe.return_value = {
        "fields": [
            {"name": "Name", "createable": True},
            {"name": "Lookup__c", "updateable": False, "createable": True},
        ]
    }
    assert not ms.validate_and_inject_namespace(
        org_config, "ns", DataOperationType.INSERT
    )
    ms._validate_sobject.assert_called_once_with(
        {"Account": {"name": "Account", "createable": True}},
        None,
        DataOperationType.INSERT,
    )
    ms._validate_field_dict.assert_has_calls(
        [
            mock.call(
                {
                    "Name": {"name": "Name", "createable": True},
                    "Lookup__c": {
                        "name": "Lookup__c",
                        "updateable": False,
                        "createable": True,
                    },
                },
                {"Name": "Name"},
                None,
                None,
                False,
                DataOperationType.INSERT,
            ),
            mock.call(
                {
                    "Name": {"name": "Name", "createable": True},
                    "Lookup__c": {
                        "name": "Lookup__c",
                        "updateable": False,
                        "createable": True,
                    },
                },
                ms.lookups,
                None,
                None,
                False,
                DataOperationType.INSERT,
            ),
        ]
    )
# Start of FLS/Namespace Injection Integration Tests
@responses.activate
def test_validate_and_inject_mapping_enforces_fls(self):
    """A mapping referencing a nonexistent field raises BulkDataException
    when drop_missing is False."""
    mock_describe_calls()
    mapping = parse_from_yaml(
        StringIO(
            "Insert Accounts:\n  sf_object: Account\n  table: Account\n  fields:\n    - Nonsense__c"
        )
    )
    org_config = DummyOrgConfig(
        {"instance_url": "https://example.com", "access_token": "abc123"}, "test"
    )
    with pytest.raises(BulkDataException):
        validate_and_inject_mapping(
            mapping=mapping,
            org_config=org_config,
            namespace=None,
            data_operation=DataOperationType.INSERT,
            inject_namespaces=False,
            drop_missing=False,
        )
@responses.activate
def test_validate_and_inject_mapping_removes_steps_with_drop_missing(self):
    """With drop_missing=True, a step for a nonexistent sObject is
    silently removed from the mapping."""
    mock_describe_calls()
    mapping = parse_from_yaml(
        StringIO(
            "Insert Accounts:\n  sf_object: NotAccount\n  table: Account\n  fields:\n    - Nonsense__c"
        )
    )
    org_config = DummyOrgConfig(
        {"instance_url": "https://example.com", "access_token": "abc123"}, "test"
    )
    validate_and_inject_mapping(
        mapping=mapping,
        org_config=org_config,
        namespace=None,
        data_operation=DataOperationType.INSERT,
        inject_namespaces=False,
        drop_missing=True,
    )
    assert "Insert Accounts" not in mapping
@responses.activate
def test_validate_and_inject_mapping_removes_lookups_with_drop_missing(self):
    """Dropping a step also drops lookups in later steps that target it."""
    mock_describe_calls()
    mapping = parse_from_yaml(
        StringIO(
            (
                "Insert Accounts:\n  sf_object: NotAccount\n  table: Account\n  fields:\n    - Nonsense__c\n"
                "Insert Contacts:\n  sf_object: Contact\n  table: Contact\n  lookups:\n    AccountId:\n      table: Account"
            )
        )
    )
    org_config = DummyOrgConfig(
        {"instance_url": "https://example.com", "access_token": "abc123"}, "test"
    )
    validate_and_inject_mapping(
        mapping=mapping,
        org_config=org_config,
        namespace=None,
        data_operation=DataOperationType.INSERT,
        inject_namespaces=False,
        drop_missing=True,
    )
    assert "Insert Accounts" not in mapping
    assert "Insert Contacts" in mapping
    # The lookup into the dropped Accounts step must be removed too.
    assert "AccountId" not in mapping["Insert Contacts"].lookups
@responses.activate
def test_validate_and_inject_mapping_throws_exception_required_lookup_dropped(self):
    """Dropping a step that a required (non-nillable) lookup targets
    raises BulkDataException."""
    mock_describe_calls()
    # This test uses a bit of gimmickry to validate exception behavior on dropping a required lookup.
    # Since mapping_parser identifies target objects via the `table` clause rather than the actual schema,
    # which makes us resilient to polymorphic lookups, we'll pretend the non-nillable `Id` field is a lookup.
    mapping = parse_from_yaml(
        StringIO(
            (
                "Insert Accounts:\n  sf_object: NotAccount\n  table: Account\n  fields:\n    - Nonsense__c\n"
                "Insert Contacts:\n  sf_object: Contact\n  table: Contact\n  lookups:\n    Id:\n      table: Account"
            )
        )
    )
    org_config = DummyOrgConfig(
        {"instance_url": "https://example.com", "access_token": "abc123"}, "test"
    )
    with pytest.raises(BulkDataException):
        validate_and_inject_mapping(
            mapping=mapping,
            org_config=org_config,
            namespace=None,
            data_operation=DataOperationType.INSERT,
            inject_namespaces=False,
            drop_missing=True,
        )
    @responses.activate
    def test_validate_and_inject_mapping_injects_namespaces(self):
        mock_describe_calls()
        # Note: ns__Description__c is a mock field added to our stored, mock describes (in JSON)
        ms = parse_from_yaml(
            StringIO(
                """Insert Accounts:
                  sf_object: Account
                  table: Account
                  fields:
                    - Description__c"""
            )
        )["Insert Accounts"]
        org_config = DummyOrgConfig(
            {"instance_url": "https://example.com", "access_token": "abc123"}, "test"
        )
        # The bare field name is rewritten to its namespaced API name.
        assert ms.validate_and_inject_namespace(
            org_config, "ns", DataOperationType.INSERT, inject_namespaces=True
        )
        assert list(ms.fields.keys()) == ["ns__Description__c"]
    @responses.activate
    def test_validate_and_inject_mapping_removes_namespaces(self):
        mock_describe_calls()
        # Note: History__c is a mock field added to our stored, mock describes (in JSON)
        ms = parse_from_yaml(
            StringIO(
                """Insert Accounts:
                  sf_object: Account
                  table: Account
                  fields:
                    - ns__History__c"""
            )
        )["Insert Accounts"]
        org_config = DummyOrgConfig(
            {"instance_url": "https://example.com", "access_token": "abc123"}, "test"
        )
        # A namespaced name is normalized back to the bare field that the
        # org actually exposes.
        assert ms.validate_and_inject_namespace(
            org_config, "ns", DataOperationType.INSERT, inject_namespaces=True
        )
        assert list(ms.fields.keys()) == ["History__c"]
    @responses.activate
    def test_validate_and_inject_mapping_queries_is_person_account_field(self):
        mock_describe_calls()
        mapping = parse_from_yaml(
            StringIO(
                (
                    "Insert Accounts:\n sf_object: Account\n table: Account\n fields:\n - Description__c\n"
                    "Insert Contacts:\n sf_object: Contact\n table: Contact\n lookups:\n AccountId:\n table: Account"
                )
            )
        )
        org_config = DummyOrgConfig(
            {"instance_url": "https://example.com", "access_token": "abc123"}, "test"
        )
        validate_and_inject_mapping(
            mapping=mapping,
            org_config=org_config,
            namespace=None,
            data_operation=DataOperationType.QUERY,
            inject_namespaces=False,
            drop_missing=True,
            org_has_person_accounts_enabled=True,
        )
        # For QUERY operations on person-account-enabled orgs, both the
        # Account and Contact steps gain the IsPersonAccount field.
        assert "Insert Accounts" in mapping
        assert "Insert Contacts" in mapping
        assert "IsPersonAccount" in mapping["Insert Accounts"]["fields"]
        assert "IsPersonAccount" in mapping["Insert Contacts"]["fields"]
class TestMappingLookup:
    """Tests for MappingLookup.get_lookup_key_field key resolution."""
    def test_get_lookup_key_field__no_model(self):
        # Without a model, fall back to the lookup's own name.
        lookup = MappingLookup(table="contact", name="AccountId")
        assert lookup.get_lookup_key_field() == "AccountId"
    def test_get_lookup_key_field__snake_case_model(self):
        class FakeModel:
            account_id = mock.MagicMock()
        # The CamelCase name matches the model's snake_case column.
        lookup = MappingLookup(table="contact", name="AccountId")
        assert lookup.get_lookup_key_field(FakeModel()) == "account_id"
    def test_get_lookup_key_field__by_key_field(self):
        class FakeModel:
            foo = mock.MagicMock()
        # An explicit key_field that exists on the model wins outright.
        lookup = MappingLookup(table="contact", key_field="foo", name="AccountId")
        assert lookup.get_lookup_key_field(FakeModel()) == "foo"
    def test_get_lookup_key_field__by_key_field_wrong_case(self):
        class FakeModel:
            account_id = mock.MagicMock()
        # we can correct mismatched mapping files if the mistake is just
        # old-fashioned SQL with new Mapping File
        lookup = MappingLookup(table="contact", key_field="AccountId", name="AccountId")
        assert lookup.get_lookup_key_field(FakeModel()) == "account_id"
    def test_get_lookup_key_field__mismatched_name(self):
        class FakeModel:
            account_id = mock.MagicMock()
        # some mistakes can't be fixed.
        lookup = MappingLookup(table="contact", key_field="Foo", name="Foo")
        with pytest.raises(KeyError):
            lookup.get_lookup_key_field(FakeModel())
    # NOTE(review): the two tests below exercise mapping validation/parsing,
    # not MappingLookup -- confirm they are intentionally in this class.
    @responses.activate
    def test_validate_and_inject_mapping_works_case_insensitively(self):
        mock_describe_calls()
        mapping = parse_from_yaml(
            StringIO(
                (
                    "Insert Accounts:\n sf_object: account\n table: account\n fields:\n - name\n"
                    "Insert Contacts:\n sf_object: contact\n table: contact\n fields:\n - fIRSTnAME\n lookups:\n accountid:\n table: account"
                )
            )
        )
        org_config = DummyOrgConfig(
            {"instance_url": "https://example.com", "access_token": "abc123"}, "test"
        )
        # Before validation, names keep whatever casing the YAML used ...
        assert mapping["Insert Accounts"].sf_object != "Account"
        assert mapping["Insert Accounts"].sf_object == "account"
        assert "name" in mapping["Insert Accounts"].fields
        assert "Name" not in mapping["Insert Accounts"].fields
        validate_and_inject_mapping(
            mapping=mapping,
            org_config=org_config,
            namespace=None,
            data_operation=DataOperationType.INSERT,
            inject_namespaces=False,
            drop_missing=False,
        )
        # ... afterwards they are normalized to the schema's canonical casing.
        assert mapping["Insert Accounts"].sf_object == "Account"
        assert mapping["Insert Accounts"].sf_object != "account"
        assert "Name" in mapping["Insert Accounts"].fields
        assert "name" not in mapping["Insert Accounts"].fields
    @responses.activate
    def test_bulk_attributes(self):
        # api / bulk_mode / batch_size round-trip through the YAML parser.
        mapping = parse_from_yaml(
            StringIO(
                (
                    """Insert Accounts:
                  sf_object: account
                  table: account
                  api: rest
                  bulk_mode: Serial
                  batch_size: 50
                  fields:
                    - name"""
                )
            )
        )
        assert mapping["Insert Accounts"].api == DataApi.REST
        assert mapping["Insert Accounts"].bulk_mode == "Serial"
        assert mapping["Insert Accounts"].batch_size == 50
| |
"""Learn to estimate functions from examples. (Chapters 18-20)"""
from utils import *
import copy, heapq, math, random
from collections import defaultdict
#______________________________________________________________________________
def rms_error(predictions, targets):
    "Root-mean-square error between corresponding predictions and targets."
    mean_square = ms_error(predictions, targets)
    return math.sqrt(mean_square)
def ms_error(predictions, targets):
    "Mean of the squared differences between predictions and targets."
    squared_diffs = [(guess - actual) ** 2
                     for guess, actual in zip(predictions, targets)]
    return mean(squared_diffs)
def mean_error(predictions, targets):
    "Mean absolute difference between predictions and targets."
    absolute_diffs = [abs(guess - actual)
                      for guess, actual in zip(predictions, targets)]
    return mean(absolute_diffs)
def mean_boolean_error(predictions, targets):
    "Fraction of positions where prediction and target disagree."
    mismatches = [guess != actual
                  for guess, actual in zip(predictions, targets)]
    return mean(mismatches)
#______________________________________________________________________________
class DataSet:
    """A data set for a machine learning problem. It has the following fields:

    d.examples    A list of examples. Each one is a list of attribute values.
    d.attrs       A list of integers to index into an example, so example[attr]
                  gives a value. Normally the same as range(len(d.examples[0])).
    d.attrnames   Optional list of mnemonic names for corresponding attrs.
    d.target      The attribute that a learning algorithm will try to predict.
                  By default the final attribute.
    d.inputs      The list of attrs without the target.
    d.values      A list of lists: each sublist is the set of possible
                  values for the corresponding attribute. If initially None,
                  it is computed from the known examples by self.setproblem.
                  If not None, an erroneous value raises ValueError.
    d.distance    A function from a pair of examples to a nonnegative number.
                  Should be symmetric, etc. Defaults to mean_boolean_error
                  since that can handle any field types.
    d.name        Name of the data set (for output display only).
    d.source      URL or other source where the data came from.

    Normally, you call the constructor and you're done; then you just
    access fields like d.examples and d.target and d.inputs."""

    def __init__(self, examples=None, attrs=None, attrnames=None, target=-1,
                 inputs=None, values=None, distance=mean_boolean_error,
                 name='', source='', exclude=()):
        """Accepts any of DataSet's fields. Examples can also be a
        string or file from which to parse examples using parse_csv.
        Optional parameter: exclude, as documented in .setproblem().
        >>> DataSet(examples='1, 2, 3')
        <DataSet(): 1 examples, 3 attributes>
        """
        update(self, name=name, source=source, values=values, distance=distance)
        # Initialize .examples from string or list or data directory
        if isinstance(examples, str):
            self.examples = parse_csv(examples)
        elif examples is None:
            self.examples = parse_csv(DataFile(name+'.csv').read())
        else:
            self.examples = examples
        # Attrs are the indices of examples, unless otherwise stated.
        if not attrs and self.examples:
            attrs = range(len(self.examples[0]))
        self.attrs = attrs
        # Initialize .attrnames from string, list, or by default
        if isinstance(attrnames, str):
            self.attrnames = attrnames.split()
        else:
            self.attrnames = attrnames or attrs
        self.setproblem(target, inputs=inputs, exclude=exclude)

    def setproblem(self, target, inputs=None, exclude=()):
        """Set (or change) the target and/or inputs.
        This way, one DataSet can be used multiple ways. inputs, if specified,
        is a list of attributes, or specify exclude as a list of attributes
        to not use in inputs. Attributes can be -n .. n, or an attrname.
        Also computes the list of possible values, if that wasn't done yet."""
        self.target = self.attrnum(target)
        exclude = map(self.attrnum, exclude)
        if inputs:
            self.inputs = removeall(self.target, inputs)
        else:
            self.inputs = [a for a in self.attrs
                           if a != self.target and a not in exclude]
        if not self.values:
            self.values = map(unique, zip(*self.examples))
        self.check_me()

    def check_me(self):
        "Check that my fields make sense."
        assert len(self.attrnames) == len(self.attrs)
        assert self.target in self.attrs
        assert self.target not in self.inputs
        assert set(self.inputs).issubset(set(self.attrs))
        # map() used for its side effect (Python 2 semantics: runs eagerly).
        map(self.check_example, self.examples)

    def add_example(self, example):
        "Add an example to the list of examples, checking it first."
        self.check_example(example)
        self.examples.append(example)

    def check_example(self, example):
        "Raise ValueError if example has any invalid values."
        if self.values:
            for a in self.attrs:
                if example[a] not in self.values[a]:
                    raise ValueError('Bad value %s for attribute %s in %s' %
                                     (example[a], self.attrnames[a], example))

    def attrnum(self, attr):
        "Returns the number used for attr, which can be a name, or -n .. n-1."
        # Test for a string name BEFORE the numeric comparison: `attr < 0`
        # on a str raises TypeError on Python 3, and on Python 2 it only
        # returned False by accident of cross-type ordering rules.
        if isinstance(attr, str):
            return self.attrnames.index(attr)
        elif attr < 0:
            return len(self.attrs) + attr
        else:
            return attr

    def sanitize(self, example):
        "Return a copy of example, with non-input attributes replaced by None."
        return [attr_i if i in self.inputs else None
                for i, attr_i in enumerate(example)]

    def __repr__(self):
        return '<DataSet(%s): %d examples, %d attributes>' % (
            self.name, len(self.examples), len(self.attrs))
#______________________________________________________________________________
def parse_csv(input, delim=','):
    r"""Input is a string consisting of lines, each line has comma-delimited
    fields. Convert this into a list of lists. Blank lines are skipped.
    Fields that look like numbers are converted to numbers.
    The delim defaults to ',' but '\t' and None are also reasonable values.
    >>> parse_csv('1, 2, 3 \n 0, 2, na')
    [[1, 2, 3], [0, 2, 'na']]
    """
    lines = [line for line in input.splitlines() if line.strip()]
    # Explicit list comprehension instead of map(): on Python 3, map()
    # returns a lazy iterator, which would break the documented
    # list-of-lists result (on Python 2 map returned a list, so this is
    # behaviorally identical).
    return [[num_or_str(field) for field in line.split(delim)]
            for line in lines]
#______________________________________________________________________________
class CountingProbDist:
    """A probability distribution formed by observing and counting examples.
    If p is an instance of this class and o is an observed value, then
    there are 3 main operations:
    p.add(o) increments the count for observation o by 1.
    p.sample() returns a random element from the distribution.
    p[o] returns the probability for o (as in a regular ProbDist)."""
    def __init__(self, observations=[], default=0):
        """Create a distribution, and optionally add in some observations.
        By default this is an unsmoothed distribution, but saying default=1,
        for example, gives you add-one smoothing."""
        # n_obs is a float so __getitem__ divides exactly under Python 2.
        # sampler is built lazily and invalidated whenever counts change.
        update(self, dictionary={}, n_obs=0.0, default=default, sampler=None)
        for o in observations:
            self.add(o)
    def add(self, o):
        "Add an observation o to the distribution."
        self.smooth_for(o)
        self.dictionary[o] += 1
        self.n_obs += 1
        # Counts changed: the cached sampler is now stale.
        self.sampler = None
    def smooth_for(self, o):
        """Include o among the possible observations, whether or not
        it's been observed yet."""
        if o not in self.dictionary:
            # A brand-new value starts at the smoothing pseudo-count.
            self.dictionary[o] = self.default
            self.n_obs += self.default
            self.sampler = None
    def __getitem__(self, item):
        "Return an estimate of the probability of item."
        # NOTE: divides by n_obs; with no observations and default=0 this
        # raises ZeroDivisionError.
        self.smooth_for(item)
        return self.dictionary[item] / self.n_obs
    # (top() and sample() are not used in this module, but elsewhere.)
    def top(self, n):
        "Return (count, obs) tuples for the n most frequent observations."
        return heapq.nlargest(n, [(v, k) for (k, v) in self.dictionary.items()])
    def sample(self):
        "Return a random sample from the distribution."
        # Build the weighted sampler lazily; reuse it until counts change.
        if self.sampler is None:
            self.sampler = weighted_sampler(self.dictionary.keys(),
                                            self.dictionary.values())
        return self.sampler()
#______________________________________________________________________________
def PluralityLearner(dataset):
    """A very dumb algorithm: always pick the result that was most popular
    in the training data. Makes a baseline for comparison."""
    winner = mode([example[dataset.target] for example in dataset.examples])
    def predict(example):
        "Always return same result: the most popular from the training set."
        return winner
    return predict
#______________________________________________________________________________
def NaiveBayesLearner(dataset):
    """Just count how many times each value of each input attribute
    occurs, conditional on the target value. Count the different
    target values too."""
    targetvals = dataset.values[dataset.target]
    # Prior over target values, plus one conditional distribution per
    # (target value, input attribute) pair.
    target_dist = CountingProbDist(targetvals)
    attr_dists = dict(((gv, attr), CountingProbDist(dataset.values[attr]))
                      for gv in targetvals
                      for attr in dataset.inputs)
    # Tally every training example into the prior and the conditionals.
    for example in dataset.examples:
        targetval = example[dataset.target]
        target_dist.add(targetval)
        for attr in dataset.inputs:
            attr_dists[targetval, attr].add(example[attr])
    def predict(example):
        """Predict the target value for example. Consider each possible value,
        and pick the most likely by looking at each attribute independently."""
        def class_probability(targetval):
            # Naive Bayes score: prior times the product of per-attribute
            # likelihoods (attribute independence assumption).
            return (target_dist[targetval]
                    * product(attr_dists[targetval, attr][example[attr]]
                              for attr in dataset.inputs))
        return argmax(targetvals, class_probability)
    return predict
#______________________________________________________________________________
def NearestNeighborLearner(dataset, k=1):
    "k-NearestNeighbor: the k nearest neighbors vote."
    def predict(example):
        "Find the k closest, and have them vote for the best."
        scored = ((dataset.distance(e, example), e)
                  for e in dataset.examples)
        nearest = heapq.nsmallest(k, scored)
        return mode(e[dataset.target] for (d, e) in nearest)
    return predict
#______________________________________________________________________________
class DecisionFork:
    """A fork of a decision tree holds an attribute to test, and a dict
    of branches, one for each of the attribute's values."""
    def __init__(self, attr, attrname=None, branches=None):
        "Initialize by saying what attribute this node tests."
        update(self, attr=attr, attrname=attrname or attr,
               branches=branches or {})
    def __call__(self, example):
        "Given an example, classify it using the attribute and the branches."
        attrvalue = example[self.attr]
        # Recurse into the subtree for the observed value; an attribute
        # value never seen at this node raises KeyError.
        return self.branches[attrvalue](example)
    def add(self, val, subtree):
        "Add a branch. If self.attr = val, go to the given subtree."
        self.branches[val] = subtree
    def display(self, indent=0):
        # Human-readable dump of the tree (Python 2 print statements).
        name = self.attrname
        print 'Test', name
        for (val, subtree) in self.branches.items():
            print ' '*4*indent, name, '=', val, '==>',
            subtree.display(indent+1)
    def __repr__(self):
        return ('DecisionFork(%r, %r, %r)'
                % (self.attr, self.attrname, self.branches))
class DecisionLeaf:
    "A leaf of a decision tree holds just a result."
    def __init__(self, result):
        # The classification returned for every example reaching this leaf.
        self.result = result
    def __call__(self, example):
        # Leaves ignore the example entirely.
        return self.result
    def display(self, indent=0):
        print 'RESULT =', self.result
    def __repr__(self):
        return repr(self.result)
#______________________________________________________________________________
def DecisionTreeLearner(dataset):
    """[Fig. 18.5] Learn a decision tree by recursively choosing the
    attribute with the highest information gain."""
    target, values = dataset.target, dataset.values
    def decision_tree_learning(examples, attrs, parent_examples=()):
        # The four cases of the AIMA algorithm: no examples, pure node,
        # no attributes left, otherwise split on the best attribute.
        if len(examples) == 0:
            return plurality_value(parent_examples)
        elif all_same_class(examples):
            return DecisionLeaf(examples[0][target])
        elif len(attrs) == 0:
            return plurality_value(examples)
        else:
            A = choose_attribute(attrs, examples)
            tree = DecisionFork(A, dataset.attrnames[A])
            for (v_k, exs) in split_by(A, examples):
                subtree = decision_tree_learning(
                    exs, removeall(A, attrs), examples)
                tree.add(v_k, subtree)
            return tree
    def plurality_value(examples):
        """Return the most popular target value for this set of examples.
        (If target is binary, this is the majority; otherwise plurality.)"""
        popular = argmax_random_tie(values[target],
                                    lambda v: count(target, v, examples))
        return DecisionLeaf(popular)
    def count(attr, val, examples):
        # How many examples have examples[attr] == val.
        return count_if(lambda e: e[attr] == val, examples)
    def all_same_class(examples):
        "Are all these examples in the same target class?"
        class0 = examples[0][target]
        return all(e[target] == class0 for e in examples)
    def choose_attribute(attrs, examples):
        "Choose the attribute with the highest information gain."
        return argmax_random_tie(attrs,
                                 lambda a: information_gain(a, examples))
    def information_gain(attr, examples):
        "Return the expected reduction in entropy from splitting by attr."
        def I(examples):
            # Entropy of the target-value distribution in these examples.
            return information_content([count(target, v, examples)
                                        for v in values[target]])
        N = float(len(examples))
        remainder = sum((len(examples_i) / N) * I(examples_i)
                        for (v, examples_i) in split_by(attr, examples))
        return I(examples) - remainder
    def split_by(attr, examples):
        "Return a list of (val, examples) pairs for each val of attr."
        return [(v, [e for e in examples if e[attr] == v])
                for v in values[attr]]
    return decision_tree_learning(dataset.examples, dataset.inputs)
def information_content(values):
    "Number of bits to represent the probability distribution in values."
    nonzero_counts = removeall(0, values)
    probabilities = normalize(nonzero_counts)
    return sum(-p * log2(p) for p in probabilities)
#______________________________________________________________________________
### A decision list is implemented as a list of (test, value) pairs.
def DecisionListLearner(dataset):
    """[Fig. 18.11] Learn a decision list: an ordered list of (test, outcome)
    pairs. find_examples and passes are unimplemented placeholders."""
    def decision_list_learning(examples):
        # Base case: an always-true test with outcome False ends the list.
        if not examples:
            return [(True, False)]
        t, o, examples_t = find_examples(examples)
        if not t:
            raise Failure
        # Prepend the found rule and recurse on the uncovered examples.
        return [(t, o)] + decision_list_learning(examples - examples_t)
    def find_examples(examples):
        """Find a set of examples that all have the same outcome under
        some test. Return a tuple of the test, outcome, and examples."""
        unimplemented()
    def passes(example, test):
        "Does the example pass the test?"
        unimplemented()
    def predict(example):
        "Predict the outcome for the first passing test."
        for test, outcome in predict.decision_list:
            if passes(example, test):
                return outcome
    # The learned list is stashed on the predict function object itself.
    predict.decision_list = decision_list_learning(set(dataset.examples))
    return predict
#______________________________________________________________________________
def NeuralNetLearner(dataset, sizes):
    """Layered feed-forward network. (Unimplemented placeholder.)"""
    # Per-layer activation vectors; map returns a list under Python 2,
    # which this module targets.
    activations = map(lambda n: [0.0 for i in range(n)], sizes)
    weights = []
    def predict(example):
        unimplemented()
    return predict
class NNUnit:
    """Unit of a neural net. (Unimplemented placeholder.)"""
    def __init__(self):
        unimplemented()
def PerceptronLearner(dataset, sizes):
    # Unimplemented placeholder: as written, predict returns sum([]) == 0
    # and the call to unimplemented() raises before predict is returned.
    def predict(example):
        return sum([])
    unimplemented()
#______________________________________________________________________________
def Linearlearner(dataset):
    """Fit a linear model to the data. (Unimplemented placeholder.
    Note the lowercase 'l' in the name; renaming would break callers.)"""
    unimplemented()
#______________________________________________________________________________
def EnsembleLearner(learners):
    """Given a list of learning algorithms, have them vote."""
    def train(dataset):
        trained = [learn(dataset) for learn in learners]
        def predict(example):
            votes = (p(example) for p in trained)
            return mode(votes)
        return predict
    return train
#______________________________________________________________________________
def AdaBoost(L, K):
    """[Fig. 18.34] Boost weighted learner L for K rounds; returns a
    training function producing a weighted-majority ensemble."""
    def train(dataset):
        examples, target = dataset.examples, dataset.target
        N = len(examples)
        # epsilon clips the error away from exactly 0 or 1 below.
        epsilon = 1./(2*N)
        # Start with uniform example weights.
        w = [1./N] * N
        h, z = [], []
        for k in range(K):
            h_k = L(dataset, w)
            h.append(h_k)
            # Weighted error: total weight of misclassified examples.
            error = sum(weight for example, weight in zip(examples, w)
                        if example[target] != h_k(example))
            # Avoid divide-by-0 from either 0% or 100% error rates:
            error = clip(error, epsilon, 1-epsilon)
            for j, example in enumerate(examples):
                if example[target] == h_k(example):
                    # Down-weight the examples this hypothesis got right.
                    w[j] *= error / (1. - error)
            w = normalize(w)
            # Hypothesis weight: log-odds of being correct.
            z.append(math.log((1. - error) / error))
        return WeightedMajority(h, z)
    return train
def WeightedMajority(predictors, weights):
    "Return a predictor that takes a weighted vote."
    def predict(example):
        votes = (p(example) for p in predictors)
        return weighted_mode(votes, weights)
    return predict
def weighted_mode(values, weights):
    """Return the value with the greatest total weight.
    >>> weighted_mode('abbaa', [1,2,3,1,2])
    'b'"""
    totals = {}
    for value, weight in zip(values, weights):
        totals[value] = totals.get(value, 0) + weight
    return max(totals.keys(), key=totals.get)
#_____________________________________________________________________________
# Adapting an unweighted learner for AdaBoost
def WeightedLearner(unweighted_learner):
    """Given a learner that takes just an unweighted dataset, return
    one that takes also a weight for each example. [p. 749 footnote 14]"""
    def train(dataset, weights):
        replicated = replicated_dataset(dataset, weights)
        return unweighted_learner(replicated)
    return train
def replicated_dataset(dataset, weights, n=None):
    "Copy dataset, replicating each example in proportion to its weight."
    # Default (or zero) n: keep the original example count.
    n = n or len(dataset.examples)
    clone = copy.copy(dataset)
    clone.examples = weighted_replicate(dataset.examples, weights, n)
    return clone
def weighted_replicate(seq, weights, n):
    """Return n selections from seq, with the count of each element of
    seq proportional to the corresponding weight (filling in fractions
    randomly).
    >>> weighted_replicate('ABC', [1,2,1], 4)
    ['A', 'B', 'B', 'C']"""
    assert len(seq) == len(weights)
    weights = normalize(weights)
    # Each element first gets floor(w*n) guaranteed copies ...
    wholes = [int(w*n) for w in weights]
    # ... and the leftover slots are filled by sampling elements with
    # probability proportional to the fractional remainders.
    fractions = [(w*n) % 1 for w in weights]
    return (flatten([x] * nx for x, nx in zip(seq, wholes))
            + weighted_sample_with_replacement(seq, fractions, n - sum(wholes)))
def flatten(seqs):
    """Concatenate an iterable of lists into one list.
    Uses an extend loop instead of sum(seqs, []): sum builds a new list per
    addition, which is quadratic in the total number of elements."""
    result = []
    for seq in seqs:
        result.extend(seq)
    return result
#_____________________________________________________________________________
# Functions for testing learners on examples
def test(predict, dataset, examples=None, verbose=0):
    "Return the proportion of the examples that are correctly predicted."
    if examples is None: examples = dataset.examples
    if len(examples) == 0: return 0.0
    right = 0.0
    for example in examples:
        desired = example[dataset.target]
        # Hide the target (and other non-input attrs) before predicting.
        output = predict(dataset.sanitize(example))
        if output == desired:
            right += 1
            if verbose >= 2:
                print ' OK: got %s for %s' % (desired, example)
        elif verbose:
            print 'WRONG: got %s, expected %s for %s' % (
                output, desired, example)
    return right / len(examples)
def train_and_test(learner, dataset, start, end):
    """Reserve dataset.examples[start:end] for test; train on the remainder.
    Return the proportion of examples correct on the test examples."""
    saved = dataset.examples
    try:
        # Temporarily train on everything outside [start:end) ...
        dataset.examples = saved[:start] + saved[end:]
        return test(learner(dataset), dataset, saved[start:end])
    finally:
        # ... and always restore the full example list afterwards.
        dataset.examples = saved
def cross_validation(learner, dataset, k=10, trials=1):
    """Do k-fold cross_validate and return their mean.
    That is, keep out 1/k of the examples for testing on each of k runs.
    Shuffle the examples first; If trials>1, average over several shuffles.
    If k is None, this degenerates to leave-one-out (one fold per example)."""
    if k is None:
        k = len(dataset.examples)
    if trials > 1:
        # Average single-trial scores over several independent shuffles.
        return mean([cross_validation(learner, dataset, k, trials=1)
                     for t in range(trials)])
    else:
        n = len(dataset.examples)
        # NOTE: shuffles dataset.examples in place.
        random.shuffle(dataset.examples)
        # Floor division for fold boundaries: plain '/' yields a float on
        # Python 3, which is not a valid slice index. Identical on Python 2.
        fold = n // k
        return mean([train_and_test(learner, dataset, i*fold, (i+1)*fold)
                     for i in range(k)])
def leave1out(learner, dataset):
    "Leave one out cross-validation over the dataset."
    n_examples = len(dataset.examples)
    return cross_validation(learner, dataset, k=n_examples)
def learningcurve(learner, dataset, trials=10, sizes=None):
    "Mean accuracy of learner as a function of training-set size."
    if sizes is None:
        sizes = range(2, len(dataset.examples)-10, 2)
    def score_once(size):
        # Reshuffle so each trial trains on a different prefix.
        random.shuffle(dataset.examples)
        return train_and_test(learner, dataset, 0, size)
    return [(size, mean([score_once(size) for t in range(trials)]))
            for size in sizes]
#______________________________________________________________________________
# The rest of this file gives datasets for machine learning problems.
# Each constructor below reads '<name>.csv' via DataFile (examples=None).
orings = DataSet(name='orings', target='Distressed',
                 attrnames="Rings Distressed Temp Pressure Flightnum")
# Zoo animals; 'name' identifies the row, so it is excluded from the inputs.
zoo = DataSet(name='zoo', target='type', exclude=['name'],
              attrnames="name hair feathers eggs milk airborne aquatic " +
              "predator toothed backbone breathes venomous fins legs tail " +
              "domestic catsize type")
# Fisher's iris flower measurements.
iris = DataSet(name="iris", target="class",
               attrnames="sepal-len sepal-width petal-len petal-width class")
#______________________________________________________________________________
# The Restaurant example from Fig. 18.2
def RestaurantDataSet(examples=None):
    "Build a DataSet of Restaurant waiting examples. [Fig. 18.3]"
    return DataSet(name='restaurant', target='Wait', examples=examples,
                   attrnames='Alternate Bar Fri/Sat Hungry Patrons Price '
                   + 'Raining Reservation Type WaitEstimate Wait')
# Shared module-level instance used by T(), Fig[18,2] and SyntheticRestaurant.
restaurant = RestaurantDataSet()
def T(attrname, branches):
    """Shorthand tree builder: make a DecisionFork over attrname, wrapping
    any plain (non-fork) branch value in a DecisionLeaf."""
    wrapped = {}
    for value, child in branches.items():
        if isinstance(child, DecisionFork):
            wrapped[value] = child
        else:
            wrapped[value] = DecisionLeaf(child)
    return DecisionFork(restaurant.attrnum(attrname), attrname, wrapped)
# Hand-built decision tree of AIMA Fig. 18.2, registered in the Fig table
# (Fig comes from utils) so SyntheticRestaurant can label examples with it.
Fig[18,2] = T('Patrons',
              {'None': 'No', 'Some': 'Yes', 'Full':
               T('WaitEstimate',
                 {'>60': 'No', '0-10': 'Yes',
                  '30-60':
                  T('Alternate', {'No':
                                  T('Reservation', {'Yes': 'Yes', 'No':
                                                    T('Bar', {'No':'No',
                                                              'Yes':'Yes'})}),
                                  'Yes':
                                  T('Fri/Sat', {'No': 'No', 'Yes': 'Yes'})}),
                  '10-30':
                  T('Hungry', {'No': 'Yes', 'Yes':
                               T('Alternate',
                                 {'No': 'Yes', 'Yes':
                                  T('Raining', {'No': 'No', 'Yes': 'Yes'})})})})})
# Append the Fig. 18.6 doctest transcript (the tree learned from the
# restaurant data with a fixed random seed) to the module docstring.
__doc__ += """
[Fig. 18.6]
>>> random.seed(437)
>>> restaurant_tree = DecisionTreeLearner(restaurant)
>>> restaurant_tree.display()
Test Patrons
Patrons = None ==> RESULT = No
Patrons = Full ==> Test Hungry
Hungry = Yes ==> Test Type
Type = Burger ==> RESULT = Yes
Type = Thai ==> Test Fri/Sat
Fri/Sat = Yes ==> RESULT = Yes
Fri/Sat = No ==> RESULT = No
Type = French ==> RESULT = Yes
Type = Italian ==> RESULT = No
Hungry = No ==> RESULT = No
Patrons = Some ==> RESULT = Yes
"""
def SyntheticRestaurant(n=20):
    "Generate a DataSet with n examples."
    def gen():
        "Draw random attribute values, then label via the Fig. 18.2 tree."
        # Explicit list comprehension instead of map(): the result is
        # subscript-assigned below, which breaks with Python 3's lazy map
        # (Python 2's map already returned a list, so behavior is unchanged).
        example = [random.choice(values) for values in restaurant.values]
        example[restaurant.target] = Fig[18,2](example)
        return example
    return RestaurantDataSet([gen() for i in range(n)])
#______________________________________________________________________________
# Artificial, generated datasets.
def Majority(k, n):
    """Return a DataSet with n k-bit examples of the majority problem:
    k random bits followed by a 1 if more than half the bits are 1, else 0."""
    def one_example():
        bits = [random.choice([0, 1]) for _ in range(k)]
        # Label: 1 iff strictly more than half of the bits are set.
        bits.append(int(sum(bits) > k/2))
        return bits
    return DataSet(name="majority", examples=[one_example() for _ in range(n)])
def Parity(k, n, name="parity"):
    """Return a DataSet with n k-bit examples of the parity problem:
    k random bits followed by a 1 if an odd number of bits are 1, else 0."""
    def one_example():
        bits = [random.choice([0, 1]) for _ in range(k)]
        # Label: parity of the bit count.
        bits.append(sum(bits) % 2)
        return bits
    return DataSet(name=name, examples=[one_example() for _ in range(n)])
def Xor(n):
    "Return a DataSet with n examples of 2-input xor."
    # Xor is just 2-bit parity.
    return Parity(2, n, "xor")
def ContinuousXor(n):
    "2 inputs are chosen uniformly from (0.0 .. 2.0]; output is xor of ints."
    def one_example():
        x = random.uniform(0.0, 2.0)
        y = random.uniform(0.0, 2.0)
        return [x, y, int(x) != int(y)]
    return DataSet(name="continuous xor",
                   examples=[one_example() for _ in range(n)])
#______________________________________________________________________________
def compare(algorithms=[PluralityLearner, NaiveBayesLearner,
                        NearestNeighborLearner, DecisionTreeLearner],
            datasets=[iris, orings, zoo, restaurant, SyntheticRestaurant(20),
                      Majority(7, 100), Parity(7, 100), Xor(100)],
            k=10, trials=1):
    """Compare various learners on various datasets using cross-validation.
    Print results as a table."""
    # NOTE: the default datasets (including the randomly generated
    # Synthetic/Majority/Parity/Xor sets) are built once, at module import.
    print_table([[a.__name__.replace('Learner','')] +
                 [cross_validation(a, d, k, trials) for d in datasets]
                 for a in algorithms],
                header=[''] + [d.name[0:7] for d in datasets], numfmt='%.2f')
| |
import json
from django.contrib.auth.decorators import login_required
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.http import (HttpResponse, HttpResponseBadRequest,
HttpResponseForbidden)
from django.shortcuts import get_object_or_404, render
from django.template.context_processors import csrf
from django.template.loader import render_to_string
from bootcamp.activities.models import Activity
from bootcamp.decorators import ajax_required
from bootcamp.feeds.models import Feed
FEEDS_NUM_PAGES = 10
@login_required
def feeds(request):
    """Render the first page of the activity stream for the current user."""
    paginator = Paginator(Feed.get_feeds(), FEEDS_NUM_PAGES)
    first_page = paginator.page(1)
    # Remember the newest visible feed id so AJAX polling knows where to
    # start; -1 means the stream was empty.
    newest_id = first_page[0].id if first_page else -1
    return render(request, 'feeds/feeds.html', {
        'feeds': first_page,
        'from_feed': newest_id,
        'page': 1,
    })
def feed(request, pk):
    # Render a single feed entry; 404 when it does not exist.
    # NOTE(review): this is the only view in the module without
    # @login_required -- confirm single-feed pages are meant to be public.
    feed = get_object_or_404(Feed, pk=pk)
    return render(request, 'feeds/feed.html', {'feed': feed})
@login_required
@ajax_required
def load(request):
    """AJAX endpoint: return one rendered page of the feed stream.

    GET params: from_feed (newest id when the page was first built),
    page (1-based page number) and feed_source ('all' or an author's id).
    Responds 400 for a non-integer page; an out-of-range page yields an
    empty response body.
    """
    from_feed = request.GET.get('from_feed')
    page = request.GET.get('page')
    feed_source = request.GET.get('feed_source')
    all_feeds = Feed.get_feeds(from_feed)
    if feed_source != 'all':
        all_feeds = all_feeds.filter(user__id=feed_source)
    paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
    try:
        feeds = paginator.page(page)
    except PageNotAnInteger:
        return HttpResponseBadRequest()
    except EmptyPage:
        feeds = []
    csrf_token = (csrf(request)['csrf_token'])
    # Collect the fragments and join once: repeated str.format in a loop
    # rebuilds the whole accumulated string each iteration (quadratic).
    fragments = [
        render_to_string('feeds/partial_feed.html',
                         {
                             'feed': feed,
                             'user': request.user,
                             'csrf_token': csrf_token
                         })
        for feed in feeds
    ]
    return HttpResponse(''.join(fragments))
def _html_feeds(last_feed, user, csrf_token, feed_source='all'):
    """Render every feed newer than last_feed (optionally restricted to one
    author) and return the concatenated HTML fragments."""
    feeds = Feed.get_feeds_after(last_feed)
    if feed_source != 'all':
        feeds = feeds.filter(user__id=feed_source)
    # Join once instead of repeated str.format accumulation (quadratic).
    fragments = [
        render_to_string('feeds/partial_feed.html',
                         {
                             'feed': feed,
                             'user': user,
                             'csrf_token': csrf_token
                         })
        for feed in feeds
    ]
    return ''.join(fragments)
@login_required
@ajax_required
def load_new(request):
    """Return rendered HTML for feeds posted after GET['last_feed']."""
    token = csrf(request)['csrf_token']
    html = _html_feeds(request.GET.get('last_feed'), request.user, token)
    return HttpResponse(html)
@login_required
@ajax_required
def check(request):
    """Return the number of feeds newer than GET['last_feed'],
    optionally restricted to one author via GET['feed_source']."""
    newer = Feed.get_feeds_after(request.GET.get('last_feed'))
    source = request.GET.get('feed_source')
    if source != 'all':
        newer = newer.filter(user__id=source)
    return HttpResponse(newer.count())
@login_required
@ajax_required
def post(request):
    """Create a feed entry from POST['post'] (trimmed, capped at 255 chars;
    blank posts are ignored), then return the refreshed stream HTML."""
    last_feed = request.POST.get('last_feed')
    user = request.user
    csrf_token = csrf(request)['csrf_token']
    text = request.POST['post'].strip()
    if text:
        entry = Feed(user=user, post=text[:255])
        entry.save()
    html = _html_feeds(last_feed, user, csrf_token)
    return HttpResponse(html)
@login_required
@ajax_required
def like(request):
    """Toggle the current user's LIKE on a feed; respond with the new count."""
    feed_id = request.POST['feed']
    feed = Feed.objects.get(pk=feed_id)
    user = request.user
    existing = Activity.objects.filter(activity_type=Activity.LIKE,
                                       feed=feed_id, user=user)
    if existing:
        # Un-like: clear the notification, then delete the activity rows.
        user.profile.unotify_liked(feed)
        existing.delete()
    else:
        new_like = Activity(activity_type=Activity.LIKE, feed=feed_id,
                            user=user)
        new_like.save()
        user.profile.notify_liked(feed)
    return HttpResponse(feed.calculate_likes())
@login_required
@ajax_required
def comment(request):
    """GET: render a feed's comment list.
    POST: add a trimmed, 255-char-capped comment first (blank comments are
    ignored), then render the list."""
    if request.method != 'POST':
        feed = Feed.objects.get(pk=request.GET.get('feed'))
        return render(request, 'feeds/partial_feed_comments.html',
                      {'feed': feed})
    feed = Feed.objects.get(pk=request.POST['feed'])
    text = request.POST['post'].strip()
    if text:
        user = request.user
        feed.comment(user=user, post=text[:255])
        # Notify the feed's author and anyone else in the thread.
        user.profile.notify_commented(feed)
        user.profile.notify_also_commented(feed)
    return render(request, 'feeds/partial_feed_comments.html',
                  {'feed': feed})
@login_required
@ajax_required
def update(request):
    """Return {feed_pk: {'likes', 'comments'}} JSON for the visible feeds."""
    first_feed = request.GET.get('first_feed')
    last_feed = request.GET.get('last_feed')
    source = request.GET.get('feed_source')
    visible = Feed.get_feeds().filter(id__range=(last_feed, first_feed))
    if source != 'all':
        visible = visible.filter(user__id=source)
    counters = {feed.pk: {'likes': feed.likes, 'comments': feed.comments}
                for feed in visible}
    return HttpResponse(json.dumps(counters), content_type='application/json')
@login_required
@ajax_required
def track_comments(request):
    """Render a feed's comments, or an empty response if it has none."""
    feed = Feed.objects.get(pk=request.GET.get('feed'))
    if not feed.get_comments():
        return HttpResponse()
    return render(request, 'feeds/partial_feed_comments.html', {'feed': feed})
@login_required
@ajax_required
def remove(request):
    """Delete one of the current user's feeds together with its likes.

    Returns 403 when the feed belongs to someone else, and 400 on any error
    (missing/unknown feed id, etc.).
    """
    try:
        feed = Feed.objects.get(pk=request.POST.get('feed'))
        if feed.user != request.user:
            return HttpResponseForbidden()
        parent = feed.parent
        for like_activity in feed.get_likes():
            like_activity.delete()
        feed.delete()
        if parent:
            # Keep the parent's cached comment counter in sync.
            parent.calculate_comments()
        return HttpResponse()
    except Exception:
        return HttpResponseBadRequest()
| |
"""
This module contains the base Script class that all
scripts are inheriting from.
It also defines a few common scripts.
"""
from time import time
from twisted.internet.defer import maybeDeferred
from twisted.internet.task import LoopingCall
from django.conf import settings
from django.utils.translation import ugettext as _
from src.typeclasses.typeclass import TypeClass
from src.scripts.models import ScriptDB
from src.comms import channelhandler
from src.utils import logger
__all__ = ["Script", "DoNothing", "CheckSessions",
"ValidateScripts", "ValidateChannelHandler"]
_SESSIONS = None
# attr-cache size in MB
_ATTRIBUTE_CACHE_MAXSIZE = settings.ATTRIBUTE_CACHE_MAXSIZE
#
# Base script, inherit from Script below instead.
#
class ScriptClass(TypeClass):
    """
    Base class for scripts. Don't inherit from this, inherit
    from the class 'Script' instead.

    A script couples optional timed repetition (driven by a Twisted
    LoopingCall stored in self.ndb.twisted_task) with the hook methods
    defined at the bottom of this class.
    """
    # private methods
    def __eq__(self, other):
        """
        Scripts are equal when their database ids match.
        This has to be located at this level, having it in the
        parent doesn't work.
        """
        try:
            return other.dbid == self.dbid
        except Exception:
            # 'other' has no dbid (not a typeclassed entity) - never equal.
            return False
    def _start_task(self, start_now=True):
        "start task runner (the Twisted LoopingCall driving at_repeat)"
        self.ndb.twisted_task = LoopingCall(self._step_task)
        if self.ndb._paused_time:
            # we had paused the script, restarting
            # (run once after the remaining paused time, not the full interval)
            #print " start with paused time:", self.key, self.ndb._paused_time
            self.ndb.twisted_task.start(self.ndb._paused_time, now=False)
        else:
            # starting script anew.
            #print "_start_task: self.interval:", self.key, self.dbobj.interval
            self.ndb.twisted_task.start(self.dbobj.interval,
                                        now=start_now and not self.start_delay)
        self.ndb.time_last_called = int(time())
    def _stop_task(self):
        "stop task runner (safe to call when no task is running)"
        try:
            #print "stopping twisted task:", id(self.ndb.twisted_task), self.obj
            if self.ndb.twisted_task and self.ndb.twisted_task.running:
                self.ndb.twisted_task.stop()
        except Exception:
            logger.log_trace()
    def _step_err_callback(self, e):
        """
        callback for runner errors

        Logs the error and, when the script sits on an object, also
        echoes the message to that object.
        """
        cname = self.__class__.__name__
        estring = _("Script %(key)s(#%(dbid)i) of type '%(cname)s': at_repeat() error '%(err)s'.") % \
                  {"key": self.key, "dbid": self.dbid, "cname": cname,
                   "err": e.getErrorMessage()}
        try:
            self.dbobj.db_obj.msg(estring)
        except Exception:
            # script is not attached to anything that can receive messages
            pass
        logger.log_errmsg(estring)
    def _step_succ_callback(self):
        """
        step task runner. No try..except needed due to defer wrap.

        Validates the script, fires at_repeat(), maintains the repeat
        counter and switches back to normal-interval ticking if this
        step finished a post-pause catch-up run.
        """
        if not self.is_valid():
            self.stop()
            return
        self.at_repeat()
        repeats = self.dbobj.db_repeats
        if repeats <= 0:
            pass  # infinite repeat
        elif repeats == 1:
            # this was the last allowed repeat - stop (and delete) the script
            self.stop()
            return
        else:
            self.dbobj.db_repeats -= 1
        self.ndb.time_last_called = int(time())
        self.save()
        if self.ndb._paused_time:
            # this means we were running an unpaused script, for the
            # time remaining after the pause. Now we start a normal-running
            # timer again.
            # print "switching to normal run:", self.key
            del self.ndb._paused_time
            self._stop_task()
            self._start_task(start_now=False)
    def _step_task(self):
        "step task - wraps the success/error callbacks in a deferred"
        try:
            d = maybeDeferred(self._step_succ_callback)
            d.addErrback(self._step_err_callback)
            return d
        except Exception:
            logger.log_trace()
    # Public methods
    def time_until_next_repeat(self):
        """
        Returns the time in seconds until the script will be
        run again. If this is not a stepping script, returns None.
        This is not used in any way by the script's stepping
        system; it's only here for the user to be able to
        check in on their scripts and when they will next be run.
        """
        try:
            if self.ndb._paused_time:
                # paused scripts count down from the stored remainder
                return max(0, (self.ndb.time_last_called + self.ndb._paused_time) - int(time()))
            else:
                return max(0, (self.ndb.time_last_called + self.dbobj.db_interval) - int(time()))
        except Exception:
            # e.g. time_last_called not set yet - not a stepping script
            return None
    def start(self, force_restart=False):
        """
        Called every time the script is started (for
        persistent scripts, this is usually once every server start)
        force_restart - if True, will always restart the script, regardless
                        of if it has started before.
        returns 0 or 1 to indicate if the script has been started or not.
        Used in counting.
        """
        #print "Script %s (%s) start (active:%s, force:%s) ..." % (self.key, id(self.dbobj),
        #                                                          self.is_active, force_restart)
        if self.dbobj.is_active and not force_restart:
            # script already runs and should not be restarted.
            return 0
        obj = self.obj
        if obj:
            # check so the scripted object is valid and initalized
            try:
                object.__getattribute__(obj.dbobj, 'cmdset')
            except AttributeError:
                # this means the object is not initialized.
                logger.log_trace()
                self.dbobj.is_active = False
                return 0
        # try to restart a paused script
        if self.unpause():
            return 1
        # try to start the script from scratch
        try:
            self.dbobj.is_active = True
            self.at_start()
            if self.dbobj.db_interval > 0:
                self._start_task()
            return 1
        except Exception:
            logger.log_trace()
            self.dbobj.is_active = False
            return 0
    def stop(self, kill=False):
        """
        Called to stop the script from running.
        This also deletes the script.
        kill - don't call finishing hooks.

        Returns 1 on success, 0 if the database deletion failed.
        """
        #print "stopping script %s" % self.key
        #import pdb
        #pdb.set_trace()
        if not kill:
            try:
                self.at_stop()
            except Exception:
                logger.log_trace()
        if self.dbobj.db_interval > 0:
            try:
                self._stop_task()
            except Exception:
                logger.log_trace("Stopping script %s(%s)" % (self.key, self.dbid))
                pass
        try:
            self.dbobj.delete()
        except AssertionError:
            logger.log_trace()
            return 0
        return 1
    def pause(self):
        """
        This stops a running script and stores its active state.
        The remaining time until next tick is kept in the (persistent)
        db attribute _paused_time so unpause() can resume correctly.
        """
        #print "pausing", self.key, self.time_until_next_repeat()
        dt = self.time_until_next_repeat()
        if dt is None:
            # not a timed script - nothing to pause
            return
        self.db._paused_time = dt
        self._stop_task()
    def unpause(self):
        """
        Restart a paused script. This WILL call at_start().
        Returns True if a paused script was resumed, False otherwise.
        """
        #print "unpausing", self.key, self.db._paused_time
        dt = self.db._paused_time
        if dt is None:
            # was never paused
            return False
        try:
            self.dbobj.is_active = True
            self.at_start()
            self.ndb._paused_time = dt
            self._start_task(start_now=False)
            del self.db._paused_time
        except Exception:
            logger.log_trace()
            self.dbobj.is_active = False
            return False
        return True
    # hooks (overridden by child classes)
    def at_script_creation(self):
        "placeholder"
        pass
    def is_valid(self):
        "placeholder"
        pass
    def at_start(self):
        "placeholder."
        pass
    def at_stop(self):
        "placeholder"
        pass
    def at_repeat(self):
        "placeholder"
        pass
    def at_init(self):
        "called when typeclass re-caches. Usually not used for scripts."
        pass
#
# Base Script - inherit from this
#
class Script(ScriptClass):
    """
    This is the class you should inherit from, it implements
    the hooks called by the script machinery.
    """
    def __init__(self, dbobj):
        """
        This is the base TypeClass for all Scripts. Scripts describe events,
        timers and states in game, they can have a time component or describe
        a state that changes under certain conditions.

        Script API:

        * Available properties (only available on initiated Typeclass objects)

         key (string) - name of object
         name (string)- same as key
         aliases (list of strings) - aliases to the object. Will be saved to
                 database as AliasDB entries but returned as strings.
         dbref (int, read-only) - unique #id-number. Also "id" can be used.
         dbobj (Object, read-only) - link to database model. dbobj.typeclass
                 points back to this class
         typeclass (Object, read-only) - this links back to this class as an
                 identifier only. Use self.swap_typeclass() to switch.
         date_created (string) - time stamp of object creation
         permissions (list of strings) - list of permission strings
         desc (string) - optional description of script, shown in listings
         obj (Object) - optional object that this script is connected to
                 and acts on (set automatically
                 by obj.scripts.add())
         interval (int) - how often script should run, in seconds.
                 <=0 turns off ticker
         start_delay (bool) - if the script should start repeating right
                 away or wait self.interval seconds
         repeats (int) - how many times the script should repeat before
                 stopping. <=0 means infinite repeats
         persistent (bool) - if script should survive a server shutdown or not
         is_active (bool) - if script is currently running

        * Handlers

         locks - lock-handler: use locks.add() to add new lock strings
         db - attribute-handler: store/retrieve database attributes on this
                 self.db.myattr=val, val=self.db.myattr
         ndb - non-persistent attribute handler: same as db but does not
                 create a database entry when storing data

        * Helper methods

         start() - start script (this usually happens automatically at creation
                 and obj.script.add() etc)
         stop() - stop script, and delete it
         pause() - put the script on hold, until unpause() is called. If script
                 is persistent, the pause state will survive a shutdown.
         unpause() - restart a previously paused script. The script will
                 continue as if it was never paused.
         time_until_next_repeat() - if a timed script (interval>0), returns
                 time until next tick

        * Hook methods

         at_script_creation() - called only once, when an object of this
                 class is first created.
         is_valid() - is called to check if the script is valid to be running
                 at the current time. If is_valid() returns False, the
                 running script is stopped and removed from the game. You
                 can use this to check state changes (i.e. an script
                 tracking some combat stats at regular intervals is only
                 valid to run while there is actual combat going on).
         at_start() - Called every time the script is started, which for
                 persistent scripts is at least once every server start.
                 Note that this is unaffected by self.delay_start, which
                 only delays the first call to at_repeat().
         at_repeat() - Called every self.interval seconds. It will be called
                 immediately upon launch unless self.delay_start is True,
                 which will delay the first call of this method by
                 self.interval seconds. If self.interval<=0, this method
                 will never be called.
         at_stop() - Called as the script object is stopped and is about to
                 be removed from the game, e.g. because is_valid()
                 returned False or self.stop() was called manually.
         at_server_reload() - Called when server reloads. Can be used to save
                 temporary variables you want should survive a reload.
         at_server_shutdown() - called at a full server shutdown.
        """
        super(Script, self).__init__(dbobj)
    def at_script_creation(self):
        """
        Only called once, by the create function.
        Sets sensible defaults: never ticking, infinite repeats,
        non-persistent.
        """
        self.key = "<unnamed>"
        self.desc = ""
        self.interval = 0  # infinite
        self.start_delay = False
        self.repeats = 0  # infinite
        self.persistent = False
    def is_valid(self):
        """
        Is called to check if the script is valid to run at this time.
        Should return a boolean. The method is assumed to collect all needed
        information from its related self.obj.
        """
        return True
    def at_start(self):
        """
        Called whenever the script is started, which for persistent
        scripts is at least once every server start. It will also be called
        when starting again after a pause (such as after a server reload)
        """
        pass
    def at_repeat(self):
        """
        Called repeatedly if this Script is set to repeat
        regularly.
        """
        pass
    def at_stop(self):
        """
        Called whenever it's time for this script to stop
        (either because is_valid() returned False or self.stop() was called)
        """
        pass
    def at_server_reload(self):
        """
        This hook is called whenever the server is shutting down for
        restart/reboot. If you want to, for example, save non-persistent
        properties across a restart, this is the place to do it.
        """
        pass
    def at_server_shutdown(self):
        """
        This hook is called whenever the server is shutting down fully
        (i.e. not for a restart).
        """
        pass
# Some useful default Script types used by Evennia.
class DoNothing(Script):
    """A no-op script, used as the default fallback."""
    def at_script_creation(self):
        """Set the script's identity; leaves all timing defaults alone."""
        self.key = "sys_do_nothing"
        self.desc = _("This is an empty placeholder script.")
class Store(Script):
    """A simple attribute-storage script with no timed behavior."""
    def at_script_creation(self):
        """Set the script's identity; leaves all timing defaults alone."""
        self.key = "sys_storage"
        self.desc = _("This is a generic storage container.")
class CheckSessions(Script):
    """Periodically validate that connected sessions are alive."""
    def at_script_creation(self):
        """Configure: run once a minute and survive server restarts."""
        self.key = "sys_session_check"
        self.desc = _("Checks sessions so they are live.")
        self.interval = 60  # repeat every 60 seconds
        self.persistent = True
    def at_repeat(self):
        """Validate all sessions via the lazily imported session handler."""
        global _SESSIONS
        if not _SESSIONS:
            # Deferred import avoids a circular import at module load time.
            from src.server.sessionhandler import SESSIONS as _SESSIONS
        _SESSIONS.validate_sessions()
class ValidateScripts(Script):
    """Periodically validate all scripts in the database."""
    def at_script_creation(self):
        """Configure: run hourly and survive server restarts."""
        self.key = "sys_scripts_validate"
        self.desc = _("Validates all scripts regularly.")
        self.interval = 3600  # validate every hour.
        self.persistent = True
    def at_repeat(self):
        """Delegate the sweep to the ScriptDB manager."""
        ScriptDB.objects.validate()
class ValidateChannelHandler(Script):
    """Periodically refresh the channel handler so it stays in sync."""
    def at_script_creation(self):
        """Configure: run a bit later than ValidateScripts; persistent."""
        self.key = "sys_channels_validate"
        self.desc = _("Updates the channel handler")
        self.interval = 3700  # validate a little later than ValidateScripts
        self.persistent = True
    def at_repeat(self):
        """Ask the global channel handler to rebuild its state."""
        channelhandler.CHANNELHANDLER.update()
#class ClearAttributeCache(Script):
# "Clear the attribute cache."
# def at_script_creation(self):
# "Setup the script"
# self.key = "sys_cache_clear"
# self.desc = _("Clears the Attribute Cache")
# self.interval = 3600 * 2
# self.persistent = True
# def at_repeat(self):
# "called every 2 hours. Sets a max attr-cache limit to 100 MB." # enough for normal usage?
# if is_pypy:
# # pypy don't support get_size, so we have to skip out here.
# return
# attr_cache_size, _, _ = caches.get_cache_sizes()
# if attr_cache_size > _ATTRIBUTE_CACHE_MAXSIZE:
# caches.flush_attr_cache()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for OptimizerV2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import losses
from tensorflow.python.keras import optimizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adadelta
from tensorflow.python.keras.optimizer_v2 import adagrad
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.optimizer_v2 import adamax
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.optimizer_v2 import nadam
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum
from tensorflow.python.training import training_util
_DATA_TYPES = [dtypes.half, dtypes.float32, dtypes.float64]
# TODO(b/141710709): complex support in NVCC and ROCM.
if (not test_util.IsBuiltWithNvcc() and not test.is_built_with_rocm()):
_DATA_TYPES += [dtypes.complex64, dtypes.complex128]
class OptimizerTest(test.TestCase):
  @test_util.run_in_graph_and_eager_modes
  def testBasic(self):
    """One SGD step applies var -= lr * grad for every supported dtype."""
    for dtype in _DATA_TYPES:
      with test_util.use_gpu():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        loss = lambda: 5 * var0 + 3 * var1  # pylint: disable=cell-var-from-loop
        sgd = gradient_descent.SGD(3.0)
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 1 step of sgd through optimizer
        opt_op = sgd.minimize(loss, var_list=[var0, var1])
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(opt_op)
        # Validate updated params:
        # var0 = [1., 2.] - 3.0 * [5, 5], var1 = [3., 4.] - 3.0 * [3, 3]
        self.assertAllClose([-14., -13.], self.evaluate(var0))
        self.assertAllClose([-6., -5.], self.evaluate(var1))
  @test_util.run_in_graph_and_eager_modes
  def testAdaptiveLearningRate(self):
    """learning_rate can be reassigned (as a float or a schedule) between
    steps and the next update uses the new value."""
    for dtype in _DATA_TYPES:
      var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
      var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
      def loss():
        return 5 * var0 + 3 * var1  # pylint: disable=cell-var-from-loop
      sgd = gradient_descent.SGD(1.0)
      self.evaluate(variables.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Run 1 step of sgd through optimizer
      opt_op = sgd.minimize(loss, [var0, var1])
      self.evaluate(variables.global_variables_initializer())
      self.evaluate(opt_op)
      # Validate updated params
      # var0 = [1., 2.] - 1.0 * [5, 5]
      self.assertAllClose([-4., -3.], self.evaluate(var0))
      # var1 = [3., 4.] - 1.0 * [3, 3]
      self.assertAllClose([0., 1.], self.evaluate(var1))
      sgd.learning_rate = 0.5
      if context.executing_eagerly():
        sgd.minimize(loss, [var0, var1])
      else:
        # In graph mode re-running the same op picks up the new rate.
        self.evaluate(opt_op)
      # Validate updated params
      # var0 = [-4., -3.] - 0.5 * [5, 5]
      self.assertAllClose([-6.5, -5.5], self.evaluate(var0))
      # var1 = [0., 1.] - 0.5 * [3, 3]
      self.assertAllClose([-1.5, -0.5], self.evaluate(var1))
      sgd.learning_rate = learning_rate_schedule.InverseTimeDecay(
          0.5, decay_steps=1.0, decay_rate=0.5)
      if context.executing_eagerly():
        sgd.minimize(loss, [var0, var1])
      else:
        self.evaluate(opt_op)
  @test_util.run_in_graph_and_eager_modes
  def testPrecomputedGradient(self):
    """grad_loss scales each computed gradient elementwise before the
    update is applied."""
    for dtype in _DATA_TYPES:
      with test_util.use_gpu():
        var0 = variables.Variable([1.0, 2.0], dtype=dtype)
        var1 = variables.Variable([3.0, 4.0], dtype=dtype)
        loss = lambda: 5 * var0 + 3 * var1  # pylint: disable=cell-var-from-loop
        grad_loss = constant_op.constant([42, -42], dtype=dtype)
        sgd = gradient_descent.SGD(3.0)
        self.evaluate(variables.global_variables_initializer())
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 1 step of sgd through optimizer
        opt_op = sgd.minimize(loss, var_list=[var0, var1], grad_loss=grad_loss)
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(opt_op)
        # Validate updated params: var -= lr * (grad * grad_loss)
        self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
                            self.evaluate(var0))
        self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
                            self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testNoGradients(self):
for dtype in _DATA_TYPES:
with test_util.use_gpu():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
loss = lambda: 5 * var0 # pylint: disable=cell-var-from-loop
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegexp(ValueError, 'No gradients'):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_Minimize(self):
for dtype in _DATA_TYPES:
with test_util.use_gpu():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
loss = lambda: constant_op.constant(5.0)
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegexp(ValueError,
'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_ApplyGradients(self):
for dtype in _DATA_TYPES:
with test_util.use_gpu():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
sgd_op = gradient_descent.SGD(3.0)
with self.assertRaisesRegexp(ValueError,
'No gradients provided for any variable'):
sgd_op.apply_gradients([(None, var0), (None, var1)])
  @test_util.run_in_graph_and_eager_modes
  def testGradientsAsVariables(self):
    """Gradients copied into tf.Variables can be fed to apply_gradients
    and produce the same update as a direct minimize."""
    for i, dtype in enumerate(_DATA_TYPES):
      with test_util.use_gpu():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        loss = lambda: 5 * var0 + 3 * var1  # pylint: disable=cell-var-from-loop
        sgd = gradient_descent.SGD(3.0)
        grads_and_vars = sgd._compute_gradients(loss, [var0, var1])
        # Convert gradients to tf.Variables
        converted_grads = [
            resource_variable_ops.ResourceVariable(
                array_ops.zeros([2], dtype), name='c_%d_%d' % (i, j))
            for j, gv in enumerate(grads_and_vars)
        ]
        convert_ops = [
            state_ops.assign(converted_grads[j], gv[0])
            for j, gv in enumerate(grads_and_vars)
        ]
        # Run convert_ops to achieve the gradients converting
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(convert_ops)
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], self.evaluate(var0))
        self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Run 1 step of sgd through optimizer
        converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
        opt_op = sgd.apply_gradients(converted_grads_and_vars)
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(convert_ops)
        self.evaluate(opt_op)
        # Validate updated params (same result as testBasic)
        self.assertAllClose([-14., -13.], self.evaluate(var0))
        self.assertAllClose([-6., -5.], self.evaluate(var1))
  @test_util.run_in_graph_and_eager_modes
  def testComputeGradientsWithTensors(self):
    """_compute_gradients works w.r.t. a plain tensor, but the resulting
    (grad, tensor) pairs cannot be applied."""
    with test_util.use_gpu():
      x = ops.convert_to_tensor_v2(1.0)
      def f():
        return x * x
      sgd = gradient_descent.SGD(3.0)
      grads_and_vars = sgd._compute_gradients(f, [x])
      self.assertLen(grads_and_vars, 1)
      grad, x_as_var = grads_and_vars[0]
      self.assertIs(x, x_as_var)
      # d(x*x)/dx at x=1 is 2.
      self.assertEqual(2.0, self.evaluate(grad))
      # Tensors are not variables, so they cannot receive updates.
      with self.assertRaises(NotImplementedError):
        sgd.apply_gradients(grads_and_vars)
  @test_util.run_in_graph_and_eager_modes
  def testConstraint(self):
    """Variable constraint functions are applied after the gradient step."""
    constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
    constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
    with test_util.use_gpu():
      var0 = variables.Variable([1.0, 2.0],
                                constraint=constraint_01)
      var1 = variables.Variable([3.0, 4.0],
                                constraint=constraint_0)
      loss = lambda: 5 * var0 + 3 * var1
      sgd = gradient_descent.SGD(3.0)
      self.evaluate(variables.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
      # Run 1 step of sgd through optimizer
      opt_op = sgd.minimize(loss, var_list=[var0, var1])
      self.evaluate(variables.global_variables_initializer())
      self.evaluate(opt_op)
      # Validate updated params: raw SGD results are clipped back into
      # [-0.1, 0] and [0, 1] respectively by the constraints.
      self.assertAllClose([-0.1, -0.1], self.evaluate(var0))
      self.assertAllClose([0., 0.], self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testIterationWithoutMinimize(self):
with test_util.use_gpu():
sgd = gradient_descent.SGD(3.0)
self.evaluate(sgd.iterations.initializer)
self.assertEqual(0, self.evaluate(sgd.iterations))
  @test_util.run_in_graph_and_eager_modes
  def testConfig(self):
    """get_config()/from_config() round-trips the learning rate, both
    before and after the lr variable is created by minimize()."""
    with test_util.use_gpu():
      opt = gradient_descent.SGD(learning_rate=1.0)
      config = opt.get_config()
      opt2 = gradient_descent.SGD.from_config(config)
      lr = opt._get_hyper('learning_rate')
      lr2 = opt2._get_hyper('learning_rate')
      self.evaluate(variables.global_variables_initializer())
      # assert both are equal float values.
      self.assertEqual(self.evaluate(lr), self.evaluate(lr2))
      var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
      loss = lambda: 3 * var0
      # learning rate variable created when calling minimize.
      opt.minimize(loss, [var0])
      opt3 = gradient_descent.SGD.from_config(config)
      lr3 = opt3._get_hyper('learning_rate')
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(self.evaluate(lr), self.evaluate(lr3))
  @test_util.run_in_graph_and_eager_modes
  def testConfigWithLearningRateDecay(self):
    """Serialization round-trips LearningRateSchedule hyperparameters."""
    with test_util.use_gpu():
      var0 = variables.Variable([[1.0], [2.0]], dtype=dtypes.float32)
      for decay_schedule in [
          learning_rate_schedule.InverseTimeDecay(
              0.5, decay_steps=1.0, decay_rate=0.1),
          learning_rate_schedule.PiecewiseConstantDecay(
              [5], [1., .5])
      ]:
        step = 10
        opt = gradient_descent.SGD(decay_schedule)
        config = opt.get_config()
        opt2 = gradient_descent.SGD.from_config(config)
        # assert both are equal float values.
        self.assertAllEqual(
            decay_schedule(step),
            opt._get_hyper('learning_rate')(step))
        self.assertAllEqual(
            decay_schedule(step),
            opt2._get_hyper('learning_rate')(step))
        loss = lambda: 3 * var0
        # learning rate variable is created when calling minimize.
        opt.minimize(loss, [var0])
        self.evaluate(variables.global_variables_initializer())
        config = opt.get_config()
        opt3 = gradient_descent.SGD.from_config(config)
        self.assertAllEqual(
            self.evaluate(opt._get_hyper('learning_rate')(step)),
            opt3._get_hyper('learning_rate')(step))
@test_util.run_in_graph_and_eager_modes
def testGradClipValue(self):
with test_util.use_gpu():
var = resource_variable_ops.ResourceVariable([1.0, 2.0])
loss = lambda: 3 * var
opt = gradient_descent.SGD(learning_rate=1.0, clipvalue=1.0)
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0., 1.], self.evaluate(var))
@test_util.run_in_graph_and_eager_modes
def testGradClipNorm(self):
with test_util.use_gpu():
var = resource_variable_ops.ResourceVariable([1.0])
loss = lambda: 3 * var
opt = gradient_descent.SGD(learning_rate=1.0, clipnorm=1.0)
opt_op = opt.minimize(loss, [var])
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertAllClose([0.], self.evaluate(var))
  @test_util.run_in_graph_and_eager_modes
  def testInvalidClipNorm(self):
    """A negative clipnorm is rejected at construction time."""
    with self.assertRaisesRegexp(ValueError, '>= 0'):
      gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0)
  @test_util.run_in_graph_and_eager_modes
  def testInvalidKwargs(self):
    """Unknown constructor keyword arguments raise TypeError."""
    with self.assertRaisesRegexp(TypeError, 'Unexpected keyword argument'):
      gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0)
  @test_util.run_in_graph_and_eager_modes
  def testWeights(self):
    """get_weights()/set_weights() copies optimizer state between
    compatible optimizers and raises on mismatched weight lists."""
    with test_util.use_gpu():
      opt1 = adam.Adam(learning_rate=1.0)
      var1 = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                    dtype=dtypes.float32)
      loss1 = lambda: 3 * var1
      opt_op_1 = opt1.minimize(loss1, [var1])
      self.evaluate(variables.global_variables_initializer())
      config = opt1.get_config()
      opt2 = adam.Adam.from_config(config)
      var2 = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                    dtype=dtypes.float32)
      loss2 = lambda: 3 * var2
      opt_op_2 = opt2.minimize(loss2, [var2])
      weights = opt1.get_weights()
      # Assert set_weights and both variables get updated to same value.
      self.evaluate(variables.global_variables_initializer())
      opt2.set_weights(weights)
      self.evaluate([opt_op_1, opt_op_2])
      self.assertAllClose(self.evaluate(var1), self.evaluate(var2))
      self.assertEqual(1, self.evaluate(opt1.iterations))
      self.assertEqual(1, self.evaluate(opt2.iterations))
      var3 = resource_variable_ops.ResourceVariable([1.0, 2.0, 3.0],
                                                    dtype=dtypes.float32)
      var4 = resource_variable_ops.ResourceVariable([4.0, 5.0, 6.0],
                                                    dtype=dtypes.float32)
      loss3 = lambda: 3 * var3 + 5 * var4
      opt_op_3 = opt1.minimize(loss3, [var3, var4])
      # Assert set_weights with ValueError since weight list does not match.
      self.evaluate(variables.global_variables_initializer())
      weights = opt1.get_weights()
      with self.assertRaisesRegexp(ValueError, 'but the optimizer was'):
        opt2.set_weights(weights)
      # Assert set_weights and variables get updated to same value.
      var5 = resource_variable_ops.ResourceVariable([1.0, 2.0, 3.0],
                                                    dtype=dtypes.float32)
      var6 = resource_variable_ops.ResourceVariable([4.0, 5.0, 6.0],
                                                    dtype=dtypes.float32)
      loss4 = lambda: 3 * var5 + 5 * var6
      opt_op_4 = opt2.minimize(loss4, [var5, var6])
      self.evaluate(variables.global_variables_initializer())
      opt2.set_weights(weights)
      self.evaluate([opt_op_3, opt_op_4])
      self.assertAllClose(
          self.evaluate([var3, var4]), self.evaluate([var5, var6]))
  @test_util.run_in_graph_and_eager_modes
  def testGettingHyperParameters(self):
    """opt.lr is readable, settable by assignment, and assignable as a
    variable; unknown attributes still raise AttributeError."""
    opt = adam.Adam(learning_rate=1.0)
    var = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                 dtype=dtypes.float32)
    loss = lambda: 3 * var
    opt_op = opt.minimize(loss, [var])
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(opt_op)
    lr = self.evaluate(opt.lr)
    self.assertEqual(1.0, lr)
    opt.lr = 2.0
    lr = self.evaluate(opt.lr)
    self.assertEqual(2.0, lr)
    self.evaluate(opt.lr.assign(3.0))
    lr = self.evaluate(opt.lr)
    self.assertEqual(3.0, lr)
    with self.assertRaises(AttributeError):
      opt.not_an_attr += 3
  @test_util.run_in_graph_and_eager_modes
  def testGettingHyperParametersWithLrInConstructor(self):
    """The legacy 'lr' constructor alias behaves like learning_rate and
    both resolve to the same backing variable."""
    opt = gradient_descent.SGD(lr=3.0)
    var = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                 dtype=dtypes.float32)
    loss = lambda: 3 * var
    opt_op = opt.minimize(loss, [var])
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(opt_op)
    self.assertTrue(isinstance(opt.lr, resource_variable_ops.ResourceVariable))
    self.assertTrue(
        isinstance(opt.learning_rate, resource_variable_ops.ResourceVariable))
    lr = self.evaluate(opt.lr)
    self.assertEqual(3.0, lr)
    opt.lr = 2.0
    lr = self.evaluate(opt.lr)
    self.assertEqual(2.0, lr)
    self.evaluate(opt.lr.assign(4.0))
    lr = self.evaluate(opt.lr)
    self.assertEqual(4.0, lr)
  @test_util.run_in_graph_and_eager_modes
  def testOptimizerWithKerasModel(self):
    """A v2 optimizer trains a multi-input/multi-output functional model."""
    a = input_layer.Input(shape=(3,), name='input_a')
    b = input_layer.Input(shape=(3,), name='input_b')
    dense = core.Dense(4, name='dense')
    c = dense(a)
    d = dense(b)
    e = core.Dropout(0.5, name='dropout')(c)
    model = training.Model([a, b], [d, e])
    optimizer = gradient_descent.SGD(learning_rate=0.001)
    loss = 'mse'
    model.compile(optimizer, loss, metrics=['mae'])
    input_a_np = np.random.random((10, 3))
    input_b_np = np.random.random((10, 3))
    output_d_np = np.random.random((10, 4))
    output_e_np = np.random.random((10, 4))
    # A single epoch is enough: the test only checks fit() runs end to end.
    model.fit([input_a_np, input_b_np], [output_d_np, output_e_np],
              epochs=1,
              batch_size=5)
  @test_util.run_in_graph_and_eager_modes
  def testOptimizerWithCallbacks(self):
    """ReduceLROnPlateau adjusts the v2 optimizer's lr variable."""
    np.random.seed(1331)
    input_np = np.random.random((10, 3))
    output_np = np.random.random((10, 4))
    a = input_layer.Input(shape=(3,), name='input_a')
    model = sequential.Sequential()
    model.add(core.Dense(4, kernel_initializer='zeros', name='dense'))
    model.add(core.Dropout(0.5, name='dropout'))
    model(a)
    optimizer = gradient_descent.SGD(learning_rate=0.1)
    model.compile(optimizer, loss='mse', metrics=['mae'])
    # This does not reduce the LR after the first epoch (due to low delta).
    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5)
    ]
    model.fit(
        input_np,
        output_np,
        batch_size=10,
        validation_data=(input_np, output_np),
        callbacks=cbks,
        epochs=2,
        verbose=0)
    self.assertAllClose(
        float(backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
    # This should reduce the LR after the first epoch (due to high delta).
    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.1,
            min_delta=10,
            patience=1,
            cooldown=5)
    ]
    model.fit(
        input_np,
        output_np,
        batch_size=10,
        validation_data=(input_np, output_np),
        callbacks=cbks,
        epochs=2,
        verbose=2)
    # lr was reduced by factor=0.1: 0.1 -> 0.01.
    self.assertAllClose(
        float(backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
  def testOptimizerSetIterations(self):
    """opt.iterations can be replaced with an existing global-step variable."""
    global_step = training_util.get_or_create_global_step()
    opt = adam.Adam(learning_rate=1.0)
    opt.iterations = global_step
    var = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                 dtype=dtypes.float32)
    self.evaluate(variables.global_variables_initializer())
    init_step_value = self.evaluate(global_step)
    loss = lambda: 3 * var
    opt_op = opt.minimize(loss, [var])
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(opt_op)
    new_step_value = self.evaluate(global_step)
    # One minimize() call increments the shared step exactly once.
    self.assertEqual(new_step_value, init_step_value + 1)
  @test_util.run_in_graph_and_eager_modes
  def testOptimizerWithCallableVarList(self):
    """var_list may be a callable, evaluated lazily once weights exist."""
    train_samples = 20
    input_dim = 1
    num_classes = 2
    (x, y), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=10,
        input_shape=(input_dim,),
        num_classes=num_classes)
    y = np_utils.to_categorical(y)
    num_hidden = 1
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=num_hidden, num_classes=num_classes)
    opt = adam.Adam()
    loss = lambda: losses.mean_squared_error(model(x), y)
    var_list = lambda: model.trainable_weights
    # Before the model is built, its weights do not exist yet.
    with self.assertRaisesRegexp(
        ValueError, 'Weights for model .* have not yet been created'):
      var_list()
    train_op = opt.minimize(loss, var_list)
    if not context.executing_eagerly():
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(
          [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
    self.evaluate(train_op)
    # After one step the Adam 'm' slot has moved away from zero.
    self.assertNotEqual(
        [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
    self.assertLen(var_list(), 4)
  def testVarKey(self):
    """_var_key uses graph-mode variable names, uniquified with a suffix."""
    with ops.get_default_graph().as_default():
      a = variables.Variable([1., 2.], name='var')
      b = variables.Variable([1.], name='var')
      self.assertTrue(a._in_graph_mode)
      self.assertTrue(b._in_graph_mode)
      var_key = optimizer_v2._var_key(a)
      self.assertEqual('var', var_key)
      var_key = optimizer_v2._var_key(b)
      # The second graph variable with the same requested name is uniquified.
      self.assertEqual('var_1', var_key)
  def testVarName(self):
    """Slot variables are named under the optimizer and active name scope."""
    with ops.get_default_graph().as_default():
      var = variables.Variable([1., 2.], name='var')
      loss = var + 1.
      opt = adam.Adam()
      opt.get_updates(loss, [var])
      opt_vars = opt.variables()
      # iter plus Adam's 'm' and 'v' slots for one variable.
      self.assertLen(opt_vars, 3)
      self.assertEqual('Adam/iter:0', opt_vars[0].name)
      self.assertEqual('Adam/var/m:0', opt_vars[1].name)
      var_2 = variables.Variable([1., 2.], name='var_2')
      loss = var_2 + 1.
      # ('outter' is just an arbitrary scope name used by this test.)
      with backend.name_scope('outter'):
        opt.get_updates(loss, [var_2])
      opt_vars = opt.variables()
      self.assertLen(opt_vars, 5)
      self.assertEqual('outter/Adam/var_2/m:0', opt_vars[3].name)
  @test_util.run_in_graph_and_eager_modes
  def testEmptyVarList(self):
    """minimize() and apply_gradients() tolerate an empty variable list."""
    opt = gradient_descent.SGD(1.)
    opt.minimize(lambda: constant_op.constant(1.), [])
    opt.apply_gradients([])
  @test_util.run_in_graph_and_eager_modes
  def testAggregationTrue(self):
    """all_reduce_sum_gradients=True works without a distribution strategy."""
    # Test that all_reduce_sum_gradients=True works without distributed
    # strategy.
    var = resource_variable_ops.ResourceVariable([1., 2.])
    opt = gradient_descent.SGD(3.0)
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose([1., 2.], self.evaluate(var))
    opt_op = opt.apply_gradients([([0.1, 0.1], var)],
                                 all_reduce_sum_gradients=True)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(opt_op)
    # var -= lr * grad: [1., 2.] - 3.0 * 0.1 = [0.7, 1.7]
    self.assertAllClose([0.7, 1.7], self.evaluate(var))
  @test_util.run_in_graph_and_eager_modes
  def testAggregationFalse(self):
    """all_reduce_sum_gradients=False works without a distribution strategy."""
    # Test that all_reduce_sum_gradients=False works without distributed
    # strategy.
    var = resource_variable_ops.ResourceVariable([1., 2.])
    opt = gradient_descent.SGD(3.0)
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose([1., 2.], self.evaluate(var))
    opt_op = opt.apply_gradients([([0.1, 0.1], var)],
                                 all_reduce_sum_gradients=False)
    self.evaluate(variables.global_variables_initializer())
    self.evaluate(opt_op)
    # var -= lr * grad: [1., 2.] - 3.0 * 0.1 = [0.7, 1.7]
    self.assertAllClose([0.7, 1.7], self.evaluate(var))
@keras_parameterized.run_all_keras_modes
class OptimizersCompatibilityTest(keras_parameterized.TestCase):
  """Checks numeric agreement between Keras v1 and v2 optimizers."""

  def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
    """Trains twin models with a v1 and a v2 optimizer and compares results.

    Args:
      opt_v1: a `keras.optimizers` (v1) optimizer instance.
      opt_v2: the corresponding `optimizer_v2` instance.
      test_weights: if True, copy the v1 optimizer's slot weights into the
        v2 optimizer before the comparison run.
    """
    if context.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in eager mode')
    np.random.seed(1331)
    with test_util.use_gpu():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = np_utils.to_categorical(y)
      num_hidden = 5
      model_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_v1.compile(
          opt_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      # Warm-up fit so the v1 optimizer has slot weights to copy below.
      model_v1.fit(x, y, batch_size=5, epochs=1)
      model_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_v2.set_weights(model_v1.get_weights())
      model_v2.compile(
          opt_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      if not ops.executing_eagerly_outside_functions():
        model_v2._make_train_function()
      if test_weights:
        opt_v2.set_weights(opt_v1.get_weights())
      # With identical starting weights (and optionally optimizer state),
      # one more epoch must produce near-identical weights and losses.
      hist_1 = model_v1.fit(x, y, batch_size=5, epochs=1, shuffle=False)
      hist_2 = model_v2.fit(x, y, batch_size=5, epochs=1, shuffle=False)
      self.assertAllClose(model_v1.get_weights(), model_v2.get_weights(),
                          rtol=1e-5, atol=1e-5)
      self.assertAllClose(hist_1.history['loss'], hist_2.history['loss'],
                          rtol=1e-5, atol=1e-5)

  def testAdadeltaCompatibility(self):
    opt_v1 = optimizers.Adadelta(lr=0.01)
    opt_v2 = adadelta.Adadelta(learning_rate=0.01)
    self._testOptimizersCompatibility(opt_v1, opt_v2)

  def testAdagradCompatibility(self):
    opt_v1 = optimizers.Adagrad(lr=0.01)
    opt_v2 = adagrad.Adagrad(learning_rate=0.01)
    self._testOptimizersCompatibility(opt_v1, opt_v2)

  def testAdamCompatibility(self):
    opt_v1 = optimizers.Adam()
    opt_v2 = adam.Adam()
    self._testOptimizersCompatibility(opt_v1, opt_v2)

  def testAdamaxCompatibility(self):
    opt_v1 = optimizers.Adamax(lr=0.01)
    opt_v2 = adamax.Adamax(learning_rate=0.01)
    self._testOptimizersCompatibility(opt_v1, opt_v2)

  def testNadamCompatibility(self):
    opt_v1 = optimizers.Nadam(lr=0.001)
    opt_v2 = nadam.Nadam(learning_rate=0.001)
    self._testOptimizersCompatibility(opt_v1, opt_v2)

  def testMomentumCompatibility(self):
    opt_v1 = optimizers.SGD(lr=0.01, momentum=0.9)
    opt_v2 = gradient_descent.SGD(learning_rate=0.01, momentum=0.9)
    self._testOptimizersCompatibility(opt_v1, opt_v2)

  def testRMSpropCompatibility(self):
    opt_v1 = optimizers.RMSprop()
    opt_v2 = rmsprop.RMSprop()
    self._testOptimizersCompatibility(opt_v1, opt_v2)

  def testSGDCompatibility(self):
    # Plain SGD has no slot weights, so skip the weight-copy step.
    opt_v1 = optimizers.SGD(lr=0.01)
    opt_v2 = gradient_descent.SGD(learning_rate=0.01)
    self._testOptimizersCompatibility(opt_v1, opt_v2, False)

  def testNumericEquivalenceForNesterovMomentum(self):
    """Keras v1/v2 SGD and tf.train Momentum agree with Nesterov enabled."""
    if context.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in eager mode')
    np.random.seed(1331)
    with test_util.use_gpu():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = np_utils.to_categorical(y)
      num_hidden = 5
      model_k_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())
      model_tf = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_tf.set_weights(model_k_v2.get_weights())
      opt_k_v1 = optimizers.SGD(momentum=0.9, nesterov=True)
      opt_k_v2 = gradient_descent.SGD(momentum=0.9, nesterov=True)
      opt_tf = momentum.MomentumOptimizer(
          learning_rate=0.01, momentum=0.9, use_nesterov=True)
      model_k_v1.compile(
          opt_k_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      model_k_v2.compile(
          opt_k_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      model_tf.compile(
          opt_tf,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_tf = model_tf.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      self.assertAllClose(model_k_v1.get_weights(), model_tf.get_weights())
      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_tf.history['loss'])
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])

  def testNumericEquivalenceForAmsgrad(self):
    """Keras v1/v2 Adam agree with amsgrad=True (disabled: b/150382655)."""
    self.skipTest('b/150382655')
    if context.executing_eagerly():
      self.skipTest(
          'v1 optimizer does not run in eager mode')
    np.random.seed(1331)
    with test_util.use_gpu():
      train_samples = 20
      input_dim = 3
      num_classes = 2
      (x, y), _ = testing_utils.get_test_data(
          train_samples=train_samples,
          test_samples=10,
          input_shape=(input_dim,),
          num_classes=num_classes)
      y = np_utils.to_categorical(y)
      num_hidden = 5
      model_k_v1 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2 = testing_utils.get_small_sequential_mlp(
          num_hidden=num_hidden, num_classes=num_classes, input_dim=input_dim)
      model_k_v2.set_weights(model_k_v1.get_weights())
      opt_k_v1 = optimizers.Adam(amsgrad=True)
      opt_k_v2 = adam.Adam(amsgrad=True)
      model_k_v1.compile(
          opt_k_v1,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      model_k_v2.compile(
          opt_k_v2,
          loss='categorical_crossentropy',
          metrics=[],
          run_eagerly=testing_utils.should_run_eagerly())
      hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
      self.assertAllClose(model_k_v1.get_weights(), model_k_v2.get_weights())
      self.assertAllClose(opt_k_v1.get_weights(), opt_k_v2.get_weights())
      self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
# Note: These tests are kept in a separate class to avoid bugs in some
# distributions of Python that break AutoGraph which is used by tf.function.
class OptimizerWithFunctionTest(test.TestCase):
  """Tests optimizer behavior inside tf.function-compiled code."""

  def testBasic(self):
    """minimize() inside a tf.function updates the variable across calls."""
    with context.eager_mode():
      var = resource_variable_ops.ResourceVariable([1.0, 2.0],
                                                   dtype=dtypes.float32)
      loss = lambda: 3 * var
      opt = adam.Adam(learning_rate=1.0)

      @def_function.function
      def fn():
        opt.minimize(loss, [var])
        return var

      # With lr=1.0, each call moves each coordinate by ~1.
      self.assertAllClose([0., 1.], fn(), atol=1e-4)
      self.assertAllClose([-1, 0.], fn(), atol=1e-4)

  def testVarKeyWithVarCreatedInEager(self):
    """Eager-created variables with clashing names get distinct slot keys."""
    with context.eager_mode():
      a = variables.Variable([1., 2.], name='var')
      b = variables.Variable([1.], name='var')

      @test_util.also_run_as_tf_function
      def var_key_test():
        self.assertFalse(a._in_graph_mode)
        self.assertFalse(b._in_graph_mode)
        var_key_a = optimizer_v2._var_key(a)
        self.assertStartsWith(var_key_a, 'var_')
        var_key_b = optimizer_v2._var_key(b)
        self.assertStartsWith(var_key_b, 'var_')
        # Fixed: assertNotEquals is a deprecated unittest alias (removed in
        # Python 3.12); assertNotEqual is the supported spelling.
        self.assertNotEqual(var_key_a, var_key_b)

      var_key_test()

  def testLearningRateDecayUsedInTwoFunctions(self):
    """One LR schedule instance can back two independent tf.functions."""
    with context.eager_mode():
      a = variables.Variable([1., 2.], name='var')
      b = variables.Variable([1.], name='var')

      learning_rate_decay = learning_rate_schedule.InverseTimeDecay(
          0.5, decay_steps=1.0, decay_rate=0.5)
      opt = adam.Adam(learning_rate=learning_rate_decay)
      loss_a = lambda: 3 * a
      loss_b = lambda: 2 * b

      @def_function.function
      def fn_a():
        opt.minimize(loss_a, [a])
        return a

      @def_function.function
      def fn_b():
        opt.minimize(loss_b, [b])
        return b

      # Success criterion is simply that both traced functions execute.
      fn_a()
      fn_b()
# Number of weak learners built by make_model(); also the threshold used by
# identify_redundant_ops to decide a duplicate is "per-apply".
_NUM_LEARNERS = 50

# Name scope the coefficient tests wrap around apply_gradients().
APPLY_SCOPE = 'debug_apply'

# (name substring, op type) pairs of known, acceptable duplicate ops.
WHITELIST = [
    # optimizer_v2._deduplicate_indexed_slices contains an indexed slice:
    #   array_ops.shape(unique_indices)[0]
    # which winds up expanding to [0:1:1] thereby creating three constants
    # to represent the indices.
    ('embeddings/strided_slice/stack', 'Const'),
]
def get_inputs(op):
  """Return (producer ops, names) for all data and control inputs of `op`."""
  combined = list(op.inputs) + op.control_inputs
  producers = []
  input_names = []
  for item in combined:
    input_names.append(item.name)
    # Data inputs are tensors, so take their producing op; control inputs
    # are already ops and lack an `.op` attribute.
    producers.append(getattr(item, 'op', item))
  return producers, input_names
def strip_name(node):
  """Blank out a NodeDef's name, except for Placeholder variants."""
  # Placeholder names are kept so distinct feeds do not collapse together.
  is_placeholder = 'Placeholder' in node.op
  if not is_placeholder:
    node.name = ''
def topological_sort(graph):
  """Topologically sorts a graph's operations (Kahn's algorithm).

  Raises ValueError if not every op could be scheduled, which indicates
  a cycle or inconsistent input bookkeeping.
  """
  graph_ops = graph.get_operations()

  sources = []
  result = []

  # inputs[op] = remaining unscheduled producers; outputs[op] = consumers.
  inputs = {}
  outputs = collections.defaultdict(set)
  for op in graph_ops:
    op_inputs = get_inputs(op)[0]
    if not op_inputs:
      sources.append(op)
    inputs[op] = set(op_inputs)
    for i in op_inputs:
      outputs[i].add(op)

  while sources:
    op = sources.pop()
    # An op becomes ready once all of its producers have been scheduled.
    for op_output in outputs[op]:
      inputs[op_output].remove(op)
      if not inputs[op_output]:
        sources.append(op_output)
    result.append(op)

  # Check correctness.
  if len(result) != len(graph_ops):
    raise ValueError('Sort result has {} ops, source graph has {}.'
                     .format(len(result), len(graph_ops)))

  # Sanity check: every op appears after all of its producers.
  sort_check_seen = set()
  for op in result:
    sort_check_seen.add(op)
    for i in get_inputs(op)[0]:
      assert i in sort_check_seen

  return result
def identify_redundant_ops(graph):
  """Implements basic common subexpression elimination.

  This is not intended to replicate the graph semantics of TensorFlow Graphs
  (for instance it does not handle stateful op ordering), nor is it intended to
  replace the common subexpression elimination Grappler pass. Rather, it
  provides a high level sanity check that clearly redundant ops are not being
  created.

  Args:
    graph: The graph to be analyzed.

  Returns:
    A count of the duplicate ops and a description of the structure of each.
  """
  sorted_ops = topological_sort(graph)
  duplicates = collections.defaultdict(list)
  unified_node_defs = {}
  name_map = {}

  for op in sorted_ops:
    input_names = []
    for op_input, name in zip(*get_inputs(op)):
      input_def = op_input.node_def

      # Operations can have multiple outputs. We track which is used to prevent
      # overzealous elimination.
      input_def.name = name
      input_def.input[:] = [name_map.get(i, i) for i in input_def.input]
      strip_name(input_def)

      # NodeDef.SerializeToString() does not provide identical serialized
      # representations for identical NodeDefs, so we instead use string
      # representation as a dict key.
      key = repr(input_def)

      if key in unified_node_defs:
        # Structurally identical input seen before: reuse its canonical name.
        input_names.append(unified_node_defs[key])
      else:
        unified_node_defs[key] = op_input.name
        input_names.append(name)

    node_def = op.node_def
    node_def.input[:] = input_names
    strip_name(node_def)

    key = repr(node_def)
    duplicates[key].append(op)
    name_map[op.name] = duplicates[key][0].name

  num_duplicates = 0
  duplicate_types = []
  for standard_def, op_defs in duplicates.items():
    # We are only interested in testing the apply method of the optimizer
    op_defs = [i for i in op_defs if APPLY_SCOPE in i.name]

    # We only check for per-apply redundant ops.
    if len(op_defs) < _NUM_LEARNERS:
      continue

    # Certain ops are simply not worth eliminating, and are instead simply
    # ignored.
    name, op_type = op_defs[0].name, op_defs[0].type
    if any(whitelisted_scope in name and op_type == whitelisted_type
           for whitelisted_scope, whitelisted_type in WHITELIST):
      continue

    num_duplicates += len(op_defs)
    traceback = []
    for level in op_defs[0].traceback:
      traceback.append('  {} {}:{}'.format(level[0], level[2], level[1]))

    duplicate_types.append(
        '# Example name: {}\n# Op creation stack:\n{}\n{}'.format(
            op_defs[0].name,
            '\n'.join(traceback),
            standard_def))

  return num_duplicates, duplicate_types
def make_model():
  r"""Constructs a simple ensemble of weak learners model.

  ---------    ---------             ---------    ---------
  | Input |    | Input |     ...     | Input |    | Input |
  ---------    ---------             ---------    ---------
      |            |                     |            |
      V            V                     V            V
  ---------    ---------             ---------    ---------
  | Embed |    | Embed |     ...     | Embed |    | Embed |
  ---------    ---------             ---------    ---------
      |            |                     |            |
      V            V                     V            V
  ---------    ---------             ---------    ---------
  | Dense |    | Dense |     ...     | Dense |    | Dense |
  ---------    ---------             ---------    ---------
      \            |                     |            /
       \           |                     |           /
        ---------------------------------------------
                             |
                         ---------
                         | Dense |
                         ---------

  This topology is chosen because it exercises both dense and sparse update
  paths.

  Returns:
    A model for testing optimizer coefficient reuse.
  """
  inputs = []
  intermediates = []
  for _ in range(_NUM_LEARNERS):
    inp = keras.layers.Input(shape=(1,), dtype=dtypes.int32)
    layer = keras.layers.Embedding(1, 4)(inp)
    layer = keras.layers.Dense(1)(layer)
    inputs.append(inp)
    intermediates.append(layer)

  layer = keras.layers.Concatenate(axis=-1)(intermediates)
  layer = keras.layers.Dense(1)(layer)

  return keras.models.Model(inputs, layer)
# (test name, optimizer class, constructor kwargs) parameterizations for the
# coefficient-reuse tests below; None means "no extra kwargs".
COEFFICIENT_PARAMS = (
    ('Adadelta', adadelta.Adadelta, None),
    ('Adagrad', adagrad.Adagrad, None),
    ('Adam', adam.Adam, None),
    ('Adam_amdgrad', adam.Adam, dict(amsgrad=True)),
    ('Adamax', adamax.Adamax, None),
    ('Ftrl', ftrl.Ftrl, None),
    ('Ftrl_l2_shrinkage', ftrl.Ftrl,
     dict(l2_shrinkage_regularization_strength=0.1)),
    ('SGD', gradient_descent.SGD, None),
    ('SGD_momentum', gradient_descent.SGD, dict(momentum=0.5)),
    ('Nadam', nadam.Nadam, None),
    ('RMSprop', rmsprop.RMSprop, None),
    ('RMSprop_centered', rmsprop.RMSprop, dict(centered=True)),
    ('RMSprop_momentum', rmsprop.RMSprop, dict(momentum=0.5)),
    ('RMSprop_momentum_centered', rmsprop.RMSprop,
     dict(momentum=0.5, centered=True)),
)
class OptimizerCoefficientTest(keras_parameterized.TestCase):
  """Checks that optimizers reuse coefficient ops across variable applies."""

  @parameterized.named_parameters(*COEFFICIENT_PARAMS)
  def test_duplicate_ops(self, optimizer_class, init_kwargs=None):
    """apply_gradients must not create per-variable duplicate ops."""
    init_kwargs = init_kwargs or {}
    optimizer = optimizer_class(**init_kwargs)

    graph = ops.Graph()
    with graph.as_default():
      model = make_model()
      trainable_variables = model.trainable_variables
      grads = optimizer.get_gradients(model.outputs[0], trainable_variables)

      with backend.name_scope(APPLY_SCOPE):
        optimizer.apply_gradients(zip(grads, trainable_variables))

    num_duplicates, duplicate_types = identify_redundant_ops(graph)
    if num_duplicates:
      # Avoid spamming logs.
      if len(duplicate_types) > 3:
        duplicate_types = duplicate_types[:3] + ['...']

      num_total = len(graph.get_operations())
      raise ValueError('{} of {} ({:.1f}%) ops were duplicates:\n\n{}'.format(
          num_duplicates, num_total, num_duplicates / num_total * 100,
          '\n'.join(duplicate_types)))

  @parameterized.named_parameters(*COEFFICIENT_PARAMS)
  def test_subclass_compat(self, optimizer_class, init_kwargs=None):
    """Ensure that subclassed optimizers without apply_state still work."""

    class SubclassedOptimizer(optimizer_class):

      def _resource_apply_dense(self, grad, var):  # pylint: disable=useless-super-delegation
        return super(SubclassedOptimizer, self)._resource_apply_dense(grad, var)

      def _resource_apply_sparse(self, grad, var, indices):  # pylint: disable=useless-super-delegation
        return super(SubclassedOptimizer, self)._resource_apply_sparse(
            grad, var, indices)

    init_kwargs = init_kwargs or {}
    optimizer = SubclassedOptimizer(**init_kwargs)

    graph = ops.Graph()
    with graph.as_default():
      model = make_model()
      trainable_variables = model.trainable_variables
      grads = optimizer.get_gradients(model.outputs[0], trainable_variables)

      # Success criterion: apply_gradients builds without raising.
      with backend.name_scope(APPLY_SCOPE):
        optimizer.apply_gradients(zip(grads, trainable_variables))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| |
#!/usr/bin/python3
import subprocess, shutil, os, sqlite3, re
import utils
def validate_email(email, mode=None):
	"""Check whether `email` is an acceptable address; returns a bool.

	mode='user' restricts the local part to characters that Dovecot's
	sqlite driver handles safely. mode=None or 'alias' allows full
	RFC 2822 atext; 'alias' additionally permits an empty local part
	('@domain.tld') for catch-alls/domain aliases. Any other mode
	raises ValueError.
	"""
	# There are a lot of characters permitted in email addresses, but
	# Dovecot's sqlite driver seems to get confused if there are any
	# unusual characters in the address. Bah. Also note that since
	# the mailbox path name is based on the email address, the address
	# shouldn't be absurdly long and must not have a forward slash.
	if len(email) > 255: return False

	if mode == 'user':
		# For Dovecot's benefit, only allow basic characters.
		ATEXT = r'[a-zA-Z0-9_\-]'
	elif mode in (None, 'alias'):
		# For aliases, we can allow any valid email address.
		# Based on RFC 2822 and https://github.com/SyrusAkbary/validate_email/blob/master/validate_email.py,
		# these characters are permitted in email addresses.
		ATEXT = r'[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]' # see 3.2.4
	else:
		raise ValueError(mode)

	# per RFC 2822 3.2.4
	DOT_ATOM_TEXT_LOCAL = ATEXT + r'+(?:\.' + ATEXT + r'+)*'
	if mode == 'alias':
		# For aliases, Postfix accepts '@domain.tld' format for
		# catch-all addresses on the source side and domain aliases
		# on the destination side. Make the local part optional.
		DOT_ATOM_TEXT_LOCAL = '(?:' + DOT_ATOM_TEXT_LOCAL + ')?'

	# as above, but we can require that the host part have at least
	# one period in it, so use a "+" rather than a "*" at the end
	DOT_ATOM_TEXT_HOST = ATEXT + r'+(?:\.' + ATEXT + r'+)+'

	# per RFC 2822 3.4.1
	ADDR_SPEC = '^(%s)@(%s)$' % (DOT_ATOM_TEXT_LOCAL, DOT_ATOM_TEXT_HOST)

	# Check the regular expression.
	m = re.match(ADDR_SPEC, email)
	if not m: return False

	# Check that the domain part is IDNA-encodable.
	localpart, domainpart = m.groups()
	try:
		domainpart.encode("idna")
	except UnicodeError:
		# Narrowed from a bare `except:` so unrelated errors (e.g.
		# KeyboardInterrupt) are no longer swallowed. The idna codec
		# raises UnicodeError for invalid labels.
		return False

	return True
def sanitize_idn_email_address(email):
	"""Normalize an IDNA-encoded domain part to Unicode for storage.

	Chrome may IDNA-ize <input type="email"> values before POSTing, so we
	normalize before putting values into the database. If the address is
	malformed or the domain is not IDNA-decodable, it is returned unchanged.
	"""
	try:
		localpart, domainpart = email.split("@")
		domainpart = domainpart.encode("ascii").decode("idna")
		return localpart + "@" + domainpart
	except (ValueError, UnicodeError):
		# Domain part is already Unicode or not IDNA-valid, so leave
		# unchanged. Narrowed from a bare `except:` — ValueError covers
		# addresses that don't split into exactly two parts; UnicodeError
		# (a ValueError subclass, listed for clarity) covers failed
		# ascii/idna codec round-trips.
		return email
def open_database(env, with_connection=False):
	"""Open the mail users/aliases sqlite database under STORAGE_ROOT.

	Returns just a cursor by default; pass with_connection=True to also
	receive the connection (needed for committing writes).
	"""
	db_path = env["STORAGE_ROOT"] + "/mail/users.sqlite"
	conn = sqlite3.connect(db_path)
	cursor = conn.cursor()
	return (conn, cursor) if with_connection else cursor
def get_mail_users(env):
	"""Return every user account address as a flat, sorted list."""
	cursor = open_database(env)
	cursor.execute('SELECT email FROM users')
	addresses = [email for (email,) in cursor.fetchall()]
	return utils.sort_email_addresses(addresses, env)
def get_mail_users_ex(env, with_archived=False, with_slow_info=False):
	# Returns a complex data structure of all user accounts, optionally
	# including archived (status="inactive") accounts.
	#
	# [
	#   {
	#     domain: "domain.tld",
	#     users: [
	#       {
	#         email: "name@domain.tld",
	#         privileges: [ "priv1", "priv2", ... ],
	#         status: "active",
	#         aliases: [
	#           ("alias@domain.tld", ["indirect.alias@domain.tld", ...]),
	#           ...
	#         ]
	#       },
	#       ...
	#     ]
	#   },
	#   ...
	# ]
	#
	# with_slow_info additionally fills in per-user "aliases" and
	# "mailbox_size" (a disk-usage scan, hence "slow").

	# Pre-load all aliases.
	aliases = get_mail_alias_map(env)

	# Get users and their privileges.
	users = []
	active_accounts = set()
	c = open_database(env)
	c.execute('SELECT email, privileges FROM users')
	for email, privileges in c.fetchall():
		active_accounts.add(email)
		user = {
			"email": email,
			"privileges": parse_privs(privileges),
			"status": "active",
		}
		users.append(user)
		if with_slow_info:
			user["aliases"] = [
				(alias, sorted(evaluate_mail_alias_map(alias, aliases, env)))
				for alias in aliases.get(email.lower(), [])
			]
			# Mailboxes live at STORAGE_ROOT/mail/mailboxes/<domain>/<localpart>.
			user["mailbox_size"] = utils.du(os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes', *reversed(email.split("@"))))

	# Add in archived accounts: mailbox directories still on disk whose
	# address no longer exists in the users table.
	if with_archived:
		root = os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes')
		for domain in os.listdir(root):
			for user in os.listdir(os.path.join(root, domain)):
				# NOTE: `user` is first the directory name (localpart), then
				# rebound to the user dict below.
				email = user + "@" + domain
				mbox = os.path.join(root, domain, user)
				if email in active_accounts: continue
				user = {
					"email": email,
					"privileges": "",
					"status": "inactive",
					"mailbox": mbox,
				}
				users.append(user)
				if with_slow_info:
					user["mailbox_size"] = utils.du(mbox)

	# Group by domain.
	domains = { }
	for user in users:
		domain = get_domain(user["email"])
		if domain not in domains:
			domains[domain] = {
				"domain": domain,
				"users": []
			}
		domains[domain]["users"].append(user)

	# Sort domains.
	domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)]

	# Sort users within each domain first by status then lexicographically by email address.
	for domain in domains:
		domain["users"].sort(key = lambda user : (user["status"] != "active", user["email"]))

	return domains
def get_admins(env):
	"""Return the set of account addresses holding the 'admin' privilege."""
	admins = set()
	for domain in get_mail_users_ex(env):
		admins.update(
			user["email"]
			for user in domain["users"]
			if "admin" in user["privileges"])
	return admins
def get_mail_aliases(env):
	"""Return sorted (source, forward-to string) tuples for all aliases."""
	cursor = open_database(env)
	cursor.execute('SELECT source, destination FROM aliases')
	by_source = dict(cursor.fetchall())
	# Canonical order: by domain first, then lexicographically by address.
	ordered_sources = utils.sort_email_addresses(by_source.keys(), env)
	return [(source, by_source[source]) for source in ordered_sources]
def get_mail_aliases_ex(env):
	# Returns a complex data structure of all mail aliases, similar
	# to get_mail_users_ex.
	#
	# [
	#   {
	#     domain: "domain.tld",
	#     alias: [
	#       {
	#         source: "name@domain.tld",
	#         destination: ["target1@domain.com", "target2@domain.com", ...],
	#         required: True|False
	#       },
	#       ...
	#     ]
	#   },
	#   ...
	# ]
	#
	# "required" marks aliases the system itself mandates (plus the
	# system administrator address), which the UI should not allow deleting.
	required_aliases = get_required_aliases(env)
	domains = {}
	for source, destination in get_mail_aliases(env):
		# get alias info
		domain = get_domain(source)
		required = ((source in required_aliases) or (source == get_system_administrator(env)))

		# add to list
		if not domain in domains:
			domains[domain] = {
				"domain": domain,
				"aliases": [],
			}
		domains[domain]["aliases"].append({
			"source": source,
			"destination": [d.strip() for d in destination.split(",")],
			"required": required,
		})

	# Sort domains.
	domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)]

	# Sort aliases within each domain first by required-ness then lexicographically by source address.
	for domain in domains:
		domain["aliases"].sort(key = lambda alias : (alias["required"], alias["source"]))
	return domains
def get_mail_alias_map(env):
	"""Map each destination address (lowercased) to the alias sources
	that deliver to it."""
	alias_map = {}
	for source, destinations in get_mail_aliases(env):
		for dest in destinations.split(","):
			key = dest.strip().lower()
			alias_map.setdefault(key, []).append(source)
	return alias_map
def evaluate_mail_alias_map(email, aliases, env, _visited=None):
	"""Transitively collect every alias that ultimately delivers to `email`.

	`aliases` maps a destination address (lowercase) to the list of alias
	sources pointing at it (see get_mail_alias_map). Returns a set of alias
	addresses. `_visited` is internal and defaulted for backward
	compatibility: it guards against alias cycles (a -> b -> a), which
	previously caused unbounded recursion (RecursionError).
	"""
	if _visited is None:
		_visited = set()
	ret = set()
	for alias in aliases.get(email.lower(), []):
		ret.add(alias)
		# Only expand aliases we haven't already expanded on this walk.
		if alias.lower() not in _visited:
			_visited.add(alias.lower())
			ret |= evaluate_mail_alias_map(alias, aliases, env, _visited)
	return ret
def get_domain(emailaddr):
	"""Return everything after the first '@' of an email address.

	Raises IndexError if the string contains no '@' (matches the
	historical behavior callers rely on).
	"""
	parts = emailaddr.split('@', 1)
	return parts[1]
def get_mail_domains(env, filter_aliases=lambda alias : True):
	"""Return the set of domains in use by mail users and (filtered) aliases."""
	domains = set(get_domain(addr) for addr in get_mail_users(env))
	for source, target in get_mail_aliases(env):
		if filter_aliases((source, target)):
			domains.add(get_domain(source))
	return domains
def add_mail_user(email, pw, privs, env):
	"""Create a new mail user with a hashed password and optional privileges.

	privs is a newline-separated string (or None/blank for none). Returns
	kick() output on success, or an (error message, HTTP status) tuple.
	"""
	# accept IDNA domain names but normalize to Unicode before going into database
	email = sanitize_idn_email_address(email)

	# validate email
	if email.strip() == "":
		return ("No email address provided.", 400)
	if not validate_email(email, mode='user'):
		return ("Invalid email address.", 400)

	validate_password(pw)

	# validate privileges
	if privs is None or privs.strip() == "":
		privs = []
	else:
		privs = privs.split("\n")
		for p in privs:
			validation = validate_privilege(p)
			if validation: return validation

	# get the database
	conn, c = open_database(env, with_connection=True)

	# hash the password
	pw = hash_password(pw)

	# add the user to the database
	try:
		c.execute("INSERT INTO users (email, password, privileges) VALUES (?, ?, ?)",
			(email, pw, "\n".join(privs)))
	except sqlite3.IntegrityError:
		return ("User already exists.", 400)

	# write database before next step
	conn.commit()

	# Create the user's INBOX, Spam, and Drafts folders, and subscribe them.
	# K-9 mail will poll every 90 seconds if a Drafts folder does not exist, so create it
	# to avoid unnecessary polling.

	# Check if the mailboxes exist before creating them. When creating a user that had previously
	# been deleted, the mailboxes will still exist because they are still on disk.
	try:
		existing_mboxes = utils.shell('check_output', ["doveadm", "mailbox", "list", "-u", email, "-8"], capture_stderr=True).split("\n")
	except subprocess.CalledProcessError as e:
		# Roll back the INSERT so the account is not left half-created.
		c.execute("DELETE FROM users WHERE email=?", (email,))
		conn.commit()
		return ("Failed to initialize the user: " + e.output.decode("utf8"), 400)

	for folder in ("INBOX", "Spam", "Drafts"):
		if folder not in existing_mboxes:
			utils.shell('check_call', ["doveadm", "mailbox", "create", "-u", email, "-s", folder])

	# Update things in case any new domains are added.
	return kick(env, "mail user added")
def set_mail_password(email, pw, env):
	"""Update the stored (hashed) password for an existing user.

	Returns "OK" on success or an (error message, 400) tuple if the
	address is not a known user.
	"""
	# Normalize an IDNA-encoded domain part to Unicode before touching the DB.
	email = sanitize_idn_email_address(email)

	# Reject unacceptable passwords before doing any work.
	validate_password(pw)

	hashed = hash_password(pw)
	conn, cursor = open_database(env, with_connection=True)
	cursor.execute("UPDATE users SET password=? WHERE email=?", (hashed, email))
	if cursor.rowcount != 1:
		return ("That's not a user (%s)." % email, 400)
	conn.commit()
	return "OK"
def hash_password(pw):
	"""Hash a plain password into Dovecot's "{SCHEME}hashedpassworddata" form.

	http://wiki2.dovecot.org/Authentication/PasswordSchemes
	"""
	cmd = ["/usr/bin/doveadm", "pw", "-s", "SHA512-CRYPT", "-p", pw]
	return utils.shell('check_output', cmd).strip()
def get_mail_password(email, env):
	"""Return the stored Dovecot-format hashed password for a user.

	Raises ValueError if the address is not a known user.
	http://wiki2.dovecot.org/Authentication/PasswordSchemes
	"""
	cursor = open_database(env)
	cursor.execute('SELECT password FROM users WHERE email=?', (email,))
	rows = cursor.fetchall()
	if len(rows) != 1:
		raise ValueError("That's not a user (%s)." % email)
	return rows[0][0]
def remove_mail_user(email, env):
	"""Delete a user account from the database.

	Returns kick() output on success or an (error message, 400) tuple.
	The on-disk mailbox is not removed here.
	"""
	# Normalize an IDNA-encoded domain part to Unicode before touching the DB.
	email = sanitize_idn_email_address(email)

	conn, cursor = open_database(env, with_connection=True)
	cursor.execute("DELETE FROM users WHERE email=?", (email,))
	if cursor.rowcount != 1:
		return ("That's not a user (%s)." % email, 400)
	conn.commit()

	# Update things in case any domains are removed.
	return kick(env, "mail user removed")
def parse_privs(value):
	"""Split a newline-separated privilege string, dropping blank entries."""
	privileges = []
	for line in value.split("\n"):
		if line.strip():
			privileges.append(line)
	return privileges
def get_mail_user_privileges(email, env):
	"""Return a user's privilege list, or an (error message, 400) tuple."""
	# Normalize an IDNA-encoded domain part to Unicode before touching the DB.
	email = sanitize_idn_email_address(email)

	cursor = open_database(env)
	cursor.execute('SELECT privileges FROM users WHERE email=?', (email,))
	rows = cursor.fetchall()
	if len(rows) != 1:
		return ("That's not a user (%s)." % email, 400)
	return parse_privs(rows[0][0])
def validate_privilege(priv):
    """Return an (error, status) tuple if priv is not a valid privilege name, else None."""
    if priv.strip() == "" or "\n" in priv:
        return ("That's not a valid privilege (%s)." % priv, 400)
    return None
def add_remove_mail_user_privilege(email, priv, action, env):
    """Grant or revoke a single privilege for a user.

    action is "add" or "remove". Returns "OK" on success or an
    (error message, http status) tuple on failure.
    """
    # Accept IDNA domain names but normalize to Unicode before hitting the db.
    email = sanitize_idn_email_address(email)

    # Validate the privilege name first.
    error = validate_privilege(priv)
    if error:
        return error

    # Fetch the user's current privileges; this may return an error tuple.
    privs = get_mail_user_privileges(email, env)
    if isinstance(privs, tuple):
        return privs  # error

    # Compute the updated privilege list.
    if action == "add":
        if priv not in privs:
            privs.append(priv)
    elif action == "remove":
        privs = [p for p in privs if p != priv]
    else:
        return ("Invalid action.", 400)

    # Write it back.
    conn, cursor = open_database(env, with_connection=True)
    cursor.execute("UPDATE users SET privileges=? WHERE email=?", ("\n".join(privs), email))
    if cursor.rowcount != 1:
        return ("Something went wrong.", 400)
    conn.commit()
    return "OK"
def add_mail_alias(source, destination, env, update_if_exists=False, do_kick=True):
    """Create (or optionally update) a mail alias mapping source => destination(s).

    Returns an (error message, http status) tuple on failure; on success
    returns the result of kick() (or None when do_kick is False).
    """
    # accept IDNA domain names but normalize to Unicode before going into database
    source = sanitize_idn_email_address(source)
    # validate source
    if source.strip() == "":
        return ("No incoming email address provided.", 400)
    if not validate_email(source, mode='alias'):
        return ("Invalid incoming email address (%s)." % source, 400)
    # validate destination
    dests = []
    destination = destination.strip()
    if validate_email(destination, mode='alias'):
        # Postfix allows a single @domain.tld as the destination, which means
        # the local part on the address is preserved in the rewrite.
        dests.append(sanitize_idn_email_address(destination))
    else:
        # Parse comma and \n-separated destination emails & validate. In this
        # case, the recipients must be complete email addresses.
        for line in destination.split("\n"):
            for email in line.split(","):
                email = email.strip()
                email = sanitize_idn_email_address(email)  # Unicode => IDNA
                if email == "":
                    continue
                if not validate_email(email):
                    return ("Invalid destination email address (%s)." % email, 400)
                dests.append(email)
    # BUGFIX: check the parsed destination list, not the raw input string —
    # an input like "," is non-empty but yields no valid destinations, which
    # previously slipped through and stored an alias with an empty destination.
    if len(dests) == 0:
        return ("No destination email address(es) provided.", 400)
    destination = ",".join(dests)
    # save to db
    conn, c = open_database(env, with_connection=True)
    try:
        c.execute("INSERT INTO aliases (source, destination) VALUES (?, ?)", (source, destination))
        return_status = "alias added"
    except sqlite3.IntegrityError:
        if not update_if_exists:
            return ("Alias already exists (%s)." % source, 400)
        else:
            c.execute("UPDATE aliases SET destination = ? WHERE source = ?", (destination, source))
            return_status = "alias updated"
    conn.commit()
    if do_kick:
        # Update things in case any new domains are added.
        return kick(env, return_status)
def remove_mail_alias(source, env, do_kick=True):
    """Delete an alias; optionally refresh dependent services afterwards."""
    # Accept IDNA domain names but normalize to Unicode before hitting the db.
    source = sanitize_idn_email_address(source)
    conn, cursor = open_database(env, with_connection=True)
    cursor.execute("DELETE FROM aliases WHERE source=?", (source,))
    if cursor.rowcount != 1:
        return ("That's not an alias (%s)." % source, 400)
    conn.commit()
    if do_kick:
        # Update things in case any domains are removed.
        return kick(env, "alias removed")
def get_system_administrator(env):
    """Return the administrator alias address on the box's primary hostname."""
    primary_hostname = env['PRIMARY_HOSTNAME']
    return "administrator@" + primary_hostname
def get_required_aliases(env):
    """Return the set of alias addresses that must always exist."""
    required = set()

    # The hostmaster alias is exposed in the DNS SOA for each zone.
    required.add("hostmaster@" + env['PRIMARY_HOSTNAME'])

    # Which domains do we really serve mail for? Skip domains whose only
    # addresses are postmaster/admin aliases to the administrator, or a
    # wildcard alias (since a wildcard will forward postmaster/admin).
    def keep_alias(alias):
        addr, target = alias[0], alias[1]
        if addr.startswith("@"):
            return False
        if (addr.startswith("postmaster@") or addr.startswith("admin@")) \
                and target == get_system_administrator(env):
            return False
        return True
    real_mail_domains = get_mail_domains(env, filter_aliases=keep_alias)

    # Create postmaster@ and admin@ for all domains we serve mail on.
    # postmaster@ is assumed to exist by our Postfix configuration. admin@
    # isn't anything, but it might save the user some trouble e.g. when
    # buying an SSL certificate.
    for domain in real_mail_domains:
        required.add("postmaster@" + domain)
        required.add("admin@" + domain)
    return required
def kick(env, mail_result=None):
    """Ensure required aliases exist, prune stale auto-generated aliases, and
    refresh DNS/web configuration. Returns a human-readable change summary."""
    results = []
    # Include the current operation's result in output.
    if mail_result is not None:
        results.append(mail_result + "\n")
    # Ensure every required alias exists.
    existing_users = get_mail_users(env)
    existing_aliases = get_mail_aliases(env)
    required_aliases = get_required_aliases(env)
    def ensure_admin_alias_exists(source):
        # If a user account exists with that address, we're good.
        if source in existing_users:
            return
        # Does this alias exist already?
        for s, t in existing_aliases:
            if s == source:
                return
        # Doesn't exist: forward it to the system administrator.
        administrator = get_system_administrator(env)
        add_mail_alias(source, administrator, env, do_kick=False)
        results.append("added alias %s (=> %s)\n" % (source, administrator))
    for alias in required_aliases:
        ensure_admin_alias_exists(alias)
    # Remove auto-generated postmaster/admin on domains we no
    # longer have any other email addresses for.
    for source, target in existing_aliases:
        user, domain = source.split("@")
        if user in ("postmaster", "admin") \
            and source not in required_aliases \
            and target == get_system_administrator(env):
            remove_mail_alias(source, env, do_kick=False)
            results.append("removed alias %s (was to %s; domain no longer used for email)\n" % (source, target))
    # Update DNS and nginx in case any domains are added/removed.
    from dns_update import do_dns_update
    results.append( do_dns_update(env) )
    from web_update import do_web_update
    results.append( do_web_update(env) )
    # Drop empty entries so the summary reads cleanly.
    return "".join(s for s in results if s != "")
def validate_password(pw):
    """Raise ValueError if pw is not acceptable as a new password."""
    if not pw.strip():
        raise ValueError("No password provided.")
    if re.search(r"[\s]", pw):
        raise ValueError("Passwords cannot contain spaces.")
    if len(pw) < 4:
        raise ValueError("Passwords must be at least four characters.")
if __name__ == "__main__":
    import sys
    # CLI entry points:
    #   mail.py validate-email <addr>  -> exit 0 if usable as a Dovecot account, else 1
    #   mail.py update                 -> run kick() and print its summary
    if len(sys.argv) > 2 and sys.argv[1] == "validate-email":
        # Validate that we can create a Dovecot account for a given string.
        if validate_email(sys.argv[2], mode='user'):
            sys.exit(0)
        else:
            sys.exit(1)
    if len(sys.argv) > 1 and sys.argv[1] == "update":
        from utils import load_environment
        print(kick(load_environment()))
| |
from sqlalchemy import and_
from sqlalchemy import case
from sqlalchemy import cast
from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import Integer
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
info_table = None
class CaseTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for the SQL CASE construct: round trips against a real table
    plus compilation-only checks."""

    __dialect__ = "default"

    @classmethod
    def setup_class(cls):
        # Create and populate the shared `infos` table; stored in the module
        # global `info_table` so the test methods can reference it.
        metadata = MetaData(testing.db)
        global info_table
        info_table = Table(
            "infos",
            metadata,
            Column("pk", Integer, primary_key=True),
            Column("info", String(30)),
        )
        info_table.create()
        info_table.insert().execute(
            {"pk": 1, "info": "pk_1_data"},
            {"pk": 2, "info": "pk_2_data"},
            {"pk": 3, "info": "pk_3_data"},
            {"pk": 4, "info": "pk_4_data"},
            {"pk": 5, "info": "pk_5_data"},
            {"pk": 6, "info": "pk_6_data"},
        )

    @classmethod
    def teardown_class(cls):
        # Drop the shared table created in setup_class().
        info_table.drop()

    @testing.fails_on("firebird", "FIXME: unknown")
    @testing.requires.subqueries
    def test_case(self):
        """CASE with WHEN/THEN pairs, selected through a subquery alias,
        and with an ELSE clause."""
        inner = select(
            [
                case(
                    [
                        [info_table.c.pk < 3, "lessthan3"],
                        [
                            and_(info_table.c.pk >= 3, info_table.c.pk < 7),
                            "gt3",
                        ],
                    ]
                ).label("x"),
                info_table.c.pk,
                info_table.c.info,
            ],
            from_obj=[info_table],
        )
        inner_result = inner.execute().fetchall()
        # Outputs:
        # lessthan3 1 pk_1_data
        # lessthan3 2 pk_2_data
        # gt3 3 pk_3_data
        # gt3 4 pk_4_data
        # gt3 5 pk_5_data
        # gt3 6 pk_6_data
        assert inner_result == [
            ("lessthan3", 1, "pk_1_data"),
            ("lessthan3", 2, "pk_2_data"),
            ("gt3", 3, "pk_3_data"),
            ("gt3", 4, "pk_4_data"),
            ("gt3", 5, "pk_5_data"),
            ("gt3", 6, "pk_6_data"),
        ]
        # The same CASE selected through a subquery alias must be unchanged.
        outer = select([inner.alias("q_inner")])
        outer_result = outer.execute().fetchall()
        assert outer_result == [
            ("lessthan3", 1, "pk_1_data"),
            ("lessthan3", 2, "pk_2_data"),
            ("gt3", 3, "pk_3_data"),
            ("gt3", 4, "pk_4_data"),
            ("gt3", 5, "pk_5_data"),
            ("gt3", 6, "pk_6_data"),
        ]
        # CASE with an ELSE clause and a cast() inside a THEN.
        w_else = select(
            [
                case(
                    [
                        [info_table.c.pk < 3, cast(3, Integer)],
                        [and_(info_table.c.pk >= 3, info_table.c.pk < 6), 6],
                    ],
                    else_=0,
                ).label("x"),
                info_table.c.pk,
                info_table.c.info,
            ],
            from_obj=[info_table],
        )
        else_result = w_else.execute().fetchall()
        assert else_result == [
            (3, 1, "pk_1_data"),
            (3, 2, "pk_2_data"),
            (6, 3, "pk_3_data"),
            (6, 4, "pk_4_data"),
            (6, 5, "pk_5_data"),
            (0, 6, "pk_6_data"),
        ]

    def test_literal_interpretation_ambiguous(self):
        """A plain string as a WHEN condition is rejected."""
        assert_raises_message(
            exc.ArgumentError,
            r"Column expression expected, got 'x'",
            case,
            [("x", "y")],
        )

    def test_literal_interpretation_ambiguous_tuple(self):
        """A tuple as a WHEN condition is rejected."""
        assert_raises_message(
            exc.ArgumentError,
            r"Column expression expected, got \('x', 'y'\)",
            case,
            [(("x", "y"), "z")],
        )

    def test_literal_interpretation(self):
        """Literal WHEN values compile once a `value` column is supplied."""
        t = table("test", column("col1"))
        self.assert_compile(
            case([("x", "y")], value=t.c.col1),
            "CASE test.col1 WHEN :param_1 THEN :param_2 END",
        )
        self.assert_compile(
            case([(t.c.col1 == 7, "y")], else_="z"),
            "CASE WHEN (test.col1 = :col1_1) THEN :param_1 ELSE :param_2 END",
        )

    def test_text_doesnt_explode(self):
        """text() and literal_column() are usable as THEN/ELSE values."""
        for s in [
            select(
                [
                    case(
                        [(info_table.c.info == "pk_4_data", text("'yes'"))],
                        else_=text("'no'"),
                    )
                ]
            ).order_by(info_table.c.info),
            select(
                [
                    case(
                        [
                            (
                                info_table.c.info == "pk_4_data",
                                literal_column("'yes'"),
                            )
                        ],
                        else_=literal_column("'no'"),
                    )
                ]
            ).order_by(info_table.c.info),
        ]:
            if testing.against("firebird"):
                # Firebird pads CHAR literals to a common width.
                eq_(
                    s.execute().fetchall(),
                    [
                        ("no ",),
                        ("no ",),
                        ("no ",),
                        ("yes",),
                        ("no ",),
                        ("no ",),
                    ],
                )
            else:
                eq_(
                    s.execute().fetchall(),
                    [("no",), ("no",), ("no",), ("yes",), ("no",), ("no",)],
                )

    @testing.fails_on("firebird", "FIXME: unknown")
    def testcase_with_dict(self):
        """CASE built from a dict of whens, with and without a `value` column."""
        query = select(
            [
                case(
                    {
                        info_table.c.pk < 3: "lessthan3",
                        info_table.c.pk >= 3: "gt3",
                    },
                    else_="other",
                ),
                info_table.c.pk,
                info_table.c.info,
            ],
            from_obj=[info_table],
        )
        assert query.execute().fetchall() == [
            ("lessthan3", 1, "pk_1_data"),
            ("lessthan3", 2, "pk_2_data"),
            ("gt3", 3, "pk_3_data"),
            ("gt3", 4, "pk_4_data"),
            ("gt3", 5, "pk_5_data"),
            ("gt3", 6, "pk_6_data"),
        ]
        simple_query = select(
            [
                case(
                    {1: "one", 2: "two"}, value=info_table.c.pk, else_="other"
                ),
                info_table.c.pk,
            ],
            whereclause=info_table.c.pk < 4,
            from_obj=[info_table],
        )
        assert simple_query.execute().fetchall() == [
            ("one", 1),
            ("two", 2),
            ("other", 3),
        ]
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import str
from past.builtins import basestring
from datetime import datetime
from contextlib import closing
import sys
from typing import Optional
from sqlalchemy import create_engine
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
    """
    Abstract base class for sql hooks.
    """
    # Override to provide the connection name.
    conn_name_attr = None  # type: Optional[str]
    # Override to have a default connection id for a particular dbHook
    default_conn_name = 'default_conn_id'
    # Override if this db supports autocommit.
    supports_autocommit = False
    # Override with the object that exposes the connect method
    connector = None

    def __init__(self, *args, **kwargs):
        # Resolve the connection id: positional arg > keyword arg > default.
        if not self.conn_name_attr:
            raise AirflowException("conn_name_attr is not defined")
        elif len(args) == 1:
            setattr(self, self.conn_name_attr, args[0])
        elif self.conn_name_attr not in kwargs:
            setattr(self, self.conn_name_attr, self.default_conn_name)
        else:
            setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])

    def get_conn(self):
        """Returns a connection object
        """
        # NOTE(review): the kwarg names (username/schema) must match what the
        # subclass's `connector` expects — subclasses commonly override this.
        db = self.get_connection(getattr(self, self.conn_name_attr))
        return self.connector.connect(
            host=db.host,
            port=db.port,
            username=db.login,
            schema=db.schema)

    def get_uri(self):
        """Build an RFC-1738-style database URI from the Airflow connection."""
        conn = self.get_connection(getattr(self, self.conn_name_attr))
        login = ''
        if conn.login:
            login = '{conn.login}:{conn.password}@'.format(conn=conn)
        host = conn.host
        if conn.port is not None:
            host += ':{port}'.format(port=conn.port)
        uri = '{conn.conn_type}://{login}{host}/'.format(
            conn=conn, login=login, host=host)
        if conn.schema:
            uri += conn.schema
        return uri

    def get_sqlalchemy_engine(self, engine_kwargs=None):
        """Create a SQLAlchemy engine for this hook's connection URI.

        :param engine_kwargs: extra keyword args forwarded to create_engine.
        :type engine_kwargs: dict or None
        """
        if engine_kwargs is None:
            engine_kwargs = {}
        return create_engine(self.get_uri(), **engine_kwargs)

    def get_pandas_df(self, sql, parameters=None):
        """
        Executes the sql and returns a pandas dataframe

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        # Python 2 compatibility: pandas needs bytes there.
        if sys.version_info[0] < 3:
            sql = sql.encode('utf-8')
        import pandas.io.sql as psql
        with closing(self.get_conn()) as conn:
            return psql.read_sql(sql, con=conn, params=parameters)

    def get_records(self, sql, parameters=None):
        """
        Executes the sql and returns a set of records.

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        if sys.version_info[0] < 3:
            sql = sql.encode('utf-8')
        with closing(self.get_conn()) as conn:
            with closing(conn.cursor()) as cur:
                if parameters is not None:
                    cur.execute(sql, parameters)
                else:
                    cur.execute(sql)
                return cur.fetchall()

    def get_first(self, sql, parameters=None):
        """
        Executes the sql and returns the first resulting row.

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        if sys.version_info[0] < 3:
            sql = sql.encode('utf-8')
        with closing(self.get_conn()) as conn:
            with closing(conn.cursor()) as cur:
                if parameters is not None:
                    cur.execute(sql, parameters)
                else:
                    cur.execute(sql)
                return cur.fetchone()

    def run(self, sql, autocommit=False, parameters=None):
        """
        Runs a command or a list of commands. Pass a list of sql
        statements to the sql parameter to get them to execute
        sequentially

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param autocommit: What to set the connection's autocommit setting to
            before executing the query.
        :type autocommit: bool
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        if isinstance(sql, basestring):
            sql = [sql]

        with closing(self.get_conn()) as conn:
            if self.supports_autocommit:
                self.set_autocommit(conn, autocommit)

            with closing(conn.cursor()) as cur:
                for s in sql:
                    if sys.version_info[0] < 3:
                        s = s.encode('utf-8')
                    if parameters is not None:
                        self.log.info("{} with parameters {}".format(s, parameters))
                        cur.execute(s, parameters)
                    else:
                        self.log.info(s)
                        cur.execute(s)

            # If autocommit was set to False for db that supports autocommit,
            # or if db does not supports autocommit, we do a manual commit.
            if not self.get_autocommit(conn):
                conn.commit()

    def set_autocommit(self, conn, autocommit):
        """
        Sets the autocommit flag on the connection
        """
        if not self.supports_autocommit and autocommit:
            # Warn but still set the flag — behavior is connector-dependent.
            self.log.warning(
                "%s connection doesn't support autocommit but autocommit activated.",
                getattr(self, self.conn_name_attr)
            )
        conn.autocommit = autocommit

    def get_autocommit(self, conn):
        """
        Get autocommit setting for the provided connection.
        Return True if conn.autocommit is set to True.
        Return False if conn.autocommit is not set or set to False or conn
        does not support autocommit.

        :param conn: Connection to get autocommit setting from.
        :type conn: connection object.
        :return: connection autocommit setting.
        :rtype: bool
        """
        return getattr(conn, 'autocommit', False) and self.supports_autocommit

    def get_cursor(self):
        """
        Returns a cursor
        """
        return self.get_conn().cursor()

    def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
                    replace=False):
        """
        A generic way to insert a set of tuples into a table,
        a new transaction is created every commit_every rows

        :param table: Name of the target table
        :type table: str
        :param rows: The rows to insert into the table
        :type rows: iterable of tuples
        :param target_fields: The names of the columns to fill in the table
        :type target_fields: iterable of strings
        :param commit_every: The maximum number of rows to insert in one
            transaction. Set to 0 to insert all rows in one transaction.
        :type commit_every: int
        :param replace: Whether to replace instead of insert
        :type replace: bool
        """
        if target_fields:
            target_fields = ", ".join(target_fields)
            target_fields = "({})".format(target_fields)
        else:
            target_fields = ''
        i = 0
        with closing(self.get_conn()) as conn:
            if self.supports_autocommit:
                self.set_autocommit(conn, False)

            conn.commit()

            with closing(conn.cursor()) as cur:
                for i, row in enumerate(rows, 1):
                    lst = []
                    for cell in row:
                        lst.append(self._serialize_cell(cell, conn))
                    values = tuple(lst)
                    # NOTE(review): "%s" placeholders assume a pyformat/format
                    # paramstyle driver — confirm for each subclass.
                    placeholders = ["%s", ] * len(values)
                    if not replace:
                        sql = "INSERT INTO "
                    else:
                        sql = "REPLACE INTO "
                    sql += "{0} {1} VALUES ({2})".format(
                        table,
                        target_fields,
                        ",".join(placeholders))
                    cur.execute(sql, values)
                    if commit_every and i % commit_every == 0:
                        conn.commit()
                        self.log.info(
                            "Loaded %s into %s rows so far", i, table
                        )

            conn.commit()
        self.log.info("Done loading. Loaded a total of %s rows", i)

    @staticmethod
    def _serialize_cell(cell, conn=None):
        """
        Returns the SQL literal of the cell as a string.

        :param cell: The cell to insert into the table
        :type cell: object
        :param conn: The database connection
        :type conn: connection object
        :return: The serialized cell
        :rtype: str
        """
        if cell is None:
            return None
        if isinstance(cell, datetime):
            return cell.isoformat()
        return str(cell)

    def bulk_dump(self, table, tmp_file):
        """
        Dumps a database table into a tab-delimited file

        :param table: The name of the source table
        :type table: str
        :param tmp_file: The path of the target file
        :type tmp_file: str
        """
        raise NotImplementedError()

    def bulk_load(self, table, tmp_file):
        """
        Loads a tab-delimited file into a database table

        :param table: The name of the target table
        :type table: str
        :param tmp_file: The path of the file to load into the table
        :type tmp_file: str
        """
        raise NotImplementedError()
| |
#/*******************************************************************************
#* Portions Copyright (C) 2008 Novell, Inc. All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are met:
#*
#* - Redistributions of source code must retain the above copyright notice,
#* this list of conditions and the following disclaimer.
#*
#* - Redistributions in binary form must reproduce the above copyright notice,
#* this list of conditions and the following disclaimer in the documentation
#* and/or other materials provided with the distribution.
#*
#* - Neither the name of Novell, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
#* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS
#* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Authors: Matt Ryan (mrayn novell.com)
#* Brad Nicholes (bnicholes novell.com)
#******************************************************************************/
import os
import rrdtool
import logging
from time import time
from Gmetad.gmetad_plugin import GmetadPlugin
from Gmetad.gmetad_config import getConfig, GmetadConfig
def get_plugin():
    ''' All plugins are required to implement this method. It is the factory
        function that instanciates a new plugin instance. The plugin
        configuration ID passed in must match the section name in the
        configuration file. '''
    plugin_id = 'rrd'
    return RRDPlugin(plugin_id)
class RRDPlugin(GmetadPlugin):
    ''' This class implements the RRD plugin that stores metric data to RRD files.
        NOTE(review): written for Python 2 only (`has_key`, octal 0755,
        `except Exception, e` syntax). '''

    # Configuration keyword names.
    RRAS = 'RRAs'
    RRD_ROOTDIR = 'rrd_rootdir'

    # Default RRAs
    _cfgDefaults = {
        RRAS : [
            'RRA:AVERAGE:0.5:1:244',
            'RRA:AVERAGE:0.5:24:244',
            'RRA:AVERAGE:0.5:168:244',
            'RRA:AVERAGE:0.5:672:244',
            'RRA:AVERAGE:0.5:5760:374'
        ],
        RRD_ROOTDIR : '/var/lib/ganglia/rrds',
    }

    def __init__(self, cfgid):
        # Initialize attributes before _resetConfig() fills them in.
        self.rrdpath = None
        self.cfg = None
        self.kwHandlers = None
        self._resetConfig()
        # The call to the parent class __init__ must be last
        GmetadPlugin.__init__(self, cfgid)

    def _resetConfig(self):
        ''' Restore configuration defaults and keyword-handler dispatch table. '''
        self.rrdpath = None
        self.cfg = RRDPlugin._cfgDefaults
        self.kwHandlers = {
            RRDPlugin.RRAS : self._parseRRAs,
            RRDPlugin.RRD_ROOTDIR : self._parseRrdRootdir
        }

    def _parseConfig(self, cfgdata):
        '''This method overrides the plugin base class method. It is used to
            parse the plugin specific configuration directives.'''
        for kw,args in cfgdata:
            if self.kwHandlers.has_key(kw):
                self.kwHandlers[kw](args)

    def _parseRrdRootdir(self, arg):
        ''' Parse the RRD root directory directive. '''
        v = arg.strip().strip('"')
        # Only accept the directive if the directory actually exists.
        if os.path.isdir(v):
            self.cfg[RRDPlugin.RRD_ROOTDIR] = v

    def _parseRRAs(self, args):
        ''' Parse the RRAs directive. '''
        self.cfg[RRDPlugin.RRAS] = []
        for rraspec in args.split():
            self.cfg[RRDPlugin.RRAS].append(rraspec.strip().strip('"'))

    def _checkDir(self, dir):
        ''' This method validates that an RRD directory exists or creates the directory
            if it doesn't exist. '''
        if not os.path.isdir(dir):
            os.mkdir(dir, 0755)

    def _createRRD(self, clusterNode, metricNode, rrdPath, step, summary):
        ''' This method creates a new metric RRD file.'''
        # Determine the RRD data source type.
        slope = metricNode.getAttr('slope')
        if slope.lower() == 'positive':
            dsType = 'COUNTER'
        else:
            dsType = 'GAUGE'
        # Use the cluster's reported local time as the RRD start time, falling
        # back to "now" when it is absent.
        localTime = clusterNode.getAttr('localtime')
        if localTime is None:
            localTime = int(time())
        # Calculate the heartbeat.
        heartbeat = 8*step
        # Format the data source string and add all of the RRDTool arguments to the
        # args list.
        dsString = 'DS:sum:%s:%d:U:U'%(dsType,heartbeat)
        args = [str(rrdPath), '-b', str(localTime), '-s', str(step), str(dsString)]
        # Summary RRDs carry a second data source with the host count.
        if summary is True:
            dsString = 'DS:num:%s:%d:U:U'%(dsType,heartbeat)
            args.append(str(dsString))
        for rra in self.cfg[RRDPlugin.RRAS]:
            args.append(rra)
        try:
            # Create the RRD file with the supplied args.
            rrdtool.create(*args)
            logging.debug('Created rrd %s'%rrdPath)
        except Exception, e:
            logging.info('Error creating rrd %s - %s'%(rrdPath, str(e)))

    def _updateRRD(self, clusterNode, metricNode, rrdPath, summary):
        ''' This method updates an RRD file with current metric values. '''
        # If the node has a time stamp then use it to update the RRD. Otherwise get
        # the current timestamp.
        processTime = clusterNode.getAttr('localtime')
        if processTime is None:
            processTime = int(time())
        # If this is a summary RRD, format the summary entry. Otherwise just use a standard entry
        if summary is True:
            args = [str(rrdPath), '%s:%s:%s'%(str(processTime),str(metricNode.getAttr('sum')),str(metricNode.getAttr('num')))]
        else:
            args = [str(rrdPath), '%s:%s'%(str(processTime),str(metricNode.getAttr('val')))]
        try:
            # Update the RRD file with the current timestamp and value
            rrdtool.update(*args)
            #logging.debug('Updated rrd %s with value %s'%(rrdPath, str(metricNode.getAttr('val'))))
        except Exception, e:
            logging.info('Error updating rrd %s - %s'%(rrdPath, str(e)))

    def start(self):
        '''Called by the engine during initialization to get the plugin going.'''
        #print "RRD start called"
        pass

    def stop(self):
        '''Called by the engine during shutdown to allow the plugin to shutdown.'''
        #print "RRD stop called"
        pass

    def notify(self, clusterNode):
        '''Called by the engine when the internal data source has changed.'''
        # Get the current configuration
        gmetadConfig = getConfig()
        # Find the data source configuration entry that matches the cluster name
        # NOTE(review): if no entry matches, `ds` keeps the last iterated value,
        # not None — the `if ds is None` guard below may never fire; confirm.
        for ds in gmetadConfig[GmetadConfig.DATA_SOURCE]:
            if ds.name == clusterNode.getAttr('name'):
                break
        if ds is None:
            logging.info('No matching data source for %s'%clusterNode.getAttr('name'))
            return
        try:
            if clusterNode.getAttr('status') == 'down':
                return
        except AttributeError:
            pass
        # Create the cluster RRD base path and validate it
        clusterPath = '%s/%s'%(self.cfg[RRDPlugin.RRD_ROOTDIR], clusterNode.getAttr('name'))
        if 'GRID' == clusterNode.id:
            clusterPath = '%s/__SummaryInfo__'%clusterPath
        self._checkDir(clusterPath)
        # We do not want to process grid data
        if 'GRID' == clusterNode.id:
            return
        # Update metrics for each host in the cluster
        for hostNode in clusterNode:
            # Create the host RRD base path and validate it.
            hostPath = '%s/%s'%(clusterPath,hostNode.getAttr('name'))
            self._checkDir(hostPath)
            # Update metrics for each host
            for metricNode in hostNode:
                # Skip non-numeric metrics (strings and timestamps).
                if metricNode.getAttr('type') in ['string', 'timestamp']:
                    continue
                # Create the RRD final path and validate it.
                rrdPath = '%s/%s.rrd'%(hostPath, metricNode.getAttr('name'))
                # Create the RRD metric file if it doesn't exist
                if not os.path.isfile(rrdPath):
                    self._createRRD(clusterNode, metricNode, rrdPath, ds.interval, False)
                    #need to do some error checking here if the createRRD failed
                # Update the RRD file.
                self._updateRRD(clusterNode, metricNode, rrdPath, False)
        #print "RRD notify called"
| |
# Copyright 2010-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import time
import eventlet
from oslo_log import log as logging
from trove.common import exception as rd_exception
from trove.common import instance as rd_instance
from trove.tests.util import unquote_user_host
# Module-level shared store — populated elsewhere; TODO confirm usage by callers.
DB = {}
LOG = logging.getLogger(__name__)
# Size reported for fake backups — presumably GB; confirm against callers.
BACKUP_SIZE = 0.14
class FakeGuest(object):
def __init__(self, id):
self.id = id
self.users = {}
self.dbs = {}
self.root_was_enabled = False
self.version = 1
self.grants = {}
self.overrides = {}
# Our default admin user.
self._create_user({
"_name": "os_admin",
"_host": "%",
"_password": "12345",
"_databases": [],
})
def get_hwinfo(self):
return {'mem_total': 524288, 'num_cpus': 1}
def get_diagnostics(self):
return {
'version': str(self.version),
'fd_size': 64,
'vm_size': 29096,
'vm_peak': 29160,
'vm_rss': 2872,
'vm_hwm': 2872,
'threads': 2
}
def update_guest(self):
LOG.debug("Updating guest %s" % self.id)
self.version += 1
def _check_username(self, username):
unsupported_chars = re.compile("^\s|\s$|'|\"|;|`|,|/|\\\\")
if (not username or
unsupported_chars.search(username) or
("%r" % username).find("\\") != -1):
raise ValueError("'%s' is not a valid user name." % username)
if len(username) > 16:
raise ValueError("User name '%s' is too long. Max length = 16" %
username)
    def change_passwords(self, users):
        """Set new passwords for the given users.

        Each entry must carry 'name', 'host' and 'password' keys; raises
        UserNotFound for unknown user/host pairs.
        """
        for user in users:
            # Use the model to check validity.
            username = user['name']
            self._check_username(username)
            hostname = user['host']
            password = user['password']
            if (username, hostname) not in self.users:
                raise rd_exception.UserNotFound(
                    "User %s@%s cannot be found on the instance."
                    % (username, hostname))
            # NOTE(review): stored user dicts use the '_password' key elsewhere
            # (see update_attributes/_create_user); writing 'password' here may
            # be an inconsistency — confirm against the tests that read it.
            self.users[(username, hostname)]['password'] = password
def update_attributes(self, username, hostname, user_attrs):
LOG.debug("Updating attributes")
self._check_username(username)
if (username, hostname) not in self.users:
raise rd_exception.UserNotFound(
"User %s@%s cannot be found on the instance."
% (username, hostname))
new_name = user_attrs.get('name')
new_host = user_attrs.get('host')
new_password = user_attrs.get('password')
old_name = username
old_host = hostname
name = new_name or old_name
host = new_host or old_host
if new_name or new_host:
old_grants = self.grants.get((old_name, old_host), set())
self._create_user({
"_name": name,
"_host": host,
"_password": self.users[(old_name, host)]['_password'],
"_databases": [],
})
self.grants[(name, host)] = old_grants
del self.users[(old_name, old_host)]
if new_password:
self.users[(name, host)]['_password'] = new_password
def create_database(self, databases):
for db in databases:
self.dbs[db['_name']] = db
def create_user(self, users):
for user in users:
self._create_user(user)
def _create_user(self, user):
username = user['_name']
self._check_username(username)
hostname = user['_host']
if hostname is None:
hostname = '%'
self.users[(username, hostname)] = user
print("CREATING %s @ %s" % (username, hostname))
databases = [db['_name'] for db in user['_databases']]
self.grant_access(username, hostname, databases)
return user
def delete_database(self, database):
if database['_name'] in self.dbs:
del self.dbs[database['_name']]
def enable_root(self):
self.root_was_enabled = True
return self._create_user({
"_name": "root",
"_host": "%",
"_password": "12345",
"_databases": [],
})
def enable_root_with_password(self, root_password=None):
self.root_was_enabled = True
return self._create_user({
"_name": "root",
"_host": "%",
"_password": "12345",
"_databases": [],
})
def disable_root(self):
self.delete_user({
"_name": "root",
"_host": "%"})
def delete_user(self, user):
username = user['_name']
self._check_username(username)
hostname = user['_host']
self.grants[(username, hostname)] = set()
if (username, hostname) in self.users:
del self.users[(username, hostname)]
def is_root_enabled(self):
return self.root_was_enabled
def _list_resource(self, resource, limit=None, marker=None,
include_marker=False):
names = sorted([name for name in resource])
if marker in names:
if not include_marker:
# Cut off everything left of and including the marker item.
names = names[names.index(marker) + 1:]
else:
names = names[names.index(marker):]
next_marker = None
if limit:
if len(names) > limit:
next_marker = names[limit - 1]
names = names[:limit]
return [resource[name] for name in names], next_marker
def list_databases(self, limit=None, marker=None, include_marker=False):
return self._list_resource(self.dbs, limit, marker, include_marker)
def list_users(self, limit=None, marker=None, include_marker=False):
# The markers for users are a composite of the username and hostname.
names = sorted(["%s@%s" % (name, host) for (name, host) in self.users])
if marker in names:
if not include_marker:
# Cut off everything left of and including the marker item.
names = names[names.index(marker) + 1:]
else:
names = names[names.index(marker):]
next_marker = None
if limit:
if len(names) > limit:
next_marker = names[limit - 1]
names = names[:limit]
return ([self.users[unquote_user_host(userhost)]
for userhost in names], next_marker)
def get_user(self, username, hostname):
self._check_username(username)
for (u, h) in self.users:
print("%r @ %r" % (u, h))
if (username, hostname) not in self.users:
raise rd_exception.UserNotFound(
"User %s@%s cannot be found on the instance."
% (username, hostname))
return self.users.get((username, hostname), None)
    def prepare(self, memory_mb, packages, databases, users, device_path=None,
                mount_point=None, backup_info=None, config_contents=None,
                root_password=None, overrides=None, cluster_config=None,
                snapshot=None, modules=None):
        """Fake guest 'prepare': create users/databases synchronously, then
        report service status asynchronously via eventlet, imitating a real
        guest agent.  Instances whose name ends with 'GUEST_ERROR' are
        reported FAILED; all others RUNNING.  Most parameters are accepted
        only for API compatibility and are ignored by the fake."""
        from trove.guestagent.models import AgentHeartBeat
        from trove.instance.models import DBInstance
        from trove.instance.models import InstanceServiceStatus
        LOG.debug("users... %s" % users)
        LOG.debug("databases... %s" % databases)
        instance_name = DBInstance.find_by(id=self.id).name
        self.create_user(users)
        self.create_database(databases)
        self.overrides = overrides or {}
        def update_db():
            # Runs ~3.5s later on the eventlet hub, like an agent heartbeat.
            status = InstanceServiceStatus.find_by(instance_id=self.id)
            if instance_name.endswith('GUEST_ERROR'):
                status.status = rd_instance.ServiceStatuses.FAILED
            else:
                status.status = rd_instance.ServiceStatuses.RUNNING
            status.save()
            AgentHeartBeat.create(instance_id=self.id)
        eventlet.spawn_after(3.5, update_db)
def _set_task_status(self, new_status='RUNNING'):
from trove.instance.models import InstanceServiceStatus
print("Setting status to %s" % new_status)
states = {'RUNNING': rd_instance.ServiceStatuses.RUNNING,
'SHUTDOWN': rd_instance.ServiceStatuses.SHUTDOWN,
}
status = InstanceServiceStatus.find_by(instance_id=self.id)
status.status = states[new_status]
status.save()
def restart(self):
# All this does is restart, and shut off the status updates while it
# does so. So there's actually nothing to do to fake this out except
# take a nap.
print("Sleeping for a second.")
time.sleep(1)
self._set_task_status('RUNNING')
    def reset_configuration(self, config):
        """No-op: the fake guest keeps no configuration to reset."""
        # There's nothing to do here, since there is no config to update.
        pass
    def start_db_with_conf_changes(self, config_contents):
        """Pretend to restart the DB with a new config, then report RUNNING."""
        # config_contents is ignored; the fake only models the status change.
        time.sleep(2)
        self._set_task_status('RUNNING')
    def stop_db(self, do_not_start_on_reboot=False):
        """Mark the fake service as SHUTDOWN (do_not_start_on_reboot ignored)."""
        self._set_task_status('SHUTDOWN')
def get_volume_info(self):
"""Return used and total volume filesystem information in GB."""
return {'used': 0.16, 'total': 4.0}
def grant_access(self, username, hostname, databases):
"""Add a database to a users's grant list."""
if (username, hostname) not in self.users:
raise rd_exception.UserNotFound(
"User %s cannot be found on the instance." % username)
current_grants = self.grants.get((username, hostname), set())
for db in databases:
current_grants.add(db)
self.grants[(username, hostname)] = current_grants
def revoke_access(self, username, hostname, database):
"""Remove a database from a users's grant list."""
if (username, hostname) not in self.users:
raise rd_exception.UserNotFound(
"User %s cannot be found on the instance." % username)
if database not in self.grants.get((username, hostname), set()):
raise rd_exception.DatabaseNotFound(
"Database %s cannot be found on the instance." % database)
current_grants = self.grants.get((username, hostname), set())
if database in current_grants:
current_grants.remove(database)
self.grants[(username, hostname)] = current_grants
def list_access(self, username, hostname):
if (username, hostname) not in self.users:
raise rd_exception.UserNotFound(
"User %s cannot be found on the instance." % username)
current_grants = self.grants.get((username, hostname), set())
dbs = [{'_name': db,
'_collate': '',
'_character_set': '',
} for db in current_grants]
return dbs
    def create_backup(self, backup_info):
        """Fake a backup: fetch the Backup record now, then flip it to
        COMPLETED (with canned location/checksum/size) ~8.5s later via
        eventlet, imitating an asynchronous guest-side backup job."""
        from trove.backup.models import Backup
        from trove.backup.state import BackupState
        backup = Backup.get_by_id(context=None,
                                  backup_id=backup_info['id'])
        def finish_create_backup():
            # Runs later on the eventlet hub.
            backup.state = BackupState.COMPLETED
            backup.location = 'http://localhost/path/to/backup'
            backup.checksum = 'fake-md5-sum'
            backup.size = BACKUP_SIZE
            backup.save()
        eventlet.spawn_after(8.5, finish_create_backup)
    def mount_volume(self, device_path=None, mount_point=None):
        """No-op: the fake guest has no real volume to mount."""
        pass
    def unmount_volume(self, device_path=None, mount_point=None):
        """No-op counterpart of mount_volume."""
        pass
    def resize_fs(self, device_path=None, mount_point=None):
        """No-op: filesystem resize is not modelled by the fake."""
        pass
    def update_overrides(self, overrides, remove=False):
        # NOTE(review): *remove* is ignored; the fake always replaces the
        # stored overrides wholesale -- confirm that is intended.
        self.overrides = overrides
    def apply_overrides(self, overrides):
        """Record the applied overrides on the fake guest."""
        self.overrides = overrides
def get_replication_snapshot(self, snapshot_info,
replica_source_config=None):
self.create_backup(snapshot_info)
return {
'dataset':
{
'datastore_manager': 'mysql',
'dataset_size': '0.0',
'volume_size': '10.0',
'snapshot_id': None
},
'replication_strategy': 'replication_strategy',
'master': '1',
'log_position': '100'
}
    def attach_replication_slave(self, snapshot, slave_config):
        """No-op: the fake guest does not model replica attachment."""
        pass
    def backup_required_for_replication(self):
        """The fake always reports that a backup is needed to seed a replica."""
        return True
    def post_processing_required_for_replication(self):
        """The fake replica needs no post-processing step."""
        return False
    def module_list(self, context, include_contents=False):
        """Fake module listing: the guest reports no modules applied."""
        return []
    def module_apply(self, context, modules=None):
        """Fake module application: report that nothing was applied."""
        return []
    def module_remove(self, context, module=None):
        """No-op: the fake guest has no modules to remove."""
        pass
def get_or_create(id):
    """Return the cached FakeGuest for *id*, creating it on first use."""
    try:
        return DB[id]
    except KeyError:
        DB[id] = FakeGuest(id)
        return DB[id]
def fake_create_guest_client(context, id):
    """API-compatible stand-in for guest-client creation (context unused)."""
    return get_or_create(id)
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
from mock import Mock, call
from libcloud.common.upcloud import UpcloudCreateNodeRequestBody, UpcloudNodeDestroyer, UpcloudNodeOperations
from libcloud.common.upcloud import _StorageDevice
from libcloud.common.upcloud import UpcloudTimeoutException
from libcloud.common.upcloud import PlanPrice
from libcloud.compute.base import NodeImage, NodeSize, NodeLocation, NodeAuthSSHKey
from libcloud.test import unittest
class TestUpcloudCreateNodeRequestBody(unittest.TestCase):
    """Verifies that UpcloudCreateNodeRequestBody serializes node-creation
    parameters into the exact JSON payload expected by the UpCloud API."""

    def setUp(self):
        # Shared template image / location / size fixtures.
        self.image = NodeImage(id='01000000-0000-4000-8000-000030060200',
                               name='Ubuntu Server 16.04 LTS (Xenial Xerus)',
                               driver='',
                               extra={'type': 'template'})
        self.location = NodeLocation(id='fi-hel1', name='Helsinki #1', country='FI', driver='')
        self.size = NodeSize(id='1xCPU-1GB', name='1xCPU-1GB', ram=1024, disk=30, bandwidth=2048,
                             extra={'core_number': 1, 'storage_tier': 'maxiops'}, price=None, driver='')

    def test_creating_node_from_template_image(self):
        """A 'template' image becomes a single 'clone' storage device."""
        body = UpcloudCreateNodeRequestBody(name='ts', image=self.image, location=self.location, size=self.size)
        json_body = body.to_json()
        dict_body = json.loads(json_body)
        expected_body = {
            'server': {
                'title': 'ts',
                'hostname': 'localhost',
                'plan': '1xCPU-1GB',
                'zone': 'fi-hel1',
                'login_user': {'username': 'root',
                               'create_password': 'yes'},
                'storage_devices': {
                    'storage_device': [{
                        'action': 'clone',
                        'title': 'Ubuntu Server 16.04 LTS (Xenial Xerus)',
                        'storage': '01000000-0000-4000-8000-000030060200',
                        'size': 30,
                        'tier': 'maxiops',
                    }]
                },
            }
        }
        self.assertDictEqual(expected_body, dict_body)

    def test_creating_node_from_cdrom_image(self):
        """A 'cdrom' image yields a blank 'create' disk plus an attached cdrom."""
        image = NodeImage(id='01000000-0000-4000-8000-000030060200',
                          name='Ubuntu Server 16.04 LTS (Xenial Xerus)',
                          driver='',
                          extra={'type': 'cdrom'})
        body = UpcloudCreateNodeRequestBody(name='ts', image=image, location=self.location, size=self.size)
        json_body = body.to_json()
        dict_body = json.loads(json_body)
        expected_body = {
            'server': {
                'title': 'ts',
                'hostname': 'localhost',
                'plan': '1xCPU-1GB',
                'zone': 'fi-hel1',
                'login_user': {'username': 'root',
                               'create_password': 'yes'},
                'storage_devices': {
                    'storage_device': [
                        {
                            'action': 'create',
                            'size': 30,
                            'tier': 'maxiops',
                            'title': 'Ubuntu Server 16.04 LTS (Xenial Xerus)',
                        },
                        {
                            'action': 'attach',
                            'storage': '01000000-0000-4000-8000-000030060200',
                            'type': 'cdrom'
                        }
                    ]
                }
            }
        }
        self.assertDictEqual(expected_body, dict_body)

    def test_creating_node_using_ssh_keys(self):
        """With NodeAuthSSHKey, ssh_keys replace password creation."""
        auth = NodeAuthSSHKey('sshkey')
        body = UpcloudCreateNodeRequestBody(name='ts', image=self.image, location=self.location, size=self.size, auth=auth)
        json_body = body.to_json()
        dict_body = json.loads(json_body)
        expected_body = {
            'server': {
                'title': 'ts',
                'hostname': 'localhost',
                'plan': '1xCPU-1GB',
                'zone': 'fi-hel1',
                'login_user': {
                    'username': 'root',
                    'ssh_keys': {
                        'ssh_key': [
                            'sshkey'
                        ]
                    },
                },
                'storage_devices': {
                    'storage_device': [{
                        'action': 'clone',
                        'size': 30,
                        'title': 'Ubuntu Server 16.04 LTS (Xenial Xerus)',
                        'tier': 'maxiops',
                        'storage': '01000000-0000-4000-8000-000030060200'
                    }]
                },
            }
        }
        self.assertDictEqual(expected_body, dict_body)

    def test_creating_node_using_hostname(self):
        """ex_hostname overrides the default 'localhost' hostname."""
        body = UpcloudCreateNodeRequestBody(name='ts', image=self.image, location=self.location, size=self.size,
                                            ex_hostname='myhost.upcloud.com')
        json_body = body.to_json()
        dict_body = json.loads(json_body)
        expected_body = {
            'server': {
                'title': 'ts',
                'hostname': 'myhost.upcloud.com',
                'plan': '1xCPU-1GB',
                'zone': 'fi-hel1',
                'login_user': {'username': 'root',
                               'create_password': 'yes'},
                'storage_devices': {
                    'storage_device': [{
                        'action': 'clone',
                        'title': 'Ubuntu Server 16.04 LTS (Xenial Xerus)',
                        'storage': '01000000-0000-4000-8000-000030060200',
                        'tier': 'maxiops',
                        'size': 30
                    }]
                },
            }
        }
        self.assertDictEqual(expected_body, dict_body)

    def test_creating_node_with_non_default_username(self):
        """ex_username overrides the default 'root' login user."""
        body = UpcloudCreateNodeRequestBody(name='ts', image=self.image, location=self.location, size=self.size,
                                            ex_username='someone')
        json_body = body.to_json()
        dict_body = json.loads(json_body)
        login_user = dict_body['server']['login_user']
        self.assertDictEqual({'username': 'someone', 'create_password': 'yes'}, login_user)
class TestStorageDevice(unittest.TestCase):
    """Tests for _StorageDevice storage-tier selection."""

    def setUp(self):
        self.image = NodeImage(id='01000000-0000-4000-8000-000030060200',
                               name='Ubuntu Server 16.04 LTS (Xenial Xerus)',
                               driver='',
                               extra={'type': 'template'})
        self.size = NodeSize(id='1xCPU-1GB', name='1xCPU-1GB', ram=1024, disk=30, bandwidth=2048,
                             extra={'core_number': 1}, price=None, driver='')

    def test_storage_tier_default_value(self):
        """Without extra['storage_tier'] the tier defaults to 'maxiops'."""
        storagedevice = _StorageDevice(self.image, self.size)
        d = storagedevice.to_dict()
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(d['storage_device'][0]['tier'], 'maxiops')

    def test_storage_tier_given(self):
        """extra['storage_tier'] overrides the default tier."""
        self.size.extra['storage_tier'] = 'hdd'
        storagedevice = _StorageDevice(self.image, self.size)
        d = storagedevice.to_dict()
        self.assertEqual(d['storage_device'][0]['tier'], 'hdd')
class TestUpcloudNodeDestroyer(unittest.TestCase):
    """Tests for UpcloudNodeDestroyer's stop-then-destroy state machine."""

    def setUp(self):
        self.mock_sleep = Mock()
        self.mock_operations = Mock(spec=UpcloudNodeOperations)
        self.destroyer = UpcloudNodeDestroyer(self.mock_operations, sleep_func=self.mock_sleep)

    # NOTE: assertEquals is a deprecated alias (removed in Python 3.12) and
    # assertTrue(x == y) loses the useful failure message; both replaced
    # with assertEqual throughout.

    def test_node_already_in_stopped_state(self):
        self.mock_operations.get_node_state.side_effect = ['stopped']
        self.assertTrue(self.destroyer.destroy_node(1))
        self.assertEqual(self.mock_operations.stop_node.call_count, 0)
        self.mock_operations.destroy_node.assert_called_once_with(1)

    def test_node_in_error_state(self):
        self.mock_operations.get_node_state.side_effect = ['error']
        self.assertFalse(self.destroyer.destroy_node(1))
        self.assertEqual(self.mock_operations.stop_node.call_count, 0)
        self.assertEqual(self.mock_operations.destroy_node.call_count, 0)

    def test_node_in_started_state(self):
        self.mock_operations.get_node_state.side_effect = ['started', 'stopped']
        self.assertTrue(self.destroyer.destroy_node(1))
        self.mock_operations.stop_node.assert_called_once_with(1)
        self.mock_operations.destroy_node.assert_called_once_with(1)

    def test_node_in_maintenace_state(self):
        self.mock_operations.get_node_state.side_effect = ['maintenance', 'maintenance', None]
        self.assertTrue(self.destroyer.destroy_node(1))
        self.mock_sleep.assert_has_calls([call(self.destroyer.WAIT_AMOUNT), call(self.destroyer.WAIT_AMOUNT)])
        self.assertEqual(self.mock_operations.stop_node.call_count, 0)
        self.assertEqual(self.mock_operations.destroy_node.call_count, 0)

    def test_node_statys_in_started_state_for_awhile(self):
        self.mock_operations.get_node_state.side_effect = ['started', 'started', 'stopped']
        self.assertTrue(self.destroyer.destroy_node(1))
        # Only one call for stop should be done
        self.mock_operations.stop_node.assert_called_once_with(1)
        self.mock_sleep.assert_has_calls([call(self.destroyer.WAIT_AMOUNT)])
        self.mock_operations.destroy_node.assert_called_once_with(1)

    def test_reuse(self):
        "Verify that internal flag self.destroyer._stop_node is handled properly"
        self.mock_operations.get_node_state.side_effect = ['started', 'stopped', 'started', 'stopped']
        self.assertTrue(self.destroyer.destroy_node(1))
        self.assertTrue(self.destroyer.destroy_node(1))
        self.assertEqual(self.mock_sleep.call_count, 0)
        self.assertEqual(self.mock_operations.stop_node.call_count, 2)

    def test_timeout(self):
        self.mock_operations.get_node_state.side_effect = ['maintenance'] * 50
        self.assertRaises(UpcloudTimeoutException, self.destroyer.destroy_node, 1)

    def test_timeout_reuse(self):
        "Verify sleep count is handled properly"
        self.mock_operations.get_node_state.side_effect = ['maintenance'] * 50
        self.assertRaises(UpcloudTimeoutException, self.destroyer.destroy_node, 1)
        self.mock_operations.get_node_state.side_effect = ['maintenance', None]
        self.assertTrue(self.destroyer.destroy_node(1))
class TestPlanPrice(unittest.TestCase):
    """Tests for PlanPrice's zone-specific plan pricing lookups."""

    def setUp(self):
        price_listing = [
            {'name': 'uk-lon1', 'server_plan_1xCPU-1GB': {'amount': 1, 'price': 1.488}},
            {'name': 'fi-hel1', 'server_plan_1xCPU-1GB': {'amount': 1, 'price': 1.588}},
        ]
        self.pp = PlanPrice(price_listing)

    def test_zone_prices(self):
        """The price matching the location's zone is returned."""
        helsinki = NodeLocation(id='fi-hel1', name='Helsinki #1', country='FI', driver=None)
        self.assertEqual(self.pp.get_price('1xCPU-1GB', helsinki), 1.588)

    def test_plan_not_found_in_zone(self):
        """An unknown zone yields None rather than raising."""
        nowhere = NodeLocation(id='no_such_location', name='', country='', driver=None)
        self.assertEqual(self.pp.get_price('1xCPU-1GB', nowhere), None)

    def test_no_location_given(self):
        """Without a location there is no zone price to report."""
        self.assertEqual(self.pp.get_price('1xCPU-1GB'), None)
if __name__ == '__main__':
    # Propagate unittest's result as the process exit status.
    sys.exit(unittest.main())
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from mox import IgnoreArg
from mox import IsA
from mox import stubout
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.solidfire import SolidFire
LOG = logging.getLogger(__name__)
def create_configuration():
    """Build a mox MockObject standing in for a cinder volume Configuration."""
    configuration = mox.MockObject(conf.Configuration)
    configuration.san_is_local = False
    configuration.append_config_values(mox.IgnoreArg())
    return configuration
class SolidFireVolumeTestCase(test.TestCase):
    """Tests for the SolidFire driver against a faked cluster API.

    Bug fix: the fake API dispatcher compared strings with ``is``
    (identity), which only works by accident of CPython small-string
    interning; all method-name comparisons now use ``==``.
    """

    def setUp(self):
        self._mox = mox.Mox()
        self.configuration = mox.MockObject(conf.Configuration)
        self.configuration.sf_allow_tenant_qos = True
        self.configuration.san_is_local = True
        self.configuration.sf_emulate_512 = True
        super(SolidFireVolumeTestCase, self).setUp()
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request)

    def fake_issue_api_request(obj, method, params):
        """Return canned successful responses for the SolidFire API."""
        if method == 'GetClusterCapacity':
            LOG.info('Called Fake GetClusterCapacity...')
            data = {}
            data = {'result':
                    {'clusterCapacity': {'maxProvisionedSpace': 99999999,
                                         'usedSpace': 999,
                                         'compressionPercent': 100,
                                         'deDuplicationPercent': 100,
                                         'thinProvisioningPercent': 100}}}
            return data

        if method == 'GetClusterInfo':
            LOG.info('Called Fake GetClusterInfo...')
            results = {'result': {'clusterInfo':
                                  {'name': 'fake-cluster',
                                   'mvip': '1.1.1.1',
                                   'svip': '1.1.1.1',
                                   'uniqueID': 'unqid',
                                   'repCount': 2,
                                   'attributes': {}}}}
            return results

        elif method == 'AddAccount':
            LOG.info('Called Fake AddAccount...')
            return {'result': {'accountID': 25}, 'id': 1}

        elif method == 'GetAccountByName':
            LOG.info('Called Fake GetAccountByName...')
            results = {'result': {'account':
                                  {'accountID': 25,
                                   'username': params['username'],
                                   'status': 'active',
                                   'initiatorSecret': '123456789012',
                                   'targetSecret': '123456789012',
                                   'attributes': {},
                                   'volumes': [6, 7, 20]}},
                       "id": 1}
            return results

        elif method == 'CreateVolume':
            LOG.info('Called Fake CreateVolume...')
            return {'result': {'volumeID': 5}, 'id': 1}

        elif method == 'DeleteVolume':
            LOG.info('Called Fake DeleteVolume...')
            return {'result': {}, 'id': 1}

        elif method == 'ListVolumesForAccount':
            test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66'
            LOG.info('Called Fake ListVolumesForAccount...')
            result = {'result': {
                'volumes': [{'volumeID': 5,
                             'name': test_name,
                             'accountID': 25,
                             'sliceCount': 1,
                             'totalSize': 1048576 * 1024,
                             'enable512e': True,
                             'access': "readWrite",
                             'status': "active",
                             'attributes': None,
                             'qos': None,
                             'iqn': test_name}]}}
            return result

        else:
            LOG.error('Crap, unimplemented API call in Fake:%s' % method)

    def fake_issue_api_request_fails(obj, method, params):
        """Return a canned API error for any call."""
        return {'error': {'code': 000,
                          'name': 'DummyError',
                          'message': 'This is a fake error response'},
                'id': 1}

    def fake_set_qos_by_volume_type(self, type_id, ctxt):
        return {'minIOPS': 500,
                'maxIOPS': 1000,
                'burstIOPS': 1000}

    def fake_volume_get(obj, key, default=None):
        return {'qos': 'fast'}

    def fake_update_cluster_status(self):
        return

    def test_create_with_qos_type(self):
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request)
        self.stubs.Set(SolidFire, '_set_qos_by_volume_type',
                       self.fake_set_qos_by_volume_type)
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                   'volume_type_id': 'fast'}
        sfv = SolidFire(configuration=self.configuration)
        model_update = sfv.create_volume(testvol)
        self.assertNotEqual(model_update, None)

    def test_create_volume(self):
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request)
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                   'volume_type_id': None}
        sfv = SolidFire(configuration=self.configuration)
        model_update = sfv.create_volume(testvol)
        self.assertNotEqual(model_update, None)

    def test_create_volume_with_qos(self):
        preset_qos = {}
        preset_qos['qos'] = 'fast'
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request)
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                   'metadata': [preset_qos],
                   'volume_type_id': None}
        sfv = SolidFire(configuration=self.configuration)
        model_update = sfv.create_volume(testvol)
        self.assertNotEqual(model_update, None)

    def test_create_volume_fails(self):
        # NOTE(JDG) This test just fakes update_cluster_status
        # this is inentional for this test
        self.stubs.Set(SolidFire, '_update_cluster_status',
                       self.fake_update_cluster_status)
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request_fails)
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
        sfv = SolidFire(configuration=self.configuration)
        try:
            sfv.create_volume(testvol)
            self.fail("Should have thrown Error")
        except Exception:
            pass

    def test_create_sfaccount(self):
        sfv = SolidFire(configuration=self.configuration)
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request)
        account = sfv._create_sfaccount('project-id')
        self.assertNotEqual(account, None)

    def test_create_sfaccount_fails(self):
        sfv = SolidFire(configuration=self.configuration)
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request_fails)
        account = sfv._create_sfaccount('project-id')
        self.assertEqual(account, None)

    def test_get_sfaccount_by_name(self):
        sfv = SolidFire(configuration=self.configuration)
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request)
        account = sfv._get_sfaccount_by_name('some-name')
        self.assertNotEqual(account, None)

    def test_get_sfaccount_by_name_fails(self):
        sfv = SolidFire(configuration=self.configuration)
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request_fails)
        account = sfv._get_sfaccount_by_name('some-name')
        self.assertEqual(account, None)

    def test_delete_volume(self):
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request)
        testvol = {'project_id': 'testprjid',
                   'name': 'test_volume',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
        sfv = SolidFire(configuration=self.configuration)
        sfv.delete_volume(testvol)

    def test_delete_volume_fails_no_volume(self):
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request)
        testvol = {'project_id': 'testprjid',
                   'name': 'no-name',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
        sfv = SolidFire(configuration=self.configuration)
        try:
            sfv.delete_volume(testvol)
            self.fail("Should have thrown Error")
        except Exception:
            pass

    def test_delete_volume_fails_account_lookup(self):
        # NOTE(JDG) This test just fakes update_cluster_status
        # this is inentional for this test
        self.stubs.Set(SolidFire, '_update_cluster_status',
                       self.fake_update_cluster_status)
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request_fails)
        testvol = {'project_id': 'testprjid',
                   'name': 'no-name',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
        sfv = SolidFire(configuration=self.configuration)
        self.assertRaises(exception.SfAccountNotFound,
                          sfv.delete_volume,
                          testvol)

    def test_get_cluster_info(self):
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request)
        sfv = SolidFire(configuration=self.configuration)
        sfv._get_cluster_info()

    def test_get_cluster_info_fail(self):
        # NOTE(JDG) This test just fakes update_cluster_status
        # this is inentional for this test
        self.stubs.Set(SolidFire, '_update_cluster_status',
                       self.fake_update_cluster_status)
        self.stubs.Set(SolidFire, '_issue_api_request',
                       self.fake_issue_api_request_fails)
        sfv = SolidFire(configuration=self.configuration)
        self.assertRaises(exception.SolidFireAPIException,
                          sfv._get_cluster_info)
| |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper class for instrumenation test jar."""
import collections
import logging
import os
import pickle
import re
from pylib import cmd_helper
from pylib import constants
# If you change the cached output of proguard, increment this number
PICKLE_FORMAT_VERSION = 1
class TestJar(object):
  """Extracts test methods and their annotations from an instrumentation jar.

  Runs proguard over the jar to dump classes, methods and annotations, and
  caches the parsed result in a pickle file next to the jar.

  Bug fix: the cache loader used a bare ``except:``, which would also
  swallow KeyboardInterrupt/SystemExit; it now catches ``Exception``.
  """
  _ANNOTATIONS = frozenset(
      ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', 'EnormousTest',
       'FlakyTest', 'DisabledTest', 'Manual', 'PerfTest', 'HostDrivenTest'])
  _DEFAULT_ANNOTATION = 'SmallTest'
  _PROGUARD_CLASS_RE = re.compile(r'\s*?- Program class:\s*([\S]+)$')
  _PROGUARD_METHOD_RE = re.compile(r'\s*?- Method:\s*(\S*)[(].*$')
  _PROGUARD_ANNOTATION_RE = re.compile(r'\s*?- Annotation \[L(\S*);\]:$')
  _PROGUARD_ANNOTATION_CONST_RE = (
      re.compile(r'\s*?- Constant element value.*$'))
  _PROGUARD_ANNOTATION_VALUE_RE = re.compile(r'\s*?- \S+? \[(.*)\]$')

  def __init__(self, jar_path):
    if not os.path.exists(jar_path):
      raise Exception('%s not found, please build it' % jar_path)
    sdk_root = os.getenv('ANDROID_SDK_ROOT', constants.ANDROID_SDK_ROOT)
    self._PROGUARD_PATH = os.path.join(sdk_root,
                                       'tools/proguard/bin/proguard.sh')
    if not os.path.exists(self._PROGUARD_PATH):
      self._PROGUARD_PATH = os.path.join(os.environ['ANDROID_BUILD_TOP'],
                                         'external/proguard/bin/proguard.sh')
    self._jar_path = jar_path
    self._annotation_map = collections.defaultdict(list)
    self._pickled_proguard_name = self._jar_path + '-proguard.pickle'
    self._test_methods = []
    if not self._GetCachedProguardData():
      self._GetProguardData()

  def _GetCachedProguardData(self):
    """Loads cached proguard analysis if it is newer than the jar.

    Returns:
      True if a valid cache was loaded, False otherwise.
    """
    if (os.path.exists(self._pickled_proguard_name) and
        (os.path.getmtime(self._pickled_proguard_name) >
         os.path.getmtime(self._jar_path))):
      logging.info('Loading cached proguard output from %s',
                   self._pickled_proguard_name)
      try:
        with open(self._pickled_proguard_name, 'r') as r:
          d = pickle.loads(r.read())
        if d['VERSION'] == PICKLE_FORMAT_VERSION:
          self._annotation_map = d['ANNOTATION_MAP']
          self._test_methods = d['TEST_METHODS']
          return True
      except Exception:
        # Any unpickling or schema error simply invalidates the cache.
        logging.warning('PICKLE_FORMAT_VERSION has changed, ignoring cache')
    return False

  def _GetProguardData(self):
    """Runs proguard over the jar, parses its dump into the annotation map
    and test-method list, then writes the pickle cache."""
    proguard_output = cmd_helper.GetCmdOutput([self._PROGUARD_PATH,
                                               '-injars', self._jar_path,
                                               '-dontshrink',
                                               '-dontoptimize',
                                               '-dontobfuscate',
                                               '-dontpreverify',
                                               '-dump',
                                               ]).split('\n')
    clazz = None
    method = None
    annotation = None
    has_value = False
    qualified_method = None
    for line in proguard_output:
      m = self._PROGUARD_CLASS_RE.match(line)
      if m:
        clazz = m.group(1).replace('/', '.')  # Change package delim.
        annotation = None
        continue
      m = self._PROGUARD_METHOD_RE.match(line)
      if m:
        method = m.group(1)
        annotation = None
        qualified_method = clazz + '#' + method
        if method.startswith('test') and clazz.endswith('Test'):
          self._test_methods += [qualified_method]
        continue
      if not qualified_method:
        # Ignore non-method annotations.
        continue
      m = self._PROGUARD_ANNOTATION_RE.match(line)
      if m:
        annotation = m.group(1).split('/')[-1]  # Ignore the annotation package.
        self._annotation_map[qualified_method].append(annotation)
        has_value = False
        continue
      if annotation:
        if not has_value:
          m = self._PROGUARD_ANNOTATION_CONST_RE.match(line)
          if m:
            has_value = True
        else:
          m = self._PROGUARD_ANNOTATION_VALUE_RE.match(line)
          if m:
            value = m.group(1)
            self._annotation_map[qualified_method].append(
                annotation + ':' + value)
          has_value = False
    logging.info('Storing proguard output to %s', self._pickled_proguard_name)
    d = {'VERSION': PICKLE_FORMAT_VERSION,
         'ANNOTATION_MAP': self._annotation_map,
         'TEST_METHODS': self._test_methods}
    with open(self._pickled_proguard_name, 'w') as f:
      f.write(pickle.dumps(d))

  def _GetAnnotationMap(self):
    """Returns the {qualified_method: [annotation, ...]} map."""
    return self._annotation_map

  def _IsTestMethod(self, test):
    """True for methods named test* on classes named *Test."""
    class_name, method = test.split('#')
    return class_name.endswith('Test') and method.startswith('test')

  def GetTestAnnotations(self, test):
    """Returns a list of all annotations for the given |test|. May be empty."""
    if not self._IsTestMethod(test):
      return []
    return self._GetAnnotationMap()[test]

  def _AnnotationsMatchFilters(self, annotation_filter_list, annotations):
    """Checks if annotations match any of the filters."""
    if not annotation_filter_list:
      return True
    for annotation_filter in annotation_filter_list:
      filters = annotation_filter.split('=')
      if len(filters) == 2:
        key = filters[0]
        value_list = filters[1].split(',')
        for value in value_list:
          if key + ':' + value in annotations:
            return True
      elif annotation_filter in annotations:
        return True
    return False

  def GetAnnotatedTests(self, annotation_filter_list):
    """Returns a list of all tests that match the given annotation filters."""
    return [test for test, annotations in self._GetAnnotationMap().iteritems()
            if self._IsTestMethod(test) and self._AnnotationsMatchFilters(
                annotation_filter_list, annotations)]

  def GetTestMethods(self):
    """Returns a list of all test methods in this apk as Class#testMethod."""
    return self._test_methods

  def _GetTestsMissingAnnotation(self):
    """Get a list of test methods with no known annotations."""
    tests_missing_annotations = []
    for test_method in self.GetTestMethods():
      annotations_ = frozenset(self.GetTestAnnotations(test_method))
      if (annotations_.isdisjoint(self._ANNOTATIONS) and
          not self.IsHostDrivenTest(test_method)):
        tests_missing_annotations.append(test_method)
    return sorted(tests_missing_annotations)

  def _GetAllMatchingTests(self, annotation_filter_list,
                           exclude_annotation_list, test_filter):
    """Get a list of tests matching any of the annotations and the filter.

    Args:
      annotation_filter_list: List of test annotations. A test must have at
        least one of these annotations. A test without any annotations is
        considered to be SmallTest.
      exclude_annotation_list: List of test annotations. A test must not have
        any of these annotations.
      test_filter: Filter used for partial matching on the test method names.

    Returns:
      List of all matching tests.
    """
    if annotation_filter_list:
      available_tests = self.GetAnnotatedTests(annotation_filter_list)
      # Include un-annotated tests in SmallTest.
      if annotation_filter_list.count(self._DEFAULT_ANNOTATION) > 0:
        for test in self._GetTestsMissingAnnotation():
          logging.warning(
              '%s has no annotations. Assuming "%s".', test,
              self._DEFAULT_ANNOTATION)
          available_tests.append(test)
      if exclude_annotation_list:
        excluded_tests = self.GetAnnotatedTests(exclude_annotation_list)
        available_tests = list(set(available_tests) - set(excluded_tests))
    else:
      available_tests = [m for m in self.GetTestMethods()
                         if not self.IsHostDrivenTest(m)]
    tests = []
    if test_filter:
      # |available_tests| are in adb instrument format: package.path.class#test.
      filter_without_hash = test_filter.replace('#', '.')
      tests = [t for t in available_tests
               if filter_without_hash in t.replace('#', '.')]
    else:
      tests = available_tests
    return tests

  @staticmethod
  def IsHostDrivenTest(test):
    return 'pythonDrivenTests' in test
| |
# Copyright (c) 2016, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Reader for Neurolucida .ASC files, v3.
reversed engineered from looking at output from Neuroludica
"""
import warnings
from io import open
import numpy as np
from neurom.core.dataformat import COLS, POINT_TYPE
from .datawrapper import DataWrapper
# Map of section names we parse to their neurom point type.
WANTED_SECTIONS = {
    'CellBody': POINT_TYPE.SOMA,
    'Axon': POINT_TYPE.AXON,
    'Dendrite': POINT_TYPE.BASAL_DENDRITE,
    'Apical': POINT_TYPE.APICAL_DENDRITE,
}
# Section names that carry no geometry and are dropped while parsing.
# BUG FIX: 'FilledUpTriangle' was listed twice; the duplicate entry is removed.
UNWANTED_SECTION_NAMES = [
    # Meta-data?
    'Closed', 'Color', 'FillDensity', 'GUID', 'ImageCoords', 'MBFObjectType',
    'Marker', 'Name', 'Resolution', 'Set', 'Description',
    # Marker names?
    'Asterisk', 'Cross', 'Dot', 'DoubleCircle', 'FilledCircle', 'FilledDownTriangle',
    'FilledSquare', 'FilledStar', 'FilledUpTriangle', 'Flower',
    'Flower2', 'OpenCircle', 'OpenDiamond', 'OpenDownTriangle', 'OpenSquare', 'OpenStar',
    'OpenUpTriangle', 'Plus', 'ShadedStar', 'Splat', 'TriStar',
]
# Lookup used with _match_section(); the values are just a truthy flag.
UNWANTED_SECTIONS = {name: True for name in UNWANTED_SECTION_NAMES}
def _match_section(section, match):
"""Checks whether the `type` of section is in the `match` dictionary.
Works around the unknown ordering of s-expressions in each section.
For instance, the `type` is the 3-rd one in for CellBodies
("CellBody"
(Color Yellow)
(CellBody)
(Set "cell10")
)
Returns:
value associated with match[section_type], None if no match
"""
# TODO: rewrite this so it is more clear, and handles sets & dictionaries for matching
for i in range(5):
if i >= len(section):
return None
if isinstance(section[i], str) and section[i] in match:
return match[section[i]]
return None
def _get_tokens(morph_fd):
"""Split a file-like into tokens: split on whitespace.
Note: this also strips newlines and comments
"""
for line in morph_fd:
line = line.rstrip() # remove \r\n
line = line.split(';', 1)[0] # strip comments
squash_token = [] # quoted strings get squashed into one token
if '<(' in line: # skip spines, which exist on a single line
assert ')>' in line, 'Missing end of spine'
# The following line is covered but 'tox -e coverage does not see it'
# TODO: find out why
continue # pragma: no cover
for token in line.replace('(', ' ( ').replace(')', ' ) ').split():
if squash_token:
squash_token.append(token)
if token.endswith('"'):
token = ' '.join(squash_token)
squash_token = []
yield token
elif token.startswith('"') and not token.endswith('"'):
squash_token.append(token)
else:
yield token
def _parse_section(token_iter):
    """Create a tree structure (defined by the s-expressions) from a stream of tokens.

    Consumes tokens up to (and including) the ')' that closes the current
    s-expression; unwanted child sections are filtered out as they are built.
    """
    expression = []
    for tok in token_iter:
        if tok == ')':
            return expression
        if tok == '(':
            child = _parse_section(token_iter)
            if not _match_section(child, UNWANTED_SECTIONS):
                expression.append(child)
        else:
            expression.append(tok)
    # ran out of tokens without a closing ')': return what we have
    return expression
def _parse_sections(morph_fd):
    """Returns array of all the sections that exist.

    The format is nested lists that correspond to the s-expressions
    """
    sections = []
    tokens = _get_tokens(morph_fd)
    # only a '(' seen at the top level opens a new section
    for token in tokens:
        if token != '(':
            continue
        parsed = _parse_section(tokens)
        if not _match_section(parsed, UNWANTED_SECTIONS):
            sections.append(parsed)
    return sections
def _flatten_subsection(subsection, _type, offset, parent):
    """Flatten a subsection from its nested version.

    Args:
        subsection: Nested subsection as produced by _parse_section, except one level in
        _type: type of section, ie: AXON, etc
        parent: first element has this as it's parent
        offset: position in the final array of the first element

    Returns:
        Generator of values corresponding to [X, Y, Z, R, TYPE, ID, PARENT_ID]
    """
    for row in subsection:
        # TODO: Figure out what these correspond to in neurolucida
        if row in ('Low', 'Generated', 'High', ):
            continue
        if isinstance(row[0], str):
            # leaf row: [x, y, z, d] (optionally with a 5th 'Sn' tag)
            if len(row) in (4, 5, ):
                if len(row) == 5:
                    assert row[4][0] == 'S', \
                        'Only known usage of a fifth member is Sn, found: %s' % row[4][0]
                # row[3] is halved — presumably a diameter converted to a
                # radius; TODO confirm against Neurolucida output
                yield (float(row[0]), float(row[1]), float(row[2]), float(row[3]) / 2.,
                       _type, offset, parent)
                # subsequent points chain off this one
                parent = offset
                offset += 1
        elif isinstance(row[0], list):
            # nested row: a bifurcation; sibling branches are separated by '|'
            # and all attach to the point emitted just before this row
            split_parent = offset - 1
            start_offset = 0
            slices = []
            start = 0
            for i, value in enumerate(row):
                if value == '|':
                    slices.append(slice(start + start_offset, i))
                    start = i + 1
            # NOTE(review): start_offset is always 0 here — it looks vestigial
            slices.append(slice(start + start_offset, len(row)))
            for split_slice in slices:
                for _row in _flatten_subsection(row[split_slice], _type, offset,
                                                split_parent):
                    # keep `offset` in sync with the ids handed out recursively
                    offset += 1
                    yield _row
def _extract_section(section):
    """Find top level sections, and get their flat contents, and append them all.

    Returns a numpy array with the row format:
        [X, Y, Z, R, TYPE, ID, PARENT_ID]

    Note: PARENT_ID starts at -1 for soma and 0 for neurites
    """
    # sections with only one element carry no geometry and are skipped
    if len(section) == 1:
        assert section[0] == 'Sections', \
            ('Only known usage of a single Section content is "Sections", found %s' %
             section[0])
        return None

    # try and detect type; CellBody often has [['"CellBody"'], ['CellBody']
    # as its first two elements, so fall back to the second element
    section_type = WANTED_SECTIONS.get(section[0][0], None)
    first_point = 1
    if section_type is None:
        section_type = WANTED_SECTIONS.get(section[1][0], None)
        if section_type is None:  # can't determine the type
            return None
        first_point = 2

    root_parent = -1 if section_type == POINT_TYPE.SOMA else 0
    rows = list(_flatten_subsection(section[first_point:], section_type, offset=0,
                                    parent=root_parent))
    return np.array(rows)
def _sections_to_raw_data(sections):
    """Convert list of sections into the `raw_data` format used in neurom.

    This finds the soma, and attaches the neurites
    """
    soma = None
    neurites = []
    for section in sections:
        neurite = _extract_section(section)
        if neurite is None:
            continue
        if neurite[0][COLS.TYPE] == POINT_TYPE.SOMA:
            assert soma is None, 'Multiple somas defined in file'
            soma = neurite
        else:
            neurites.append(neurite)
    assert soma is not None, 'Missing CellBody element (ie. soma)'
    # soma rows come first, then each neurite in turn
    total_length = len(soma) + sum(len(neurite) for neurite in neurites)
    ret = np.zeros((total_length, 7,), dtype=np.float64)
    pos = len(soma)
    ret[0:pos, :] = soma
    for neurite in neurites:
        end = pos + len(neurite)
        ret[pos:end, :] = neurite
        # ids/parents were generated per-neurite starting at 0; shift them to
        # their position in the concatenated array
        ret[pos:end, COLS.P] += pos
        ret[pos:end, COLS.ID] += pos
        # TODO: attach the neurite at the closest point on the soma
        ret[pos, COLS.P] = len(soma) - 1
        pos = end
    return ret
def read(morph_file, data_wrapper=DataWrapper):
    """Return a DataWrapper object.

    It is 'raw_data' np.array with the full neuron, and the format of the file
    suitable to be wrapped by DataWrapper
    """
    warnings.warn('This is an experimental reader. '
                  'There are no guarantees regarding ability to parse '
                  'Neurolucida .asc files or correctness of output.')
    with open(morph_file, encoding='utf-8', errors='replace') as morph_fd:
        parsed_sections = _parse_sections(morph_fd)
    raw_data = _sections_to_raw_data(parsed_sections)
    return data_wrapper(raw_data, 'NL-ASCII')
| |
#!/usr/bin/env python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Merges Noto fonts."""
import os.path
import tempfile
from fontTools import merge
from fontTools import ttLib
from fontTools.ttLib.tables import otTables
from fontTools.unicodedata import ot_tags_from_script
def make_font_name(script):
    """Return the family name for *script*; a falsy script means the plain LGC font."""
    return 'Noto Sans %s' % script if script else 'Noto Sans'
def make_puncless_font_name(script):
    """Family name with spaces and hyphens stripped, as used in file names."""
    name = make_font_name(script)
    for ch in (' ', '-'):
        name = name.replace(ch, '')
    return name
def make_font_file_name(script, weight, directory='individual/unhinted'):
    """Return the .ttf path for *script* at *weight* under *directory*."""
    basename = '%s-%s.ttf' % (make_puncless_font_name(script), weight)
    return '%s/%s' % (directory, basename)
def add_ui_alternative(table, target):
    """Add a parallel '<target> UI' merge entry built from the UI source variants."""
    table[target + ' UI'] = ['%s UI' % source for source in table[target]]
def has_gsub_table(fontfile):
    """Return True when the font at *fontfile* already contains a GSUB table."""
    return 'GSUB' in ttLib.TTFont(fontfile)
SCRIPT_TO_OPENTYPE_SCRIPT_TAG = {
'CypriotSyllabary': 'cprt',
'Deseret': 'dsrt',
'Glagolitic': 'glag',
'Lisu': 'lisu',
'Ogham': 'ogam',
'OldItalic': 'ital',
'Runic': 'runr',
'Shavian': 'shaw',
'Vai': 'vai ',
'Carian': 'cari',
'EgyptianHieroglyphs': 'egyp',
'ImperialAramaic': 'armi',
'LinearB': 'linb',
'Lycian': 'lyci',
'Lydian': 'lydi',
'OldPersian': 'xpeo',
'OldSouthArabian': 'sarb',
'OldTurkic': 'orkh',
'Osmanya': 'osma',
'Phoenician': 'phnx',
'SumeroAkkadianCuneiform': 'xsux',
'Ugaritic': 'ugar',
'OlChiki': 'olck',
'TaiLe': 'tale',
# Following keys are added to satisfy the use case in merge_fonts.py
# Reference:
# https://www.google.com/get/noto/#sans-xsux
# https://www.google.com/get/noto/#sans-cprt
# https://www.google.com/get/noto/#sans-yiii
# https://www.microsoft.com/typography/otspec/scripttags.htm
'Cuneiform': 'xsux',
'Cypriot': 'cprt',
'Yi': 'yi ',
}
def get_opentype_script_tags(fontfile):
    """Return the OpenType script tags the font at *fontfile* covers.

    First derives ISO script codes from the parent directory name (expected
    shape '<x><iso1>__<iso2>...<y>'); if that yields nothing, falls back to
    parsing the script out of the font file name via the lookup table.
    """
    scripts = []
    dirname = os.path.split(os.path.dirname(fontfile))[1]
    isoscripts = dirname[1:-1].split('__')
    for isoscript in isoscripts:
        scripts += ot_tags_from_script(isoscript)
    if not len(scripts):
        fontfile = os.path.basename(fontfile)
        # BUG FIX: this test used to be startswith('NotoSans-'), which only
        # matches the script-less LGC font ('NotoSans-Regular.ttf').  Scripted
        # fonts are named e.g. 'NotoSansOldTurkic-Regular.ttf', so the prefix
        # was never stripped and every table lookup fell through to 'DFLT'.
        if fontfile.startswith('NotoSans'):
            fontfile = fontfile[len('NotoSans'):]
        fontfile = fontfile[:fontfile.index('-')]
        scripts = [SCRIPT_TO_OPENTYPE_SCRIPT_TAG.get(fontfile, 'DFLT')]
    return scripts
def add_gsub_to_font(fontfile):
    """Adds an empty GSUB table to a font.

    The table carries one ScriptRecord per script the font covers (each with
    an empty DefaultLangSys) and empty feature/lookup lists.  The modified
    font is written to the temp directory and its path is returned.
    """
    font = ttLib.TTFont(fontfile)
    gsub_table = ttLib.getTableClass('GSUB')('GSUB')
    gsub_table.table = otTables.GSUB()
    gsub_table.table.Version = 1.0
    gsub_table.table.ScriptList = otTables.ScriptList()
    gsub_table.table.LookupList = otTables.LookupList()
    gsub_table.table.LookupList.LookupCount = 0
    gsub_table.table.LookupList.Lookup = []
    gsub_table.table.FeatureList = otTables.FeatureList()
    gsub_table.table.FeatureList.FeatureCount = 0
    # BUG FIX: FeatureRecord belongs on FeatureList; it was previously set on
    # LookupList, leaving FeatureList without its record list.
    gsub_table.table.FeatureList.FeatureRecord = []

    script_records = []
    for script in get_opentype_script_tags(fontfile):
        script_record = otTables.ScriptRecord()
        script_record.ScriptTag = script
        script_record.Script = otTables.Script()
        script_record.Script.LangSysCount = 0
        script_record.Script.LangSysRecord = []
        default_lang_sys = otTables.DefaultLangSys()
        default_lang_sys.FeatureIndex = []
        default_lang_sys.FeatureCount = 0
        default_lang_sys.LookupOrder = None
        # 0xFFFF means "no required feature"
        default_lang_sys.ReqFeatureIndex = 65535
        script_record.Script.DefaultLangSys = default_lang_sys
        script_records.append(script_record)
    # BUG FIX: the loop used to reassign ScriptRecord = [script_record] on
    # every iteration, so only the last script survived, while ScriptCount
    # was hard-coded to 1.  Collect all records and keep the count in sync.
    gsub_table.table.ScriptList.ScriptRecord = script_records
    gsub_table.table.ScriptList.ScriptCount = len(script_records)
    font['GSUB'] = gsub_table

    target_file = tempfile.gettempdir() + '/' + os.path.basename(fontfile)
    font.save(target_file)
    return target_file
def main():
    """Merge per-script Noto fonts into combined regional fonts.

    For each merge target ('Historic', 'South Asian', ..., '' for LGC) and
    each weight, the listed per-script fonts that exist on disk are merged
    (after ensuring every source has a GSUB table), the name records are
    rewritten to the target family name, and the result is written under
    combined/unhinted.
    """
    # merge target -> list of human-readable script names to fold in
    merge_table = {
        'Historic': [
            'Avestan',
            'Carian',
            'Egyptian Hieroglyphs',
            'Imperial Aramaic',
            'Pahlavi',  # Should be 'Inscriptional Pahlavi',
            'Parthian',  # Should be 'Inscriptional Parthian',
            'Linear B',
            'Lycian',
            'Lydian',
            'Mandaic',
            'Old Persian',
            'Old South Arabian',
            'Old Turkic',
            'Osmanya',
            'Phags-Pa',
            'Phoenician',
            'Samaritan',
            'Sumero-Akkadian Cuneiform',
            'Ugaritic',
        ],
        'South Asian': [
            'Devanagari',
            'Bengali',
            'Gurmukhi',
            'Gujarati',
            'Oriya',
            'Tamil',
            'Telugu',
            'Kannada',
            'Malayalam',
            'Sinhala',
            'Thaana',
            'Brahmi',
            'Kaithi',
            'Kharoshthi',  # Move to Historic?
            'Lepcha',
            'Limbu',
            'Meetei Mayek',
            'Ol Chiki',
            'Saurashtra',
            'Syloti Nagri',
        ],
        'Southeast Asian': [
            'Thai',
            'Lao',
            'Khmer',
            'Batak',
            'Buginese',
            'Buhid',
            'Cham',
            'Hanunoo',
            'Javanese',
            'Kayah Li',
            'New Tai Lue',
            'Rejang',
            'Sundanese',
            'Tagalog',
            'Tagbanwa',
            'Tai Le',
            'Tai Tham',
            'Tai Viet',
        ],
        '': [  # LGC,
            'Armenian',
            'Bamum',
            'Canadian Aboriginal',
            'Cherokee',
            'Coptic',
            'Cypriot Syllabary',
            'Deseret',
            'Ethiopic',
            'Georgian',
            'Glagolitic',
            'Gothic',
            'Hebrew',
            'Lisu',
            'NKo',
            'Ogham',
            'Old Italic',
            'Runic',
            'Shavian',
            'Tifinagh',
            'Vai',
        ],
    }
    # the UI variants of these families are merged from the UI sources
    add_ui_alternative(merge_table, 'South Asian')
    add_ui_alternative(merge_table, 'Southeast Asian')
    for merge_target in sorted(merge_table):
        for weight in ['Regular', 'Bold']:
            merger = merge.Merger()
            source_fonts = merge_table[merge_target]
            if '' not in source_fonts:
                source_fonts = [''] + source_fonts  # The LGC font
            regular_sources = [make_font_file_name(script, weight)
                               for script in source_fonts]
            # keep only the fonts actually present on disk
            regular_sources = [font
                               for font in regular_sources
                               if os.path.isfile(font)]
            if len(regular_sources) <= 1:
                continue
            print('Merging Noto Sans %s %s' % (merge_target, weight))
            # fontTools' merger requires every source to have a GSUB table
            for index, fontfile in enumerate(regular_sources):
                if not has_gsub_table(fontfile):
                    regular_sources[index] = add_gsub_to_font(fontfile)
            font = merger.merge(regular_sources)
            # rewrite name records from the first source's family name to the
            # merge target's family name
            first_font = source_fonts[0]
            if first_font != merge_target:
                for name_record in font['name'].names:
                    # NOTE(review): `unicode` is Python 2 only — under
                    # Python 3 this would need
                    # name_record.string.decode('UTF-16BE') (or
                    # name_record.toUnicode()); confirm the target interpreter.
                    name = unicode(name_record.string, 'UTF-16BE')
                    name = name.replace(make_font_name(first_font),
                                        make_font_name(merge_target))
                    name = name.replace(make_puncless_font_name(first_font),
                                        make_puncless_font_name(merge_target))
                    name_record.string = name.encode('UTF-16BE')
            font.save(make_font_file_name(
                merge_target,
                weight,
                directory='combined/unhinted'))
# Run the merge only when executed as a script.
if __name__ == '__main__':
    main()
| |
# jasoncg
# 2015-02-22
#
# mlperceptron.py - A multilayer perceptron implementation in Python
#
# This implementation is designed for readability, not performance.
# Each layer in the perceptron is stored as a Perceptron instance. Each
# neuron is a separate Neuron instance.
#
# Example:
# Generate a new random perceptron that takes 2 inputs and has 40 neurons
# in the first layer, then add a 40 neuron hidden layer and a 1 neuron
# output layer. Since the output layer has only 1 neuron, the network
# outputs only one value
#
# p=Perceptron.new_perceptron_random(2, 40)
# p.add_next_layer(40)
# p.add_next_layer(1)
#
# Train the network with backpropagation. This function takes three inputs:
# - A list of the input data
# - A list of the output data
# - The learning rate
# This particular example should train the network to AND two boolean values together.
#
# for i in range(0, 1000):
# p.backpropagate([0,0], [0], 0.1)
# p.backpropagate([0,1], [0], 0.1)
# p.backpropagate([1,0], [0], 0.1)
# p.backpropagate([1,1], [1], 0.1)
#
# Test the trained network. The evaluate function takes a list of input data and
# returns a list of the calculated result
#
#	print("%s %s" %([0,0], p.evaluate([0,0])))	# Should be 0
#	print("%s %s" %([0,1], p.evaluate([0,1])))	# Should be 0
#	print("%s %s" %([1,0], p.evaluate([1,0])))	# Should be 0
#	print("%s %s" %([1,1], p.evaluate([1,1])))	# Should be 1
#
#
#
import timer
import random
import math
import numpy as np
import pyopencl as cl
import pyopencl.tools
import pyopencl.array
class Neuron:
    """A single neuron: a weight vector (bias first) plus training state."""

    def __init__(self, index, weights, perceptron_layer):
        # `index` is the neuron's position in its layer; accepted for
        # compatibility with existing callers but not stored (the original
        # ignored it as well).
        self.weights = weights          # weights[0] is the bias weight
        self.layer = perceptron_layer   # owning Perceptron layer
        self.last_guessed = None        # activation from the last evaluate()
        self.error = None               # error term from the last backprop pass
        self.dropout = False            # True when dropped for the current pass

    @staticmethod
    def tanh(input):
        return math.tanh(input)

    @staticmethod
    def dxtanh(input):
        """Derivative of tanh at pre-activation `input`."""
        return 1.0 - math.pow(Neuron.tanh(input), 2.0)

    @staticmethod
    def sigmoid(input):
        """Logistic sigmoid that saturates instead of overflowing."""
        try:
            ex = math.exp(-input)
            return 1.0 / (1.0 + ex)
        except OverflowError:
            # math.exp overflows only for large positive arguments, i.e. when
            # `input` is a large negative number (exp(-input) underflows to 0
            # for large positive `input` without raising).
            # BUG FIX: the original branches were inverted —
            # lim x->-inf sigmoid(x) = 0 and lim x->+inf sigmoid(x) = 1.
            return 0.0 if input < 0 else 1.0

    @staticmethod
    def dxsigmoid(input):
        """Derivative of the sigmoid at pre-activation `input`: s * (1 - s)."""
        s = Neuron.sigmoid(input)
        return s * (1 - s)

    @staticmethod
    def activate(input):
        return Neuron.sigmoid(input)

    @staticmethod
    def dxactivate(input):
        return Neuron.dxsigmoid(input)

    def last_guessedb(self):
        """Last activation thresholded to a binary 0/1."""
        if self.last_guessed < 0.5:
            return 0
        return 1

    @staticmethod
    def to_output(output):
        """Threshold an activation value to a binary 0/1."""
        if output < 0.5:
            return 0
        return 1

    def evaluate(self, inputs):
        """Bias plus weighted sum of `inputs`, passed through the activation."""
        self.dropout = False
        result = 1 * self.weights[0]  # bias contribution
        for i in range(1, len(self.weights)):
            result += inputs[i - 1] * self.weights[i]
        self.last_guessed = Neuron.activate(result)
        return self.last_guessed

    def calculate_error(self, expected):
        """Output-layer error: target minus last activation."""
        self.error = expected - self.last_guessed
        return self.error

    def backpropagate(self, err):
        """Scale the incoming error by the activation derivative at the output.

        BUG FIX: `self.last_guessed` is already the post-activation output, so
        the sigmoid derivative is simply s * (1 - s).  The original called
        dxactivate(last_guessed), which applies sigmoid a second time.
        """
        self.error = err * (self.last_guessed * (1 - self.last_guessed))
        return self.error

    def backpropagate_update_weights(self, inputs, learning_rate):
        """Apply the delta rule to the bias and weights, then re-evaluate."""
        # First, update the bias
        self.weights[0] = self.weights[0] + learning_rate * self.error
        for i in range(1, len(self.weights)):
            change = learning_rate * self.error * inputs[i - 1]
            self.weights[i] = self.weights[i] + change
        return self.evaluate(inputs)
class Perceptron:
    """One layer of a multilayer perceptron, linked to its neighbour layers.

    Layers form a doubly linked list via `layer_prev`/`layer_next`; calling
    evaluate()/backpropagate() on the first layer recurses down the chain.
    """

    @staticmethod
    def generate_weights(input_count, neuron_count):
        """Random weights in [-1, 1): (input_count + 1) values per neuron (bias first)."""
        length = neuron_count * (input_count + 1)
        return [random.random() * 2.0 - 1.0 for _ in range(length)]

    @staticmethod
    def new_perceptron_random(input_count, neuron_count, perceptron_layer_prev=None):
        """Build a layer with freshly generated random weights."""
        weights = Perceptron.generate_weights(input_count, neuron_count)
        return Perceptron(input_count, neuron_count, weights, perceptron_layer_prev)

    def get_weight_count(self):
        """Total number of weights (including biases) held by this layer."""
        return sum(len(neuron.weights) for neuron in self.neurons)

    def __init__(self, input_count, neuron_count, weights, perceptron_layer_prev=None):
        """Slice the flat `weights` list into per-neuron chunks of input_count + 1."""
        self.neurons = neuron_count * [None]
        self.layer_prev = perceptron_layer_prev
        self.layer_next = None
        # depth of this layer in the chain (0 for the input layer)
        # BUG FIX (idiom): None comparisons use `is`, not `==`
        if self.layer_prev is None:
            self.index = 0
        else:
            self.index = self.layer_prev.index + 1
        weight_index = 0
        weights_per_neuron = input_count + 1  # +1 for the bias
        for i in range(neuron_count):
            chunk = weights[weight_index:weight_index + weights_per_neuron]
            self.neurons[i] = Neuron(i, chunk, self)
            weight_index += weights_per_neuron

    def add_next_layer(self, neuron_count, weights=None):
        """Append a layer of `neuron_count` neurons at the end of the chain."""
        if self.layer_next is not None:
            return self.layer_next.add_next_layer(neuron_count, weights)
        w = weights
        if w is None:
            w = Perceptron.generate_weights(len(self.neurons), neuron_count)
        # renamed from `next` to avoid shadowing the builtin
        new_layer = Perceptron(len(self.neurons), neuron_count, w, self)
        self.layer_next = new_layer
        return new_layer

    def evaluate_error(self, inputs, expected):
        """Fraction of output neurons whose thresholded output differs from `expected`."""
        results = self.evaluate(inputs)
        error = 0
        for i in range(len(expected)):
            if results[i] != expected[i]:
                error += 1
        error = error / len(expected)
        return error

    def evaluate(self, inputs, dropout_rate=0.0):
        """Feed `inputs` through this layer and the rest of the chain.

        Dropout applies only to hidden layers; the final (output) layer
        thresholds each activation to 0/1 via Neuron.to_output.
        """
        results = len(self.neurons) * [0]
        layer_dropout_rate = 0.0
        # If this is a hidden layer, apply dropout (if set)
        if self.layer_prev is not None and self.layer_next is not None:
            layer_dropout_rate = dropout_rate
        for i in range(len(self.neurons)):
            # short-circuit keeps random.random() from being drawn when the
            # rate is zero, matching the original call sequence exactly
            if layer_dropout_rate > 0 and random.random() <= layer_dropout_rate:
                self.neurons[i].dropout = True
                results[i] = 0
                continue
            self.neurons[i].dropout = False
            results[i] = self.neurons[i].evaluate(inputs)
        if self.layer_next is not None:
            return self.layer_next.evaluate(results, dropout_rate)
        # output layer: convert to boolean outputs
        for i in range(len(results)):
            results[i] = Neuron.to_output(results[i])
        return results

    def get_error_for(self, previous_layer_neuron_index):
        """Error this layer feeds back to one neuron of the previous layer.

        En = Sum(Win * Ei) over this layer's non-dropped neurons; the +1 skips
        the bias weight at index 0, which is not fed by a neuron.
        """
        output = 0
        for neuron in self.neurons:
            if not neuron.dropout:
                output += (neuron.weights[previous_layer_neuron_index + 1]
                           * neuron.error)
        return output

    def backpropagate(self, inputs, expected, learning_rate):
        """One training pass: forward evaluate, then propagate errors back.

        Must be called on the first (input) layer; `inputs` is ignored by the
        recursive calls on deeper layers.  Returns the last error computed.
        """
        if self.layer_prev is None:
            # Input layer: run the forward pass first
            self.evaluate(inputs)
        e = 0
        if self.layer_next is not None:
            # Not output layer: recurse first, then pull error terms back
            self.layer_next.backpropagate(None, expected, learning_rate)
            for i in range(len(self.neurons)):
                if self.neurons[i].dropout:
                    continue
                err = self.layer_next.get_error_for(i)
                e = self.neurons[i].backpropagate(err)
        else:
            # Output layer: error against the expected values
            total_errors = 0
            for i in range(len(self.neurons)):
                e = self.neurons[i].calculate_error(expected[i])
                self.neurons[i].backpropagate(e)
                if self.neurons[i].last_guessedb() != expected[i]:
                    total_errors += 1
            # shrink the learning rate as accuracy improves, clamped to
            # [0.0001, 1.0]; note this local adjustment does not propagate
            # back to the callers' learning_rate (original behavior kept)
            learning_rate *= (1 - total_errors / len(self.neurons))
            if learning_rate > 1.0:
                learning_rate = 1.0
            elif learning_rate <= 0.0001:
                learning_rate = 0.0001
        if self.layer_prev is None:
            # Back at the input layer: now apply all weight updates forward
            self.backpropagate_update_weights(inputs, learning_rate)
        return e

    def backpropagate_update_weights(self, inputs, learning_rate):
        """Update this layer's weights, then feed fresh outputs to the next layer."""
        results = len(self.neurons) * [0]
        for i in range(len(self.neurons)):
            if not self.neurons[i].dropout:
                results[i] = self.neurons[i].backpropagate_update_weights(inputs,
                                                                          learning_rate)
            else:
                results[i] = 0
        if self.layer_next is not None:
            self.layer_next.backpropagate_update_weights(results, learning_rate)
# NOTE(review): stray module-level debug statement — builds an unused Neuron
# at import time; appears safe to delete.
n=Neuron(1,[],3)
def test_AND():
    """Train a 2-40-40-1 network on boolean AND and print before/after outputs."""
    net = Perceptron.new_perceptron_random(2, 40)
    net.add_next_layer(40)
    net.add_next_layer(1)
    cases = [([0, 0], [0]), ([0, 1], [0]), ([1, 0], [0]), ([1, 1], [1])]
    for sample, _ in cases:
        print("%s %s" % (sample, net.evaluate(sample)))
    for _ in range(1000):
        for sample, target in cases:
            net.backpropagate(sample, target, 0.1)
    for sample, _ in cases:
        print("%s %s" % (sample, net.evaluate(sample)))
def test_XOR():
    """Train a 2-40-40-1 network on boolean XOR and print before/after outputs."""
    net = Perceptron.new_perceptron_random(2, 40)
    net.add_next_layer(40)
    net.add_next_layer(1)
    cases = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
    for sample, _ in cases:
        print("%s %s" % (sample, net.evaluate(sample)))
    for _ in range(2):
        for _ in range(2000):
            for sample, target in cases:
                net.backpropagate(sample, target, 0.05)
    for sample, _ in cases:
        print("%s %s" % (sample, net.evaluate(sample)))
# BUG FIX: this ran a multi-thousand-iteration training demo at import time;
# guard it so importing the module has no side effects.
if __name__ == '__main__':
    test_XOR()
| |
from __future__ import absolute_import
from __future__ import print_function

import os
import datetime

import numpy as np
import matplotlib.pyplot as plt

import clawpack.visclaw.colormaps as colormap
# BUG FIX: this module was previously also aliased to `geodata` and silently
# shadowed by the `clawpack.geoclaw.data as geodata` import below (only the
# geoclaw binding was ever usable); give it a distinct name.
import clawpack.visclaw.data as visclaw_data
import clawpack.visclaw.gaugetools as gaugetools
import clawpack.clawutil.data as clawutil
import clawpack.amrclaw.data as amrclaw
import clawpack.geoclaw.data as geodata
import clawpack.geoclaw.util as geoutil
import clawpack.geoclaw.surge.plot as surgeplot
# Fixed-grid plotting support is optional; fall back to None when absent.
try:
    from setplotfg import setplotfg
except ImportError:
    # BUG FIX: was a bare `except:`, which would also hide real errors
    # (e.g. a SystemExit or typo-induced NameError) raised during import.
    setplotfg = None
def setplot(plotdata=None):
    """Populate *plotdata* with figures for a GeoClaw storm-surge run.

    Builds surface/current maps per region, friction/pressure/wind forcing
    figures, per-gauge time series (compared against NOAA tide data), and
    per-gauge location maps, then sets the hardcopy output parameters.

    Returns the configured ClawPlotData instance.
    """
    if plotdata is None:
        from clawpack.visclaw.data import ClawPlotData
        plotdata = ClawPlotData()

    # clear any old figures,axes,items data
    plotdata.clearfigures()
    plotdata.format = 'ascii'

    # Load data from output
    clawdata = clawutil.ClawInputData(2)
    clawdata.read(os.path.join(plotdata.outdir, 'claw.data'))
    physics = geodata.GeoClawData()
    physics.read(os.path.join(plotdata.outdir, 'geoclaw.data'))
    surge_data = geodata.SurgeData()
    surge_data.read(os.path.join(plotdata.outdir, 'surge.data'))
    friction_data = geodata.FrictionData()
    friction_data.read(os.path.join(plotdata.outdir, 'friction.data'))

    # Load storm track
    track = surgeplot.track_data(os.path.join(plotdata.outdir, 'fort.track'))

    # Set afteraxes function that overlays the storm track on each frame
    def surge_afteraxes(cd):
        surgeplot.surge_afteraxes(cd, track, plot_direction=False, kwargs={"markersize": 5})

    # Color limits
    surface_limits = [-2.5, 2.5]
    speed_limits = [0.0, 3.0]
    wind_limits = [0, 75]
    pressure_limits = [910, 1010]
    friction_bounds = [0.01, 0.04]

    def friction_after_axes(cd):
        plt.title(r"Manning's $n$ Coefficient")

    # ==========================================================================
    #   Plot specifications
    # ==========================================================================
    regions = {"Caribbean": {"xlimits": (clawdata.lower[0], clawdata.upper[0]),
                             "ylimits": (clawdata.lower[1], clawdata.upper[1]),
                             "figsize": (6.4, 4.8)},
               "Florida": {"xlimits": (-86, -79),
                           "ylimits": (23.5, 30.5),
                           "figsize": (8, 6)}}

    # one Surface and one Currents figure per region
    for (name, region_dict) in regions.items():
        # Surface Figure
        plotfigure = plotdata.new_plotfigure(name="Surface - %s" % name)
        plotfigure.kwargs = {"figsize": region_dict['figsize']}
        plotaxes = plotfigure.new_plotaxes()
        plotaxes.title = "Surface"
        plotaxes.xlimits = region_dict["xlimits"]
        plotaxes.ylimits = region_dict["ylimits"]
        plotaxes.afteraxes = surge_afteraxes

        surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
        surgeplot.add_land(plotaxes)
        plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
        plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10

        # Speed Figure
        plotfigure = plotdata.new_plotfigure(name="Currents - %s" % name)
        plotfigure.kwargs = {"figsize": region_dict['figsize']}
        plotaxes = plotfigure.new_plotaxes()
        plotaxes.title = "Currents"
        plotaxes.xlimits = region_dict["xlimits"]
        plotaxes.ylimits = region_dict["ylimits"]
        plotaxes.afteraxes = surge_afteraxes

        surgeplot.add_speed(plotaxes, bounds=speed_limits)
        surgeplot.add_land(plotaxes)
        plotaxes.plotitem_dict['speed'].amr_patchedges_show = [0] * 10
        plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10

    #
    # Friction field
    #
    plotfigure = plotdata.new_plotfigure(name='Friction')
    plotfigure.show = friction_data.variable_friction and True

    plotaxes = plotfigure.new_plotaxes()
    plotaxes.xlimits = regions['Caribbean']['xlimits']
    plotaxes.ylimits = regions['Caribbean']['ylimits']
    # plotaxes.title = "Manning's N Coefficient"
    plotaxes.afteraxes = friction_after_axes
    plotaxes.scaled = True

    surgeplot.add_friction(plotaxes, bounds=friction_bounds, shrink=0.9)
    plotaxes.plotitem_dict['friction'].amr_patchedges_show = [0] * 10
    plotaxes.plotitem_dict['friction'].colorbar_label = "$n$"

    #
    #  Hurricane Forcing fields
    #
    # Pressure field
    plotfigure = plotdata.new_plotfigure(name='Pressure')
    plotfigure.show = surge_data.pressure_forcing and True

    plotaxes = plotfigure.new_plotaxes()
    plotaxes.xlimits = regions['Caribbean']['xlimits']
    plotaxes.ylimits = regions['Caribbean']['ylimits']
    plotaxes.title = "Pressure Field"
    plotaxes.afteraxes = surge_afteraxes
    plotaxes.scaled = True
    surgeplot.add_pressure(plotaxes, bounds=pressure_limits)
    surgeplot.add_land(plotaxes)

    # Wind field
    plotfigure = plotdata.new_plotfigure(name='Wind Speed')
    plotfigure.show = surge_data.wind_forcing and True

    plotaxes = plotfigure.new_plotaxes()
    plotaxes.xlimits = regions['Caribbean']['xlimits']
    plotaxes.ylimits = regions['Caribbean']['ylimits']
    plotaxes.title = "Wind Field"
    plotaxes.afteraxes = surge_afteraxes
    plotaxes.scaled = True
    surgeplot.add_wind(plotaxes, bounds=wind_limits)
    surgeplot.add_land(plotaxes)

    # ========================================================================
    #  Figures for gauges
    # ========================================================================
    plotfigure = plotdata.new_plotfigure(name='Gauge Surfaces', figno=300, type='each_gauge')
    plotfigure.show = True
    plotfigure.clf_each_gauge = True

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.xlimits = [-4, 2]
    # plotaxes.xlabel = "Days from landfall"
    # plotaxes.ylabel = "Surface (m)"
    plotaxes.ylimits = [-3.00, 3.00]
    plotaxes.title = 'Surface'

    def gauge_afteraxes(cd):
        axes = plt.gca()
        surgeplot.plot_landfall_gauge(cd.gaugesoln, axes)
        # NOTE(review): gauge_id is indexed with cd.gaugeno - 1 below, so
        # gauges are presumably numbered 1..6 — confirm against setrun.
        gauge_id = ["8723970", "8724580", "8725110", "8726384", "8726724", "9755371"]
        # NOTE(review): gauge_title is currently unused.
        gauge_title = ["Vaca Key, Florida Bay, FL",
                       "Key West, FL",
                       "Naples, FL",
                       "Port Manatee, FL",
                       "Clearwater Beach, FL",
                       "San Juan, PR"]

        # get noaa data
        if (cd.gaugeno < 7):
            realData = geoutil.fetch_noaa_tide_data(gauge_id[cd.gaugeno - 1], datetime.datetime(2017, 9, 6, hour=13), datetime.datetime(2017, 9, 12, hour=13), datum="MLLW")
            # observed surge = water level minus predicted tide
            values = realData[1] - realData[2]
            times = []
            for time in realData[0]:
                # /1440 converts minutes to days — assumes the datetime64
                # differences are minute-resolution; TODO confirm
                times.append((time - np.datetime64("2017-09-10T13:00")).astype(float) / 1440)
            plt.plot(times, values, color='orange', label='real')

        # Fix up plot - in particular fix time labels
        axes.set_title('Station %s' % cd.gaugeno)
        axes.set_xlabel('Days relative to landfall')
        axes.set_ylabel('Surface (m)')
        axes.set_xlim([-4, 2])
        axes.set_ylim([-3.00, 3.00])
        axes.set_xticks([-4, -3, -2, -1, 0, 1, 2])
        axes.set_xticklabels([r"$-4$", r"$-3$", r"$-2$", r"$-1$", r"$0$", r"$1$", r"$2$"])
        axes.grid(True)
    plotaxes.afteraxes = gauge_afteraxes

    # Plot surface as blue curve:
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    # plotitem.plot_var = 3
    # plotitem.plotstyle = 'b-'

    # individual gauge plots: each draws the frame then marks the gauge site
    def gauge_1_afteraxes(cd):
        plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
        surge_afteraxes(cd)
        gaugetools.plot_gauge_locations(cd.plotdata, gaugenos=[1], format_string='ko', add_labels=True)

    def gauge_2_afteraxes(cd):
        plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
        surge_afteraxes(cd)
        gaugetools.plot_gauge_locations(cd.plotdata, gaugenos=[2], format_string='ko', add_labels=True)

    def gauge_3_afteraxes(cd):
        plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
        surge_afteraxes(cd)
        gaugetools.plot_gauge_locations(cd.plotdata, gaugenos=[3], format_string='ko', add_labels=True)

    def gauge_4_afteraxes(cd):
        plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
        surge_afteraxes(cd)
        gaugetools.plot_gauge_locations(cd.plotdata, gaugenos=[4], format_string='ko', add_labels=True)

    def gauge_5_afteraxes(cd):
        plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
        surge_afteraxes(cd)
        gaugetools.plot_gauge_locations(cd.plotdata, gaugenos=[5], format_string='ko', add_labels=True)

    def gauge_6_afteraxes(cd):
        plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
        surge_afteraxes(cd)
        # NOTE(review): gaugenos=[7] is inconsistent with the other gauge
        # figures (1..5 mark their own number) — confirm this is intended.
        gaugetools.plot_gauge_locations(cd.plotdata, gaugenos=[7], format_string='ko', add_labels=True)

    plotfigure = plotdata.new_plotfigure(name="Gauge 1")
    plotfigure.show = True

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Gauge 1'
    plotaxes.scaled = True
    plotaxes.xlimits = [-81.3, -80.9]
    plotaxes.ylimits = [24.55, 24.95]
    plotaxes.afteraxes = gauge_1_afteraxes
    surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
    surgeplot.add_land(plotaxes)
    plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
    plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10

    plotfigure = plotdata.new_plotfigure(name="Gauge 2")
    plotfigure.show = True

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Gauge 2'
    plotaxes.scaled = True
    plotaxes.xlimits = [-82.0, -81.5]
    plotaxes.ylimits = [24.25, 24.75]
    plotaxes.afteraxes = gauge_2_afteraxes
    surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
    surgeplot.add_land(plotaxes)
    plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
    plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10

    plotfigure = plotdata.new_plotfigure(name="Gauge 3")
    plotfigure.show = True

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Gauge 3'
    plotaxes.scaled = True
    plotaxes.xlimits = [-82.25, -81.5]
    plotaxes.ylimits = [25.75, 26.5]
    plotaxes.afteraxes = gauge_3_afteraxes
    surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
    surgeplot.add_land(plotaxes)
    plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
    plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10

    plotfigure = plotdata.new_plotfigure(name="Gauge 4")
    plotfigure.show = True

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Gauge 4'
    plotaxes.scaled = True
    plotaxes.xlimits = [-83.0, -82.0]
    plotaxes.ylimits = [27.0, 28.0]
    plotaxes.afteraxes = gauge_4_afteraxes
    surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
    surgeplot.add_land(plotaxes)
    plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
    plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10

    plotfigure = plotdata.new_plotfigure(name="Gauge 5")
    plotfigure.show = True

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Gauge 5'
    plotaxes.scaled = True
    plotaxes.xlimits = [-83.5, -82.5]
    plotaxes.ylimits = [27.5, 28.5]
    plotaxes.afteraxes = gauge_5_afteraxes
    surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
    surgeplot.add_land(plotaxes)
    plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
    plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10

    plotfigure = plotdata.new_plotfigure(name="Gauge 6")
    plotfigure.show = True

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Gauge 6'
    plotaxes.scaled = True
    plotaxes.xlimits = [-66.5, -65.5]
    plotaxes.ylimits = [18.0, 19.0]
    plotaxes.afteraxes = gauge_6_afteraxes
    surgeplot.add_surface_elevation(plotaxes, bounds=surface_limits)
    surgeplot.add_land(plotaxes)
    plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0] * 10
    plotaxes.plotitem_dict['land'].amr_patchedges_show = [0] * 10

    # -----------------------------------------
    # Parameters used only when creating html and/or latex hardcopy
    # e.g., via pyclaw.plotters.frametools.printframes:
    plotdata.printfigs = True                # print figures
    plotdata.print_format = 'png'            # file format
    plotdata.print_framenos = 'all'          # list of frames to print
    plotdata.print_gaugenos = [1, 2, 3, 4, 5, 6]   # list of gauges to print
    plotdata.print_fignos = 'all'            # list of figures to print
    plotdata.html = True                     # create html files of plots?
    plotdata.latex = True                    # create latex file of plots?
    plotdata.latex_figsperline = 2           # layout of plots
    plotdata.latex_framesperline = 1         # layout of plots
    plotdata.latex_makepdf = False           # also run pdflatex?
    plotdata.parallel = True                 # parallel plotting

    return plotdata
| |
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import netaddr
from neutron_lib import constants
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy as oslo_policy
from oslo_utils import excutils
import six
import webob.exc
from neutron._i18n import _, _LE, _LI
from neutron.api import api_common
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.db import api as db_api
from neutron import policy
from neutron import quota
from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)
# Map exceptions raised by plugins/libraries onto the HTTP error responses
# returned to API clients; consumed by the wsgi_resource.Resource wrapper
# built in create_resource() below.
FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound,
             exceptions.Conflict: webob.exc.HTTPConflict,
             exceptions.InUse: webob.exc.HTTPConflict,
             exceptions.BadRequest: webob.exc.HTTPBadRequest,
             exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable,
             exceptions.NotAuthorized: webob.exc.HTTPForbidden,
             netaddr.AddrFormatError: webob.exc.HTTPBadRequest,
             oslo_policy.PolicyNotAuthorized: webob.exc.HTTPForbidden
             }
class Controller(object):
    """Generic REST controller for a Neutron API collection.

    Translates HTTP index/show/create/update/delete requests into calls on
    the configured plugin, applying policy (authZ) checks, quota
    reservations, pagination/sorting and agent notifications along the way.
    """
    LIST = 'list'
    SHOW = 'show'
    CREATE = 'create'
    UPDATE = 'update'
    DELETE = 'delete'
    @property
    def plugin(self):
        # The plugin instance that ultimately services all requests.
        return self._plugin
    @property
    def resource(self):
        # Singular, underscored resource name (e.g. 'port').
        return self._resource
    def __init__(self, plugin, collection, resource, attr_info,
                 allow_bulk=False, member_actions=None, parent=None,
                 allow_pagination=False, allow_sorting=False):
        """Build a controller for one API collection.

        :param plugin: plugin object servicing the requests
        :param collection: plural resource name (dashes allowed)
        :param resource: singular resource name (dashes allowed)
        :param attr_info: attribute map describing the resource's schema
        :param allow_bulk: whether bulk create is permitted
        :param member_actions: extra per-member actions exposed via
            __getattr__
        :param parent: optional dict describing a parent resource; when
            set, plugin handler names and URLs are scoped to the parent
        :param allow_pagination: whether paginated listing is permitted
        :param allow_sorting: whether sorted listing is permitted
        """
        if member_actions is None:
            member_actions = []
        self._plugin = plugin
        self._collection = collection.replace('-', '_')
        self._resource = resource.replace('-', '_')
        self._attr_info = attr_info
        self._allow_bulk = allow_bulk
        self._allow_pagination = allow_pagination
        self._allow_sorting = allow_sorting
        self._native_bulk = self._is_native_bulk_supported()
        self._native_pagination = self._is_native_pagination_supported()
        self._native_sorting = self._is_native_sorting_supported()
        self._policy_attrs = [name for (name, info) in self._attr_info.items()
                              if info.get('required_by_policy')]
        self._notifier = n_rpc.get_notifier('network')
        # use plugin's dhcp notifier, if this is already instantiated
        agent_notifiers = getattr(plugin, 'agent_notifiers', {})
        self._dhcp_agent_notifier = (
            agent_notifiers.get(constants.AGENT_TYPE_DHCP) or
            dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        )
        if cfg.CONF.notify_nova_on_port_data_changes:
            from neutron.notifiers import nova
            self._nova_notifier = nova.Notifier()
        self._member_actions = member_actions
        self._primary_key = self._get_primary_key()
        if self._allow_pagination and self._native_pagination:
            # Native pagination need native sorting support
            if not self._native_sorting:
                raise exceptions.Invalid(
                    _("Native pagination depend on native sorting")
                )
            if not self._allow_sorting:
                LOG.info(_LI("Allow sorting is enabled because native "
                             "pagination requires native sorting"))
                self._allow_sorting = True
        if parent:
            self._parent_id_name = '%s_id' % parent['member_name']
            parent_part = '_%s' % parent['member_name']
        else:
            self._parent_id_name = None
            parent_part = ''
        self._plugin_handlers = {
            self.LIST: 'get%s_%s' % (parent_part, self._collection),
            self.SHOW: 'get%s_%s' % (parent_part, self._resource)
        }
        for action in [self.CREATE, self.UPDATE, self.DELETE]:
            self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
                                                         self._resource)
    def _get_primary_key(self, default_primary_key='id'):
        """Return the attribute flagged 'primary_key' in attr_info.

        Falls back to *default_primary_key* when none is flagged.
        """
        for key, value in six.iteritems(self._attr_info):
            if value.get('primary_key', False):
                return key
        return default_primary_key
    def _is_native_bulk_supported(self):
        """Whether the plugin class advertises native bulk create support."""
        native_bulk_attr_name = ("_%s__native_bulk_support"
                                 % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_bulk_attr_name, False)
    def _is_native_pagination_supported(self):
        """Whether the plugin class advertises native pagination support."""
        native_pagination_attr_name = ("_%s__native_pagination_support"
                                       % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_pagination_attr_name, False)
    def _is_native_sorting_supported(self):
        """Whether the plugin class advertises native sorting support."""
        native_sorting_attr_name = ("_%s__native_sorting_support"
                                    % self._plugin.__class__.__name__)
        return getattr(self._plugin, native_sorting_attr_name, False)
    def _exclude_attributes_by_policy(self, context, data):
        """Identifies attributes to exclude according to authZ policies.
        Return a list of attribute names which should be stripped from the
        response returned to the user because the user is not authorized
        to see them.
        """
        attributes_to_exclude = []
        for attr_name in data.keys():
            attr_data = self._attr_info.get(attr_name)
            if attr_data and attr_data['is_visible']:
                if policy.check(
                    context,
                    '%s:%s' % (self._plugin_handlers[self.SHOW], attr_name),
                    data,
                    might_not_exist=True,
                    pluralized=self._collection):
                    # this attribute is visible, check next one
                    continue
            # if the code reaches this point then either the policy check
            # failed or the attribute was not visible in the first place
            attributes_to_exclude.append(attr_name)
        return attributes_to_exclude
    def _view(self, context, data, fields_to_strip=None):
        """Build a view of an API resource.
        :param context: the neutron context
        :param data: the object for which a view is being created
        :param fields_to_strip: attributes to remove from the view
        :returns: a view of the object which includes only attributes
        visible according to API resource declaration and authZ policies.
        """
        fields_to_strip = ((fields_to_strip or []) +
                           self._exclude_attributes_by_policy(context, data))
        return self._filter_attributes(context, data, fields_to_strip)
    def _filter_attributes(self, context, data, fields_to_strip=None):
        """Return *data* minus the attributes named in *fields_to_strip*."""
        if not fields_to_strip:
            return data
        return dict(item for item in six.iteritems(data)
                    if (item[0] not in fields_to_strip))
    def _do_field_list(self, original_fields):
        """Extend a requested field list with policy-required attributes.

        Returns the augmented list and the attributes that were added
        (so callers can strip them from the response afterwards).
        """
        fields_to_add = None
        # don't do anything if fields were not specified in the request
        if original_fields:
            fields_to_add = [attr for attr in self._policy_attrs
                             if attr not in original_fields]
            original_fields.extend(self._policy_attrs)
        return original_fields, fields_to_add
    def __getattr__(self, name):
        """Expose registered member actions as dynamically-built handlers.

        Any *name* not registered in member_actions raises AttributeError.
        """
        if name in self._member_actions:
            @db_api.retry_db_errors
            def _handle_action(request, id, **kwargs):
                # Dispatch one member action: authz-check the target item,
                # then forward to the plugin method of the same name.
                arg_list = [request.context, id]
                # Ensure policy engine is initialized
                policy.init()
                # Fetch the resource and verify if the user can access it
                try:
                    parent_id = kwargs.get(self._parent_id_name)
                    resource = self._item(request,
                                          id,
                                          do_authz=True,
                                          field_list=None,
                                          parent_id=parent_id)
                except oslo_policy.PolicyNotAuthorized:
                    msg = _('The resource could not be found.')
                    raise webob.exc.HTTPNotFound(msg)
                body = copy.deepcopy(kwargs.pop('body', None))
                # Explicit comparison with None to distinguish from {}
                if body is not None:
                    arg_list.append(body)
                # It is ok to raise a 403 because accessibility to the
                # object was checked earlier in this method
                policy.enforce(request.context,
                               name,
                               resource,
                               pluralized=self._collection)
                ret_value = getattr(self._plugin, name)(*arg_list, **kwargs)
                # It is simply impossible to predict whether one of this
                # actions alters resource usage. For instance a tenant port
                # is created when a router interface is added. Therefore it is
                # important to mark as dirty resources whose counters have
                # been altered by this operation
                resource_registry.set_resources_dirty(request.context)
                return ret_value
            return _handle_action
        else:
            raise AttributeError()
    def _get_pagination_helper(self, request):
        """Pick the pagination strategy matching this controller's config."""
        if self._allow_pagination and self._native_pagination:
            return api_common.PaginationNativeHelper(request,
                                                     self._primary_key)
        elif self._allow_pagination:
            return api_common.PaginationEmulatedHelper(request,
                                                       self._primary_key)
        return api_common.NoPaginationHelper(request, self._primary_key)
    def _get_sorting_helper(self, request):
        """Pick the sorting strategy matching this controller's config."""
        if self._allow_sorting and self._native_sorting:
            return api_common.SortingNativeHelper(request, self._attr_info)
        elif self._allow_sorting:
            return api_common.SortingEmulatedHelper(request, self._attr_info)
        return api_common.NoSortingHelper(request, self._attr_info)
    def _items(self, request, do_authz=False, parent_id=None):
        """Retrieves and formats a list of elements of the requested entity."""
        # NOTE(salvatore-orlando): The following ensures that fields which
        # are needed for authZ policy validation are not stripped away by the
        # plugin before returning.
        original_fields, fields_to_add = self._do_field_list(
            api_common.list_args(request, 'fields'))
        filters = api_common.get_filters(request, self._attr_info,
                                         ['fields', 'sort_key', 'sort_dir',
                                          'limit', 'marker', 'page_reverse'])
        kwargs = {'filters': filters,
                  'fields': original_fields}
        sorting_helper = self._get_sorting_helper(request)
        pagination_helper = self._get_pagination_helper(request)
        sorting_helper.update_args(kwargs)
        sorting_helper.update_fields(original_fields, fields_to_add)
        pagination_helper.update_args(kwargs)
        pagination_helper.update_fields(original_fields, fields_to_add)
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj_getter = getattr(self._plugin, self._plugin_handlers[self.LIST])
        obj_list = obj_getter(request.context, **kwargs)
        obj_list = sorting_helper.sort(obj_list)
        obj_list = pagination_helper.paginate(obj_list)
        # Check authz
        if do_authz:
            # FIXME(salvatore-orlando): obj_getter might return references to
            # other resources. Must check authZ on them too.
            # Omit items from list that should not be visible
            obj_list = [obj for obj in obj_list
                        if policy.check(request.context,
                                        self._plugin_handlers[self.SHOW],
                                        obj,
                                        plugin=self._plugin,
                                        pluralized=self._collection)]
        # Use the first element in the list for discriminating which attributes
        # should be filtered out because of authZ policies
        # fields_to_add contains a list of attributes added for request policy
        # checks but that were not required by the user. They should be
        # therefore stripped
        fields_to_strip = fields_to_add or []
        if obj_list:
            fields_to_strip += self._exclude_attributes_by_policy(
                request.context, obj_list[0])
        collection = {self._collection:
                      [self._filter_attributes(
                          request.context, obj,
                          fields_to_strip=fields_to_strip)
                       for obj in obj_list]}
        pagination_links = pagination_helper.get_links(obj_list)
        if pagination_links:
            collection[self._collection + "_links"] = pagination_links
        # Synchronize usage trackers, if needed
        resource_registry.resync_resource(
            request.context, self._resource, request.context.tenant_id)
        return collection
    def _item(self, request, id, do_authz=False, field_list=None,
              parent_id=None):
        """Retrieves and formats a single element of the requested entity."""
        kwargs = {'fields': field_list}
        action = self._plugin_handlers[self.SHOW]
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj_getter = getattr(self._plugin, action)
        obj = obj_getter(request.context, id, **kwargs)
        # Check authz
        # FIXME(salvatore-orlando): obj_getter might return references to
        # other resources. Must check authZ on them too.
        if do_authz:
            policy.enforce(request.context,
                           action,
                           obj,
                           pluralized=self._collection)
        return obj
    def _send_dhcp_notification(self, context, data, methodname):
        """Notify DHCP agents of the change, if such notifications are on."""
        if cfg.CONF.dhcp_agent_notification:
            if self._collection in data:
                for body in data[self._collection]:
                    item = {self._resource: body}
                    self._dhcp_agent_notifier.notify(context, item, methodname)
            else:
                self._dhcp_agent_notifier.notify(context, data, methodname)
    def _send_nova_notification(self, action, orig, returned):
        """Forward the change to nova when a nova notifier is configured."""
        if hasattr(self, '_nova_notifier'):
            self._nova_notifier.send_network_change(action, orig, returned)
    @db_api.retry_db_errors
    def index(self, request, **kwargs):
        """Returns a list of the requested entity."""
        parent_id = kwargs.get(self._parent_id_name)
        # Ensure policy engine is initialized
        policy.init()
        return self._items(request, True, parent_id)
    @db_api.retry_db_errors
    def show(self, request, id, **kwargs):
        """Returns detailed information about the requested entity."""
        try:
            # NOTE(salvatore-orlando): The following ensures that fields
            # which are needed for authZ policy validation are not stripped
            # away by the plugin before returning.
            field_list, added_fields = self._do_field_list(
                api_common.list_args(request, "fields"))
            parent_id = kwargs.get(self._parent_id_name)
            # Ensure policy engine is initialized
            policy.init()
            return {self._resource:
                    self._view(request.context,
                               self._item(request,
                                          id,
                                          do_authz=True,
                                          field_list=field_list,
                                          parent_id=parent_id),
                               fields_to_strip=added_fields)}
        except oslo_policy.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
    def _emulate_bulk_create(self, obj_creator, request, body, parent_id=None):
        """Emulate atomic bulk create: undo earlier creates on any failure."""
        objs = []
        try:
            for item in body[self._collection]:
                kwargs = {self._resource: item}
                if parent_id:
                    kwargs[self._parent_id_name] = parent_id
                fields_to_strip = self._exclude_attributes_by_policy(
                    request.context, item)
                objs.append(self._filter_attributes(
                    request.context,
                    obj_creator(request.context, **kwargs),
                    fields_to_strip=fields_to_strip))
            return objs
        # Note(salvatore-orlando): broad catch as in theory a plugin
        # could raise any kind of exception
        except Exception:
            with excutils.save_and_reraise_exception():
                for obj in objs:
                    obj_deleter = getattr(self._plugin,
                                          self._plugin_handlers[self.DELETE])
                    try:
                        kwargs = ({self._parent_id_name: parent_id}
                                  if parent_id else {})
                        obj_deleter(request.context, obj['id'], **kwargs)
                    except Exception:
                        # broad catch as our only purpose is to log the
                        # exception
                        LOG.exception(_LE("Unable to undo add for "
                                          "%(resource)s %(id)s"),
                                      {'resource': self._resource,
                                       'id': obj['id']})
                # TODO(salvatore-orlando): The object being processed when the
                # plugin raised might have been created or not in the db.
                # We need a way for ensuring that if it has been created,
                # it is then deleted
    def create(self, request, body=None, **kwargs):
        """Emit the create.start notification, then create the entity."""
        self._notifier.info(request.context,
                            self._resource + '.create.start',
                            body)
        return self._create(request, body, **kwargs)
    @db_api.retry_db_errors
    def _create(self, request, body, **kwargs):
        """Creates a new instance of the requested entity."""
        parent_id = kwargs.get(self._parent_id_name)
        body = Controller.prepare_request_body(request.context,
                                               copy.deepcopy(body), True,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.CREATE]
        # Check authz
        if self._collection in body:
            # Have to account for bulk create
            items = body[self._collection]
        else:
            items = [body]
        # Ensure policy engine is initialized
        policy.init()
        # Store requested resource amounts grouping them by tenant
        # This won't work with multiple resources. However because of the
        # current structure of this controller there will hardly be more than
        # one resource for which reservations are being made
        request_deltas = collections.defaultdict(int)
        for item in items:
            self._validate_network_tenant_ownership(request,
                                                    item[self._resource])
            policy.enforce(request.context,
                           action,
                           item[self._resource],
                           pluralized=self._collection)
            if 'tenant_id' not in item[self._resource]:
                # no tenant_id - no quota check
                continue
            tenant_id = item[self._resource]['tenant_id']
            request_deltas[tenant_id] += 1
        # Quota enforcement
        reservations = []
        try:
            for (tenant, delta) in request_deltas.items():
                reservation = quota.QUOTAS.make_reservation(
                    request.context,
                    tenant,
                    {self._resource: delta},
                    self._plugin)
                reservations.append(reservation)
        except n_exc.QuotaResourceUnknown as e:
            # We don't want to quota this resource
            LOG.debug(e)
        def notify(create_result):
            """Commit quota reservations and emit create.end notifications."""
            # Ensure usage trackers for all resources affected by this API
            # operation are marked as dirty
            with request.context.session.begin():
                # Commit the reservation(s)
                for reservation in reservations:
                    quota.QUOTAS.commit_reservation(
                        request.context, reservation.reservation_id)
                resource_registry.set_resources_dirty(request.context)
            notifier_method = self._resource + '.create.end'
            self._notifier.info(request.context,
                                notifier_method,
                                create_result)
            self._send_dhcp_notification(request.context,
                                         create_result,
                                         notifier_method)
            return create_result
        def do_create(body, bulk=False, emulated=False):
            """Invoke the plugin creator, cancelling reservations on error."""
            kwargs = {self._parent_id_name: parent_id} if parent_id else {}
            if bulk and not emulated:
                obj_creator = getattr(self._plugin, "%s_bulk" % action)
            else:
                obj_creator = getattr(self._plugin, action)
            try:
                if emulated:
                    return self._emulate_bulk_create(obj_creator, request,
                                                     body, parent_id)
                else:
                    if self._collection in body:
                        # This is weird but fixing it requires changes to the
                        # plugin interface
                        kwargs.update({self._collection: body})
                    else:
                        kwargs.update({self._resource: body})
                    return obj_creator(request.context, **kwargs)
            except Exception:
                # In case of failure the plugin will always raise an
                # exception. Cancel the reservation
                with excutils.save_and_reraise_exception():
                    for reservation in reservations:
                        quota.QUOTAS.cancel_reservation(
                            request.context, reservation.reservation_id)
        if self._collection in body and self._native_bulk:
            # plugin does atomic bulk create operations
            objs = do_create(body, bulk=True)
            # Use first element of list to discriminate attributes which
            # should be removed because of authZ policies
            fields_to_strip = self._exclude_attributes_by_policy(
                request.context, objs[0])
            return notify({self._collection: [self._filter_attributes(
                request.context, obj, fields_to_strip=fields_to_strip)
                for obj in objs]})
        else:
            if self._collection in body:
                # Emulate atomic bulk behavior
                objs = do_create(body, bulk=True, emulated=True)
                return notify({self._collection: objs})
            else:
                obj = do_create(body)
                self._send_nova_notification(action, {},
                                             {self._resource: obj})
                return notify({self._resource: self._view(request.context,
                                                          obj)})
    def delete(self, request, id, **kwargs):
        """Deletes the specified entity."""
        if request.body:
            msg = _('Request body is not supported in DELETE.')
            raise webob.exc.HTTPBadRequest(msg)
        self._notifier.info(request.context,
                            self._resource + '.delete.start',
                            {self._resource + '_id': id})
        return self._delete(request, id, **kwargs)
    @db_api.retry_db_errors
    def _delete(self, request, id, **kwargs):
        """Delete after authz checks; emit delete.end notifications."""
        action = self._plugin_handlers[self.DELETE]
        # Check authz
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        obj = self._item(request, id, parent_id=parent_id)
        try:
            policy.enforce(request.context,
                           action,
                           obj,
                           pluralized=self._collection)
        except oslo_policy.PolicyNotAuthorized:
            # To avoid giving away information, pretend that it
            # doesn't exist
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        obj_deleter = getattr(self._plugin, action)
        obj_deleter(request.context, id, **kwargs)
        # A delete operation usually alters resource usage, so mark affected
        # usage trackers as dirty
        resource_registry.set_resources_dirty(request.context)
        notifier_method = self._resource + '.delete.end'
        self._notifier.info(request.context,
                            notifier_method,
                            {self._resource + '_id': id})
        result = {self._resource: self._view(request.context, obj)}
        self._send_nova_notification(action, {}, result)
        self._send_dhcp_notification(request.context,
                                     result,
                                     notifier_method)
    def update(self, request, id, body=None, **kwargs):
        """Updates the specified entity's attributes."""
        try:
            payload = body.copy()
        except AttributeError:
            msg = _("Invalid format: %s") % request.body
            raise exceptions.BadRequest(resource='body', msg=msg)
        payload['id'] = id
        self._notifier.info(request.context,
                            self._resource + '.update.start',
                            payload)
        return self._update(request, id, body, **kwargs)
    @db_api.retry_db_errors
    def _update(self, request, id, body, **kwargs):
        """Apply the update after authz checks; emit update notifications."""
        body = Controller.prepare_request_body(request.context,
                                               copy.deepcopy(body), False,
                                               self._resource, self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.UPDATE]
        # Load object to check authz
        # but pass only attributes in the original body and required
        # by the policy engine to the policy 'brain'
        field_list = [name for (name, value) in six.iteritems(self._attr_info)
                      if (value.get('required_by_policy') or
                          value.get('primary_key') or
                          'default' not in value)]
        # Ensure policy engine is initialized
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        orig_obj = self._item(request, id, field_list=field_list,
                              parent_id=parent_id)
        orig_object_copy = copy.copy(orig_obj)
        orig_obj.update(body[self._resource])
        # Make a list of attributes to be updated to inform the policy engine
        # which attributes are set explicitly so that it can distinguish them
        # from the ones that are set to their default values.
        orig_obj[n_const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
        try:
            policy.enforce(request.context,
                           action,
                           orig_obj,
                           pluralized=self._collection)
        except oslo_policy.PolicyNotAuthorized:
            with excutils.save_and_reraise_exception() as ctxt:
                # If a tenant is modifying it's own object, it's safe to return
                # a 403. Otherwise, pretend that it doesn't exist to avoid
                # giving away information.
                if request.context.tenant_id != orig_obj['tenant_id']:
                    ctxt.reraise = False
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
        obj_updater = getattr(self._plugin, action)
        kwargs = {self._resource: body}
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj = obj_updater(request.context, id, **kwargs)
        # Usually an update operation does not alter resource usage, but as
        # there might be side effects it might be worth checking for changes
        # in resource usage here as well (e.g: a tenant port is created when a
        # router interface is added)
        resource_registry.set_resources_dirty(request.context)
        result = {self._resource: self._view(request.context, obj)}
        notifier_method = self._resource + '.update.end'
        self._notifier.info(request.context, notifier_method, result)
        self._send_dhcp_notification(request.context,
                                     result,
                                     notifier_method)
        self._send_nova_notification(action, orig_object_copy, result)
        return result
    @staticmethod
    def prepare_request_body(context, body, is_create, resource, attr_info,
                             allow_bulk=False):
        """Verifies required attributes are in request body.
        Also checking that an attribute is only specified if it is allowed
        for the given operation (create/update).
        Attribute with default values are considered to be optional.
        body argument must be the deserialized body.
        """
        collection = resource + "s"
        if not body:
            raise webob.exc.HTTPBadRequest(_("Resource body required"))
        LOG.debug("Request body: %(body)s", {'body': body})
        try:
            if collection in body:
                if not allow_bulk:
                    raise webob.exc.HTTPBadRequest(_("Bulk operation "
                                                     "not supported"))
                if not body[collection]:
                    raise webob.exc.HTTPBadRequest(_("Resources required"))
                bulk_body = [
                    Controller.prepare_request_body(
                        context, item if resource in item
                        else {resource: item}, is_create, resource, attr_info,
                        allow_bulk) for item in body[collection]
                ]
                return {collection: bulk_body}
            res_dict = body.get(resource)
        except (AttributeError, TypeError):
            msg = _("Body contains invalid data")
            raise webob.exc.HTTPBadRequest(msg)
        if res_dict is None:
            msg = _("Unable to find '%s' in request body") % resource
            raise webob.exc.HTTPBadRequest(msg)
        attributes.populate_tenant_id(context, res_dict, attr_info, is_create)
        attributes.verify_attributes(res_dict, attr_info)
        if is_create: # POST
            attributes.fill_default_value(attr_info, res_dict,
                                          webob.exc.HTTPBadRequest)
        else: # PUT
            for attr, attr_vals in six.iteritems(attr_info):
                if attr in res_dict and not attr_vals['allow_put']:
                    msg = _("Cannot update read-only attribute %s") % attr
                    raise webob.exc.HTTPBadRequest(msg)
        attributes.convert_value(attr_info, res_dict, webob.exc.HTTPBadRequest)
        return body
    def _validate_network_tenant_ownership(self, request, resource_item):
        """Reject port/subnet creation on another tenant's private network."""
        # TODO(salvatore-orlando): consider whether this check can be folded
        # in the policy engine
        if (request.context.is_admin or request.context.is_advsvc or
                self._resource not in ('port', 'subnet')):
            return
        network = self._plugin.get_network(
            request.context,
            resource_item['network_id'])
        # do not perform the check on shared networks
        if network.get('shared'):
            return
        network_owner = network['tenant_id']
        if network_owner != resource_item['tenant_id']:
            # NOTE(kevinbenton): we raise a 404 to hide the existence of the
            # network from the tenant since they don't have access to it.
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)
def create_resource(collection, resource, plugin, params, allow_bulk=False,
                    member_actions=None, parent=None, allow_pagination=False,
                    allow_sorting=False):
    """Create a Controller for *collection* and wrap it in a WSGI Resource.

    The resulting resource translates plugin exceptions to HTTP errors
    according to FAULT_MAP.
    """
    controller = Controller(
        plugin, collection, resource, params, allow_bulk,
        member_actions=member_actions, parent=parent,
        allow_pagination=allow_pagination, allow_sorting=allow_sorting)
    return wsgi_resource.Resource(controller, FAULT_MAP)
| |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
#'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
#'-std=c++11',
#'-std=c99',
'-std=c11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
#'c++',
'c',
'-isystem',
'../BoostParts',
'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
'/System/Library/Frameworks/Python.framework/Headers',
'-isystem',
'../llvm/include',
'-isystem',
'../llvm/tools/clang/include',
'-I',
'.',
'-I',
'/home/suosuopuo/CUnitHome/include',
#'-I',
#'/home/suosuopuo/work/book/ctools/hash/include',
'-I',
'./ClangCompleter',
'-isystem',
'./tests/gmock/gtest',
'-isystem',
'./tests/gmock/gtest/include',
'-isystem',
'./tests/gmock',
'-isystem',
'./tests/gmock/include',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../include/c++/v1',
'-isystem',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# With the default empty folder above, no database is loaded and the static
# 'flags' list is used for every file.
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
# Extensions tried when mapping a header file to a sibling source file whose
# flags can be reused (see GetCompilationInfoForFile below).
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute path of the directory containing this script."""
  absolute_path = os.path.abspath( __file__ )
  return os.path.dirname( absolute_path )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of *flags* in which relative include paths are anchored
  at *working_directory*; a falsy working directory leaves flags untouched."""
  if not working_directory:
    return list( flags )
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  expect_path = False
  for flag in flags:
    rewritten = flag
    # The previous flag was a bare path option, so this flag is its path.
    if expect_path:
      expect_path = False
      if not flag.startswith( '/' ):
        rewritten = os.path.join( working_directory, flag )
    for prefix in path_flags:
      if flag == prefix:
        # Path comes as the next separate flag.
        expect_path = True
        break
      if flag.startswith( prefix ):
        # Path is glued to the option itself, e.g. '-Isome/dir'.
        relative_part = flag[ len( prefix ): ]
        rewritten = prefix + os.path.join( working_directory, relative_part )
        break
    if rewritten:
      absolute_flags.append( rewritten )
  return absolute_flags
def IsHeaderFile( filename ):
  """True when *filename* carries a C/C++ header extension."""
  _, extension = os.path.splitext( filename )
  return extension in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Look up compilation info for *filename* in the database.

  CMake-generated databases have no entries for headers, so for a header we
  try sibling source files with the same basename and reuse their flags.
  Returns None when no usable entry is found for a header.
  """
  if not IsHeaderFile( filename ):
    return database.GetCompilationInfoForFile( filename )
  base = os.path.splitext( filename )[ 0 ]
  for extension in SOURCE_EXTENSIONS:
    candidate = base + extension
    if not os.path.exists( candidate ):
      continue
    info = database.GetCompilationInfoForFile( candidate )
    if info.compiler_flags_:
      return info
  return None
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the compile flags dictionary for *filename*."""
  if not database:
    # No compilation database configured: fall back to the static list,
    # anchoring its relative paths at this script's directory.
    anchored = MakeRelativePathsInFlagsAbsolute( flags,
                                                 DirectoryOfThisScript() )
    return {
      'flags': anchored,
      'do_cache': True
    }
  # Bear in mind that compilation_info.compiler_flags_ does NOT return a
  # python list, but a "list-like" StringVec object.
  compilation_info = GetCompilationInfoForFile( filename )
  if not compilation_info:
    return None
  final_flags = MakeRelativePathsInFlagsAbsolute(
    compilation_info.compiler_flags_,
    compilation_info.compiler_working_dir_ )
  # NOTE: This is just for YouCompleteMe; it's highly likely that your project
  # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
  # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
  try:
    final_flags.remove( '-stdlib=libc++' )
  except ValueError:
    pass
  return {
    'flags': final_flags,
    'do_cache': True
  }
| |
import binascii
import json
from neo.Prompt.CommandBase import CommandBase, CommandDesc, ParameterDesc
from neo.Prompt.PromptData import PromptData
from neo.Prompt.Commands.LoadSmartContract import ImportContractAddr
from neo.Prompt import Utils as PromptUtils
from neo.Core.KeyPair import KeyPair
from neo.Network.common import blocking_prompt as prompt
from neo.Core.Utils import isValidPublicAddress
from neo.Core.UInt160 import UInt160
from neo.Core.Cryptography.Crypto import Crypto
from neo.SmartContract.Contract import Contract
from neo.Core.Blockchain import Blockchain
from neo.Wallets import NEP5Token
from neo.Prompt.PromptPrinter import prompt_print as print
class CommandWalletImport(CommandBase):
    """Parent command that groups all `wallet import <...>` sub-commands."""

    def __init__(self):
        super().__init__()
        # Register every supported import flavour; order determines help
        # listing order.
        for sub_command in (CommandWalletImportWIF(),
                            CommandWalletImportNEP2(),
                            CommandWalletImportWatchAddr(),
                            CommandWalletImportMultisigAddr(),
                            CommandWalletImportToken(),
                            CommandWalletImportContractAddr()):
            self.register_sub_command(sub_command)

    def command_desc(self):
        """Describe this command for the interactive help output."""
        return CommandDesc('import', 'import wallet items')

    def execute(self, arguments):
        """Dispatch to the sub-command named by the first argument."""
        item = PromptUtils.get_arg(arguments)
        if not item:
            print(f"run `{self.command_desc().command} help` to see supported queries")
            return False
        try:
            return self.execute_sub_command(item, arguments[1:])
        except KeyError:
            print(f"{item} is an invalid parameter")
            return False
class CommandWalletImportWIF(CommandBase):
    """Import a plaintext (WIF-format) private key into the open wallet."""

    def __init__(self):
        super().__init__()

    def execute(self, arguments):
        """Decode the WIF record, store the key, and report its public data.

        Returns True on success, False on any failure.
        """
        wallet = PromptData.Wallet
        if len(arguments) != 1:
            print("Please specify the required parameter")
            return False

        wif = arguments[0]
        try:
            private_key = KeyPair.PrivateKeyFromWIF(wif)
        except ValueError as e:
            print(f"WIF Error: {str(e)}")
            return False

        try:
            key_item = wallet.CreateKey(private_key)
            print(f"Imported key: {wif}")
            encoded_pubkey = key_item.PublicKey.encode_point(True).decode('utf-8')
            print(f"Pubkey: {encoded_pubkey}")
            print(f"Address: {key_item.GetAddress()}")
        except Exception as e:
            # couldn't find an exact call that throws this but it was in the old code. Leaving it in for now.
            print(f"Key creation error: {str(e)}")
            return False

        return True

    def command_desc(self):
        p1 = ParameterDesc('key', 'private key record in WIF format')
        return CommandDesc('wif', 'import an unprotected private key record of an address', [p1])
class CommandWalletImportNEP2(CommandBase):
    """Import a passphrase-protected (NEP-2) private key into the open wallet."""

    def __init__(self):
        super().__init__()

    def execute(self, arguments):
        """Decrypt the NEP-2 key with a prompted passphrase and store it.

        Returns True on success, False on any failure.
        """
        wallet = PromptData.Wallet
        if len(arguments) != 1:
            print("Please specify the required parameter")
            return False

        nep2_key = arguments[0]
        passphrase = prompt("[key password] ", is_password=True)

        try:
            kp = KeyPair.PrivateKeyFromNEP2(nep2_key, passphrase)
        except ValueError as e:
            print(str(e))
            return False

        try:
            key = wallet.CreateKey(kp)
            print(f"Imported key: {nep2_key}")
            pub_key = key.PublicKey.encode_point(True).decode('utf-8')
            print(f"Pubkey: {pub_key}")
            print(f"Address: {key.GetAddress()}")
        except Exception as e:
            # couldn't find an exact call that throws this but it was in the old code. Leaving it in for now.
            print(f"Key creation error: {str(e)}")
            return False

        # Bug fix: the success path previously fell through and returned None,
        # unlike the sibling WIF import command which signals success with True.
        return True

    def command_desc(self):
        p1 = ParameterDesc('private key', 'NEP-2 protected private key')
        return CommandDesc('nep2', 'import a passphrase protected private key record (NEP-2 format)', [p1])
class CommandWalletImportWatchAddr(CommandBase):
    """Import a public NEO address as a watch-only wallet entry."""

    def __init__(self):
        super().__init__()

    def execute(self, arguments):
        """Validate the address and register it as watch-only."""
        wallet = PromptData.Wallet
        if len(arguments) != 1:
            print("Please specify the required parameter")
            return False

        address = arguments[0]
        if not isValidPublicAddress(address):
            print("Invalid address specified")
            return False

        try:
            wallet.AddWatchOnly(wallet.ToScriptHash(address))
        except ValueError as e:
            print(str(e))
            return False

        print(f"Added address {address} as watch-only")
        return True

    def command_desc(self):
        p1 = ParameterDesc('address', 'public NEO address to watch')
        return CommandDesc('watch_addr', 'import a public address as watch only', [p1])
class CommandWalletImportMultisigAddr(CommandBase):
    """Build and import an m-of-n multi-signature contract address."""

    def __init__(self):
        super().__init__()

    def execute(self, arguments):
        """Validate the inputs and add the multi-sig verification contract.

        Returns True on success, False on any validation failure.
        """
        wallet = PromptData.Wallet
        if len(arguments) < 3:
            print("Please specify the minimum required parameters")
            return False

        own_pubkey = arguments[0]
        if not PromptUtils.is_valid_public_key(own_pubkey):
            print("Invalid public key format")
            return False

        own_script_hash = Crypto.ToScriptHash(own_pubkey, unhex=True)
        if not wallet.ContainsKeyHash(own_script_hash):
            print("Supplied first public key does not exist in own wallet.")
            return False

        try:
            min_signatures = int(arguments[1])
        except ValueError:
            print(f"Invalid minimum signature count value: {arguments[1]}")
            return False

        if min_signatures < 1:
            print("Minimum signatures count cannot be lower than 1")
            return False

        # The wallet's own key always participates in signing.
        signing_keys = arguments[2:]
        signing_keys.append(own_pubkey)

        # we need at least 2 public keys in total otherwise it's just a regular address.
        # 1 pub key is from an address in our own wallet, a secondary key can come from any place.
        if len(signing_keys) < min_signatures:
            print(f"Missing remaining signing keys. Minimum required: {min_signatures} given: {len(signing_keys)}")
            return False

        # every remaining pub key must be well formed ...
        for signing_key in signing_keys:
            if not PromptUtils.is_valid_public_key(signing_key):
                print(f"Invalid signing key {signing_key}")
                return False

        # ... and unique
        if len(set(signing_keys)) != len(signing_keys):
            print("Provided signing keys are not unique")
            return False

        verification_contract = Contract.CreateMultiSigContract(own_script_hash, min_signatures, signing_keys)
        address = verification_contract.Address
        wallet.AddContract(verification_contract)
        print(f"Added multi-sig contract address {address} to wallet")
        return True

    def command_desc(self):
        p1 = ParameterDesc('own pub key', 'public key in your own wallet (use `wallet` to find the information)')
        p2 = ParameterDesc('sign_cnt', 'minimum number of signatures required for using the address (min is: 1)')
        p3 = ParameterDesc('signing key n', 'all remaining signing public keys')
        return CommandDesc('multisig_addr', 'import a multi-signature address', [p1, p2, p3])
class CommandWalletImportToken(CommandBase):
    """Import an NEP-5 token by its contract script hash."""

    def __init__(self):
        super().__init__()

    def execute(self, arguments):
        """Parse the hash argument and delegate to ImportToken()."""
        if len(arguments) != 1:
            print("Please specify the required parameter")
            return

        raw_hash = arguments[0]
        try:
            contract_hash = UInt160.ParseString(raw_hash).ToBytes()
        except Exception:
            print(f"Invalid contract hash: {raw_hash}")
            return

        return ImportToken(PromptData.Wallet, contract_hash)

    def command_desc(self):
        p1 = ParameterDesc('contract_hash', 'token script hash')
        return CommandDesc('token', 'import a token', [p1])
class CommandWalletImportContractAddr(CommandBase):
    """Import a contract address bound to one of the wallet's public keys."""

    def __init__(self):
        super().__init__()

    def execute(self, arguments):
        """Validate hash and pubkey, then delegate to ImportContractAddr()."""
        wallet = PromptData.Wallet
        if len(arguments) != 2:
            print("Please specify the required parameters")
            return

        raw_hash = arguments[0]
        try:
            contract_hash = UInt160.ParseString(raw_hash).ToBytes()
        except Exception:
            print(f"Invalid contract hash: {raw_hash}")
            return

        pubkey = arguments[1]
        if not PromptUtils.is_valid_public_key(pubkey):
            print(f"Invalid pubkey: {pubkey}")
            return

        return ImportContractAddr(wallet, contract_hash,
                                  Crypto.ToScriptHash(pubkey, unhex=True))

    def command_desc(self):
        p1 = ParameterDesc('contract_hash', 'contract script hash')
        p2 = ParameterDesc('pubkey', 'pubkey of the contract')
        return CommandDesc('contract_addr', 'import a contract address', [p1, p2])
def ImportToken(wallet, contract_hash):
    """Look up *contract_hash* on the chain and register it as an NEP-5 token.

    Returns the token on success; otherwise prints the reason and returns None.
    """
    if wallet is None:
        print("please open a wallet")
        return

    contract = Blockchain.Default().GetContract(contract_hash)
    if not contract:
        print("Could not find the contract hash")
        return

    token = NEP5Token.NEP5Token(script=binascii.hexlify(contract.Code.Script))
    if not token.Query():
        print("Could not import token")
        return

    wallet.AddNEP5Token(token)
    print("added token %s " % json.dumps(token.ToJson(), indent=4))
    return token
| |
"""
Unittest for the basic stitcher.
"""
import json
import unittest
import sys
import networkx as nx
from networkx.readwrite import json_graph
from stitcher import stitch
class TestFilteringConditions(unittest.TestCase):
    """
    Tests the filter functions and validates that the right candidates are
    eliminated.
    """
    def setUp(self):
        # Small container graph (nodes 1-3) and request graph (a->b) used by
        # every test; the stitcher maps request types x/y/z onto container
        # types a/b/c.
        self.container = nx.DiGraph()
        self.container.add_node('1', **{'type': 'a', 'foo': 'x', 'bar': 5,
                                        'retest': 'abcde'})
        self.container.add_node('2', **{'type': 'a', 'foo': 'y', 'bar': 7})
        self.container.add_node('3', **{'type': 'b', 'foo': 'x'})
        self.container.add_edge('1', '2')
        self.container.add_edge('2', '3')
        self.request = nx.DiGraph()
        self.request.add_node('a', **{'type': 'x'})
        self.request.add_node('b', **{'type': 'y'})
        self.request.add_edge('a', 'b')
        self.cut = stitch.GlobalStitcher({'x': 'a', 'y': 'b', 'z': 'c'})
    def assertItemsEqual(self, first, second):
        """
        Python2->3 fix: delegate to assertCountEqual on Python 3, where
        assertItemsEqual no longer exists.
        """
        if sys.version_info[0] >= 3:
            self.assertCountEqual(first, second)
        else:
            super(TestFilteringConditions, self).assertItemsEqual(first,
                                                                  second,
                                                                  'override.')
    def test_filter_for_sanity(self):
        """
        Test filter for sanity.
        """
        # none given as condition should results input = output
        inp = [1, 2, 3]
        out = stitch.my_filter(self.container, [1, 2, 3], None)
        self.assertEqual(inp, out)
        # node a requires target node to have attribute foo set to y
        condy = {'attributes': [('eq', ('a', ('foo', 'y')))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        # only 1 option left - a->2, b->3!
        self.assertEqual(len(res1), 1)
        self.assertItemsEqual([('1', '2'), ('a', 'b'), ('2', '3'), ('b', '3'),
                              ('a', '2')], res1[0].edges())
        # node a requires target node to have attribute foo set to 5
        condy = {'attributes': [('eq', ('a', ('bar', 5)))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        # only 1 option left - a->1, b->3!
        self.assertEqual(len(res1), 1)
        self.assertItemsEqual([('1', '2'), ('a', 'b'), ('2', '3'), ('b', '3'),
                              ('a', '1')], res1[0].edges())
        # node a requires target node to have attribute foo not set to y
        condy = {'attributes': [('neq', ('a', ('foo', 'y')))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        # only 1 option left - a->1, b->3!
        self.assertEqual(len(res1), 1)
        self.assertItemsEqual([('1', '2'), ('a', 'b'), ('2', '3'), ('b', '3'),
                              ('a', '1')], res1[0].edges())
        # node a requires target node to have attribute foo not set to 5
        condy = {'attributes': [('neq', ('a', ('bar', 5)))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        # only 1 option left - a->2, b->3!
        self.assertEqual(len(res1), 1)
        self.assertItemsEqual([('1', '2'), ('a', 'b'), ('2', '3'), ('b', '3'),
                              ('a', '2')], res1[0].edges())
        # node a requires target node to have an attribute bar with value > 5
        condy = {'attributes': [('lg', ('a', ('bar', 5)))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        # only 1 option left - a->2, b->3!
        self.assertEqual(len(res1), 1)
        self.assertItemsEqual([('1', '2'), ('a', 'b'), ('2', '3'), ('b', '3'),
                              ('a', '2')], res1[0].edges())
        # node a requires target node to have an attribute xyz with value > 5
        condy = {'attributes': [('lg', ('a', ('xyz', 5)))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        # no stitch possible
        self.assertEqual(len(res1), 0)
        # node a requires target node to have an attribute bar with value < 6
        condy = {'attributes': [('lt', ('a', ('bar', 6)))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        # only 1 option left - a->1, b->3!
        self.assertEqual(len(res1), 1)
        self.assertItemsEqual([('1', '2'), ('a', 'b'), ('2', '3'), ('b', '3'),
                              ('a', '1')], res1[0].edges())
        # node a requires target node to have an attribute xyz with value < 5
        condy = {'attributes': [('lt', ('a', ('xyz', 5)))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        # no stitch possible
        self.assertEqual(len(res1), 0)
        # node a requires target node to have an attribute retest which starts
        # with an 'a'
        condy = {'attributes': [('regex', ('a', ('retest', '^a')))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        # only 1 option left - a->1, b->3!
        self.assertEqual(len(res1), 1)
        self.assertItemsEqual([('1', '2'), ('a', 'b'), ('2', '3'), ('b', '3'),
                              ('a', '1')], res1[0].edges())
        # node a requires target node to have an attribute retest which starts
        # with an 'c'
        condy = {'attributes': [('regex', ('a', ('retest', '^c')))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        # no options left.
        self.assertEqual(len(res1), 0)
        # extend both graphs for the composition conditions below.
        self.container.add_node('4', **{'type': 'b', 'foo': 'x'})
        self.container.add_edge('3', '4')
        self.request.add_node('c', **{'type': 'y'})
        self.request.add_edge('b', 'c')
        # node c & b to be stitched to same target!
        condy = {'compositions': [('same', ('b', 'c'))],
                 'attributes': [('eq', ('a', ('foo', 'x')))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        condy = {'compositions': [('same', ('c', 'b'))],
                 'attributes': [('eq', ('a', ('foo', 'x')))]}
        res2 = self.cut.stitch(self.container, self.request, conditions=condy)
        # only 2 options left: b&c->3 or b&c->4
        self.assertEqual(len(res1), 2)
        self.assertEqual(len(res2), 2)
        # should be identical.
        self.assertItemsEqual(res1[0].edges(), res2[0].edges())
        self.assertItemsEqual(res1[1].edges(), res2[1].edges())
        self.assertItemsEqual([('a', '1'), ('a', 'b'), ('b', 'c'), ('1', '2'),
                              ('3', '4'), ('2', '3'), ('b', '3'), ('c', '3')],
                              res1[0].edges())
        self.assertItemsEqual([('a', '1'), ('a', 'b'), ('b', 'c'), ('1', '2'),
                              ('3', '4'), ('2', '3'), ('b', '4'), ('c', '4')],
                              res1[1].edges())
        # node a & b to be stitched to different targets!
        condy = {'compositions': [('diff', ('b', 'c'))],
                 'attributes': [('eq', ('a', ('foo', 'x')))]}
        res1 = self.cut.stitch(self.container, self.request, conditions=condy)
        condy = {'compositions': [('diff', ('c', 'b'))],
                 'attributes': [('eq', ('a', ('foo', 'x')))]}
        res2 = self.cut.stitch(self.container, self.request, conditions=condy)
        # only 2 options left: b->3 & c->4 or b->4 & c->3
        self.assertEqual(len(res1), 2)
        self.assertEqual(len(res2), 2)
        # should be identical.
        self.assertItemsEqual(res1[0].edges(), res2[0].edges())
        self.assertItemsEqual(res1[1].edges(), res2[1].edges())
        # the order of the two results is not deterministic - accept either
        # permutation, but require that one of the two branches ran.
        either_called = False
        if ('b', '4') in res1[0].edges():
            self.assertItemsEqual([('a', '1'), ('a', 'b'), ('b', 'c'),
                                  ('1', '2'), ('3', '4'), ('2', '3'),
                                  ('b', '4'), ('c', '3')],
                                  res1[0].edges())
            self.assertItemsEqual([('a', '1'), ('a', 'b'), ('b', 'c'),
                                  ('1', '2'), ('3', '4'), ('2', '3'),
                                  ('b', '3'), ('c', '4')],
                                  res1[1].edges())
            either_called = True
        elif ('b', '4') in res1[1].edges():
            self.assertItemsEqual([('a', '1'), ('a', 'b'), ('b', 'c'),
                                  ('1', '2'), ('3', '4'), ('2', '3'),
                                  ('b', '4'), ('c', '3')],
                                  res1[1].edges())
            self.assertItemsEqual([('a', '1'), ('a', 'b'), ('b', 'c'),
                                  ('1', '2'), ('3', '4'), ('2', '3'),
                                  ('b', '3'), ('c', '4')],
                                  res1[0].edges())
            either_called = True
        self.assertTrue(either_called)
    def test_attr_sharing_filter_for_sanity(self):
        """
        Test filter for sanity with a more complex setup.
        """
        # two groups in the container (a/b -> group 1, c/d -> group 2);
        # node 'e' belongs to no group.
        container = nx.DiGraph()
        container.add_node('a', **{'type': 'a', 'group': '1', 'geo': 'eu'})
        container.add_node('b', **{'type': 'b', 'group': '1', 'geo': 'us'})
        container.add_node('c', **{'type': 'a', 'group': '2'})
        container.add_node('d', **{'type': 'b', 'group': '2'})
        container.add_node('e', **{'type': 'b'})
        container.add_edge('a', 'b')
        container.add_edge('b', 'c')
        container.add_edge('c', 'd')
        request = nx.DiGraph()
        request.add_node('1', **{'type': 'x'})
        request.add_node('2', **{'type': 'y'})
        request.add_node('3', **{'type': 'y'})
        request.add_edge('1', '2')
        request.add_edge('1', '3')
        condy = {'compositions': [('share', ('group', ['1', '2'])),
                                  ('same', ('2', '3'))],
                 'attributes': [('eq', ('1', ('geo', 'eu')))]}
        res1 = self.cut.stitch(container, request, conditions=condy)
        # only one option possible
        self.assertEqual(len(res1), 1)
        # verify stitches
        self.assertIn(('1', 'a'), res1[0].edges())
        self.assertIn(('2', 'b'), res1[0].edges())
        self.assertIn(('3', 'b'), res1[0].edges())
        condy = {'compositions': [('nshare', ('group', ['2', '3']))],
                 'attributes': [('eq', ('1', ('geo', 'eu'))),
                                ('eq', ('2', ('geo', 'us')))]}
        res1 = self.cut.stitch(container, request, conditions=condy)
        # only one option possible
        self.assertEqual(len(res1), 1)
        # verify stitches
        self.assertIn(('1', 'a'), res1[0].edges())
        self.assertIn(('2', 'b'), res1[0].edges())
        self.assertIn(('3', 'd'), res1[0].edges())
class TestGlobalStitcher(unittest.TestCase):
    """
    Test the global stitcher class.
    """
    def setUp(self):
        # Container, request and stitch relationships come from fixture files
        # on disk, resolved relative to the working directory.
        container_tmp = json.load(open('data/container.json'))
        self.container = json_graph.node_link_graph(container_tmp,
                                                    directed=True)
        request_tmp = json.load(open('data/request.json'))
        self.request = json_graph.node_link_graph(request_tmp,
                                                  directed=True)
        rels = json.load(open('data/stitch.json'))
        self.cut = stitch.GlobalStitcher(rels)
    def test_stitch_for_success(self):
        """
        Test stitch for success.
        """
        self.cut.stitch(self.container, self.request)
    def test_stitch_for_sanity(self):
        """
        Test stitch for sanity.
        """
        # basic stitch test
        res1 = self.cut.stitch(self.container, self.request)
        self.assertTrue(len(res1) > 0)
        self.assertEqual(res1[0].number_of_edges(),
                         self.container.number_of_edges() + 5)
        self.assertEqual(res1[0].number_of_nodes(),
                         self.container.number_of_nodes() + 3)
        # let's add a node to the request which does not require to be
        # stitched to the container. Hence added edges = 3!
        self.request.add_node('n', **{'type': 'foo', 'rank': 7})
        self.request.add_edge('k', 'n')
        self.request.add_edge('n', 'l')
        # NOTE(review): `res1` is not recomputed after mutating the request,
        # so the assertions below re-check the stale result from above and
        # the mutation cannot affect them. Presumably a second
        # `self.cut.stitch(...)` call was intended here - confirm against the
        # stitcher's expected edge/node counts before changing.
        self.assertTrue(len(res1) > 0)
        self.assertEqual(res1[0].number_of_edges(),
                         self.container.number_of_edges() + 5)
        self.assertEqual(res1[0].number_of_nodes(),
                         self.container.number_of_nodes() + 3)
| |
import json
import re
from requests.exceptions import HTTPError
import responses
from keen.tests.base_test_case import BaseTestCase
from keen.client import KeenClient
from keen import exceptions
class SavedQueryTests(BaseTestCase):
    """Unit tests for KeenClient's saved/cached query CRUD operations.

    All HTTP traffic is stubbed via the `responses` library; no network
    access occurs. The deprecated `assertEquals` alias (removed in
    Python 3.12) is replaced with `assertEqual` throughout, and the
    repeated endpoint URL construction is factored into a helper.
    """

    def setUp(self):
        super(SavedQueryTests, self).setUp()
        self.exp_project_id = "xxxx1234"
        exp_master_key = "abcd3456"
        self.client = KeenClient(
            project_id=self.exp_project_id,
            read_key="efgh7890",
            master_key=exp_master_key
        )

    def _saved_query_url(self, *suffix):
        """Return the saved-queries endpoint URL for the stubbed client,
        optionally extended with path components (e.g. a query name)."""
        base = "{0}/{1}/projects/{2}/queries/saved".format(
            self.client.api.base_url,
            self.client.api.api_version,
            self.exp_project_id
        )
        return "/".join((base,) + suffix) if suffix else base

    def test_get_all_saved_queries_keys(self):
        # A client lacking the required keys must refuse the call.
        client = KeenClient(project_id="123123")
        self.assertRaises(
            exceptions.InvalidEnvironmentError, client.saved_queries.all
        )

    @responses.activate
    def test_get_all_saved_queries(self):
        saved_queries_response = [
            { "query_name": "first-saved-query", "query": {} },
            { "query_name": "second-saved-query", "query": {} }
        ]
        responses.add(
            responses.GET, self._saved_query_url(), status=200,
            json=saved_queries_response
        )
        all_saved_queries = self.client.saved_queries.all()
        self.assertEqual(all_saved_queries, saved_queries_response)

    def test_get_one_saved_query_keys(self):
        client = KeenClient(project_id="123123")
        self.assertRaises(
            exceptions.InvalidEnvironmentError,
            lambda: client.saved_queries.get("saved-query-name")
        )

    @responses.activate
    def test_get_one_saved_query(self):
        saved_queries_response = {
            "query_name": "saved-query-name"
        }
        responses.add(
            responses.GET, self._saved_query_url("saved-query-name"),
            status=200, json=saved_queries_response
        )
        saved_query = self.client.saved_queries.get("saved-query-name")
        self.assertEqual(saved_query, saved_queries_response)

    def test_create_saved_query_master_key(self):
        client = KeenClient(project_id="123123")
        self.assertRaises(
            exceptions.InvalidEnvironmentError,
            lambda: client.saved_queries.create("saved-query-name", {})
        )

    @responses.activate
    def test_create_saved_query(self):
        saved_queries_response = {
            "query_name": "saved-query-name"
        }
        responses.add(
            responses.PUT, self._saved_query_url("saved-query-name"),
            status=201, json=saved_queries_response
        )
        saved_query = self.client.saved_queries.create("saved-query-name", saved_queries_response)
        self.assertEqual(saved_query, saved_queries_response)

    @responses.activate
    def test_update_saved_query(self):
        unacceptable_attr = "run_information"
        metadata_attr_name = "metadata"
        original_query = {
            "query_name": "saved-query-name",
            "refresh_rate": 14400,
            "query": {
                "analysis_type": "average",
                "event_collection": "TheCollection",
                "target_property": "TheProperty",
                "timeframe": "this_2_weeks"
            },
            metadata_attr_name: { "foo": "bar" },
            unacceptable_attr: { "foo": "bar" }
        }
        url = self._saved_query_url("saved-query-name")
        # update() first fetches the current definition...
        responses.add(
            responses.GET, url, status=200, json=original_query
        )
        updated_query = { "query": {} }
        new_analysis_type = "sum"
        updated_query["query"]["analysis_type"] = new_analysis_type

        def request_callback(request):
            payload = json.loads(request.body)
            # Ensure update() round-trips some necessary things like "metadata"
            self.assertEqual(payload[metadata_attr_name], original_query[metadata_attr_name])
            # Ensure update() doesn't pass unacceptable attributes
            self.assertNotIn(unacceptable_attr, payload)
            # Ensure update() merges deep updates
            self.assertEqual(payload["query"]["analysis_type"], new_analysis_type)
            payload["query"]["analysis_type"] = "average"
            payload[unacceptable_attr] = original_query[unacceptable_attr]
            self.assertEqual(payload, original_query)
            headers = {}
            return (200, headers, json.dumps(updated_query))

        # ...then PUTs the merged definition back.
        responses.add_callback(
            responses.PUT,
            url,
            callback=request_callback,
            content_type='application/json',
        )
        saved_query = self.client.saved_queries.update("saved-query-name", updated_query)
        self.assertEqual(saved_query, updated_query)

    @responses.activate
    def test_update_full_saved_query(self):
        saved_queries_response = {
            "query_name": "saved-query-name",
            "refresh_rate": 14400,
            "query": {
                "analysis_type": "average",
                "event_collection": "TheCollection",
                "target_property": "TheProperty",
                "timeframe": "this_2_weeks"
            }
        }
        url = self._saved_query_url("saved-query-name")
        # Unlike update(), update_full() should not be fetching the existing definition.
        exception = HTTPError("No GET expected when performing a full update.")
        responses.add(responses.GET, re.compile(".*"), body=exception)

        def request_callback(request):
            payload = json.loads(request.body)
            # Ensure update_full() passes along the unaltered complete Saved/Cached Query def.
            self.assertEqual(payload, saved_queries_response)
            headers = {}
            return (200, headers, json.dumps(saved_queries_response))

        responses.add_callback(
            responses.PUT,
            url,
            callback=request_callback,
            content_type='application/json',
        )
        saved_query = self.client.saved_queries.update_full("saved-query-name", saved_queries_response)
        self.assertEqual(saved_query, saved_queries_response)

    def test_delete_saved_query_master_key(self):
        client = KeenClient(project_id="123123", read_key="123123")
        self.assertRaises(
            exceptions.InvalidEnvironmentError,
            lambda: client.saved_queries.delete("saved-query-name")
        )

    @responses.activate
    def test_delete_saved_query(self):
        responses.add(
            responses.DELETE, self._saved_query_url("saved-query-name"),
            status=204, json=""
        )
        response = self.client.saved_queries.delete("saved-query-name")
        self.assertEqual(response, True)

    @responses.activate
    def test_saved_query_results(self):
        saved_queries_response = {
            "query_name": "saved-query-name",
            "results": {}
        }
        # NOTE(review): 209 is a non-standard status code; presumably chosen
        # to prove any 2xx is accepted - confirm before "fixing" it.
        responses.add(
            responses.GET,
            self._saved_query_url("saved-query-name", "result"),
            status=209, json=saved_queries_response
        )
        response = self.client.saved_queries.results("saved-query-name")
        self.assertEqual(response, saved_queries_response)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Type of the optional `cls` callback accepted by every operation below: it
# receives the raw PipelineResponse, the deserialized result and the response
# headers, and its return value replaces the operation's result.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SecurityPartnerProvidersOperations:
"""SecurityPartnerProvidersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        security_partner_provider_name: str,
        **kwargs: Any
    ) -> None:
        # Issues the single raw DELETE request; long-running-operation polling
        # is layered on top of this by begin_delete().
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map auth/not-found/conflict statuses to specific exception types;
        # callers may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all valid responses for an (async) delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        security_partner_provider_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified Security Partner Provider.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param security_partner_provider_name: The name of the Security Partner Provider.
        :type security_partner_provider_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        # A continuation token lets the poller resume a previously started
        # operation instead of issuing a fresh initial request.
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                security_partner_provider_name=security_partner_provider_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete yields no body; only forward to a caller-supplied cls.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        security_partner_provider_name: str,
        **kwargs: Any
    ) -> "_models.SecurityPartnerProvider":
        """Gets the specified Security Partner Provider.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param security_partner_provider_name: The name of the Security Partner Provider.
        :type security_partner_provider_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SecurityPartnerProvider, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_04_01.models.SecurityPartnerProvider
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProvider"]
        # Map auth/not-found/conflict statuses to specific exception types;
        # callers may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-04-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    security_partner_provider_name: str,
    parameters: "_models.SecurityPartnerProvider",
    **kwargs: Any
) -> "_models.SecurityPartnerProvider":
    """Issue the initial PUT of the create-or-update long-running operation.

    Returns the deserialized SecurityPartnerProvider from a 200 (update) or
    201 (create) response; any other status raises HttpResponseError.
    Called by begin_create_or_update, which drives the subsequent polling.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProvider"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    # Callers may extend/override the default status-code-to-exception map.
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request body and send the PUT through the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'SecurityPartnerProvider')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 and 201 carry the same payload shape; both deserialize identically.
    if response.status_code == 200:
        deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    security_partner_provider_name: str,
    parameters: "_models.SecurityPartnerProvider",
    **kwargs: Any
) -> AsyncLROPoller["_models.SecurityPartnerProvider"]:
    """Creates or updates the specified Security Partner Provider.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param security_partner_provider_name: The name of the Security Partner Provider.
    :type security_partner_provider_name: str
    :param parameters: Parameters supplied to the create or update Security Partner Provider
     operation.
    :type parameters: ~azure.mgmt.network.v2020_04_01.models.SecurityPartnerProvider
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object
     for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either SecurityPartnerProvider or the
     result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.SecurityPartnerProvider]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProvider"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only send the initial PUT when starting fresh; a continuation token
    # means we are resuming a previously started operation.
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            security_partner_provider_name=security_partner_provider_name,
            parameters=parameters,
            # Keep the raw pipeline response; final deserialization happens
            # in get_long_running_output once polling completes.
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialization callback invoked by the poller on completion.
        deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # Final state is read from the Azure-AsyncOperation header per ARM LRO rules.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        # raw_result is guaranteed to exist here (cont_token was None above).
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'}  # type: ignore
async def update_tags(
    self,
    resource_group_name: str,
    security_partner_provider_name: str,
    parameters: "_models.TagsObject",
    **kwargs: Any
) -> "_models.SecurityPartnerProvider":
    """Updates tags of a Security Partner Provider resource.

    Sends a PATCH carrying only the tags; this is not a long-running
    operation, so the updated resource is returned directly.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param security_partner_provider_name: The name of the Security Partner Provider.
    :type security_partner_provider_name: str
    :param parameters: Parameters supplied to update Security Partner Provider tags.
    :type parameters: ~azure.mgmt.network.v2020_04_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecurityPartnerProvider, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_04_01.models.SecurityPartnerProvider
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProvider"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self.update_tags.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'securityPartnerProviderName': self._serialize.url("security_partner_provider_name", security_partner_provider_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the tags body and send the PATCH through the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('SecurityPartnerProvider', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders/{securityPartnerProviderName}'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.SecurityPartnerProviderListResult"]:
    """Lists all Security Partner Providers in a resource group.

    Returns an AsyncItemPaged that lazily follows next_link across pages;
    no request is sent until the iterator is consumed.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SecurityPartnerProviderListResult or the result
     of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.SecurityPartnerProviderListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProviderListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request: the first page uses the templated URL plus
        # api-version; subsequent pages use the service-provided next_link
        # verbatim (it already embeds its query string).
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Split one page into (next_link, items) for AsyncItemPaged.
        deserialized = self._deserialize('SecurityPartnerProviderListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page and surface HTTP errors as exceptions.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/securityPartnerProviders'}  # type: ignore
def list(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.SecurityPartnerProviderListResult"]:
    """Gets all the Security Partner Providers in a subscription.

    Subscription-wide variant of list_by_resource_group: same lazy paging,
    but the URL is scoped to the subscription only.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SecurityPartnerProviderListResult or the result
     of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.SecurityPartnerProviderListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecurityPartnerProviderListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-04-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page: templated URL + api-version; later pages: next_link as-is.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Split one page into (next_link, items) for AsyncItemPaged.
        deserialized = self._deserialize('SecurityPartnerProviderListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page and surface HTTP errors as exceptions.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/securityPartnerProviders'}  # type: ignore
| |
#!/usr/bin/env python
"""
Copyright 2010-2017 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import math
import struct
import numpy as np
import matplotlib as mpl
mpl.rcParams['font.size'] = 10.
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.mlab as mlab
#from matplotlib.patches import Circle
from matplotlib import lines
from pyproj import Proj
# Import Broadband modules
import gislib as GS
import validation_cfg
from station_list import StationList
from install_cfg import InstallCfg
from Projection import Projection
import fault_utils
from extract_boundaries import ExtractBoundaries
import plot_config
# Constants
# Matplotlib single-letter color codes for the map overlays drawn by
# PlotValueMap.plot_site.
COLOR_CITY = 'r'  # city markers: red
COLOR_HYPO = 'g'  # hypocenter star: green
COLOR_STA = 'c'   # station dots: cyan
COLOR_TXT = 'k'   # plain text labels: black
class PlotValueMap(object):
    """Plot per-station scalar values (e.g. GOF scores) as an interpolated
    color map over a UTM grid covering the station region.

    The grid is derived from the bounding box of the stations in
    station_file, padded by 0.1 degree on each side, at self.dx meter
    resolution.  Station locations, the coast line, and (optionally) the
    hypocenter are overlaid on the interpolated value map.
    """

    def __init__(self, station_file, sim_id=0, hypo=None):
        """Store configuration and derive the plot grid from the stations.

        station_file -- station list file name (resolved in init_dims)
        sim_id       -- simulation id, used to locate input/output dirs
        hypo         -- optional [lon, lat] hypocenter; converted to grid
                        x/y in build_station_list (the list is mutated)
        """
        self.station_file = station_file
        self.sim_id = sim_id
        self.install = InstallCfg.getInstance()
        self.coast_file = os.path.join(self.install.A_PLOT_DATA_DIR,
                                       "cali_coastline.mapgen")
        if not os.path.isfile(self.coast_file):
            # Missing coast line data is not fatal; the overlay is skipped.
            self.coast_file = ""
        self.value = "GOF"
        self.stats = []
        # Grid resolution in meters.  NOTE(review): the original inline
        # comment said "100 mts" but the configured value is 500 m.
        self.dx = 500.0
        self.spacing = [self.dx, self.dx]
        self.hypo = hypo
        self.dim = []
        self.rbounds = []
        self.nw = []
        self.sw = []
        self.se = []
        self.ne = []
        self.PLOT_MAP_LOC = [0.10, 0.15, 0.8, 0.8]
        self.origin = []
        self.offset = []
        self.x_invert = False
        self.y_invert = False
        self.init_dims()

    def init_dims(self):
        """Locate the station file, compute the padded bounding box and the
        UTM grid dimensions, then build the station list and coast line."""
        a_stationlist = os.path.join(self.install.A_IN_DATA_DIR,
                                     str(self.sim_id), self.station_file)
        if not os.path.isfile(a_stationlist):
            # Fall back to the current working directory.
            a_stationlist = os.path.join(os.getcwd(), self.station_file)
            if not os.path.isfile(a_stationlist):
                print("Error (plot_value_map): Unable to locate station file: "
                      "%s!" % self.station_file)
                sys.exit()
        self.station_file = a_stationlist
        print("Using Station File: %s" % (self.station_file))

        slo = StationList(self.station_file)
        site_list = slo.getStationList()
        # Track extreme lon/lat over all stations (0.0 marks "unset";
        # assumes no station sits exactly on the equator/prime meridian).
        w_lon = 0.0
        e_lon = 0.0
        n_lat = 0.0
        s_lat = 0.0
        for sites in site_list:
            slon = float(sites.lon)
            slat = float(sites.lat)
            if w_lon == 0.0:
                w_lon = slon
            elif slon < w_lon:
                w_lon = slon
            if e_lon == 0.0:
                e_lon = slon
            elif slon > e_lon:
                e_lon = slon
            if n_lat == 0.0:
                n_lat = slat
            elif slat > n_lat:
                n_lat = slat
            if s_lat == 0.0:
                s_lat = slat
            elif slat < s_lat:
                s_lat = slat
        # Pad the bounding box by 0.1 degree on every side.
        self.rbounds = [(n_lat + 0.1), (s_lat - 0.1),
                        (e_lon + 0.1), (w_lon - 0.1)]
        print("Region Bounds: ", self.rbounds)
        self.nw = (self.rbounds[3], self.rbounds[0])
        self.sw = (self.rbounds[3], self.rbounds[1])
        self.se = (self.rbounds[2], self.rbounds[1])
        self.ne = (self.rbounds[2], self.rbounds[0])
        self.PLOT_MAP_LOC = [0.10, 0.15, 0.8, 0.8]
        self.origin = self.nw  # North-West corner is the grid origin
        self.x_invert = False
        self.y_invert = True

        # UTM zone 11 (Southern California); zone 10 would be needed for
        # Northern California events (region selection is not wired up).
        rzone = 11
        print("Region : None, UTM Zone: %d" % (rzone))
        pobj = Proj(proj='utm', zone=rzone, ellps='WGS84')
        # list() keeps the offsets indexable under Python 3 as well
        # (Python 2 map() already returned a list).
        self.offset = list(map(round, pobj(self.origin[0], self.origin[1])))
        # Region dimensions in grid cells (km * 1000 / dx).
        dim_y = math.ceil(GS.get_distance(self.nw, self.sw)) * (1000.0 / self.dx)
        dim_x = math.ceil(GS.get_distance(self.sw, self.se)) * (1000.0 / self.dx)
        # Only the surface is plotted, so Z spans a single kilometer.
        dim_z = 1.0 * (1000.0 / self.dx)
        self.dim = [int(dim_x), int(dim_y), int(dim_z)]
        self.projobj = Projection(self.dx, self.dim, self.offset, "utm", rzone)
        self.build_station_list(self.station_file)
        self.boundfile = self.build_coastline(self.coast_file, self.projobj,
                                              self.offset, self.dx, self.dim,
                                              self.x_invert, self.y_invert)
        return

    def build_coastline(self, mapfile, proj, offsets, dx, dim, x_invert, y_invert):
        """Extract coast line polylines into a binary boundary file.

        Returns the boundary file path, or None when no coast line data is
        available (plot_boundaries then skips the overlay).
        """
        if mapfile == "":
            print("Warning (plot_Value_map): "
                  "Missing coast line data! Skipping coast line plot!")
            return
        boundfile = "%s/%d/boundaries.txt" % (self.install.A_TMP_DATA_DIR, self.sim_id)
        print("Mapfile is: %s" % (mapfile))
        prog = ExtractBoundaries(mapfile, boundfile, proj, offsets,
                                 dx, dim, x_invert, y_invert)
        prog.run()
        return boundfile

    def build_station_list(self, station_file):
        """Convert station lon/lat to grid x/y, store them in self.stats,
        and dump them to <station_file_basename>.txt in the cwd.

        Also converts self.hypo (if set) from lon/lat to grid x/y in place.
        """
        work_dir = os.getcwd()
        proj = self.projobj
        sfname = os.path.splitext(os.path.basename(station_file))[0]
        fname = '%s/%s.txt' % (work_dir, sfname)
        stats = []
        slo = StationList(self.station_file)
        site_list = slo.getStationList()
        sfile = open(fname, 'w')
        try:
            for sites in site_list:
                slon = float(sites.lon)
                slat = float(sites.lat)
                site = sites.scode
                x, y = proj.get_xy_from_geo(slon, slat)
                stats.append((x, y))
                sfile.write("%-12s\t%f\t%f\t%f\t%f\n" % (site, slon, slat, x, y))
        finally:
            # Close deterministically even if a station record is malformed.
            sfile.close()
        self.stats = stats
        # Convert the hypocenter to grid coordinates as well.
        if self.hypo != [] and self.hypo is not None:
            self.hypo[0], self.hypo[1] = proj.get_xy_from_geo(self.hypo[0],
                                                              self.hypo[1])
        return

    def get_plot_points(self, datalist):
        """Interpolate scattered (x, y, value) samples onto the plot grid.

        Returns a 2-D array covering dim[0] columns by dim[1] rows.
        """
        x = []
        y = []
        z = []
        for sx, sy, data in datalist:
            x.append(sx)
            y.append(sy)
            z.append(data)
        x = np.array(x)
        y = np.array(y)
        z = np.array(z)
        assert x.ndim == y.ndim == z.ndim == 1 and len(x) == len(y) == len(z)
        # Grid rows run downward (negative y), matching y_invert above.
        xi = np.linspace(0.0, float(self.dim[0] - 1), self.dim[0])
        yi = np.linspace(0.0, float((self.dim[1] - 1) * -1), self.dim[1])
        # NOTE(review): mlab.griddata was deprecated and removed in
        # matplotlib >= 3.1; scipy.interpolate.griddata is the documented
        # replacement if this module is moved to a newer matplotlib.
        zi = mlab.griddata(x, y, z, xi, yi)
        return zi

    def get_boundary_lines(self, boundfile):
        """Read polylines from a binary boundary file.

        The file is a sequence of packed (int flag, float x, float y)
        records.  A zero flag terminates the current polyline; polylines
        with fewer than two points are discarded.

        Returns a list of polylines, each a list of [x, y] pairs.
        """
        boundaries = []
        poly = []
        readsize = struct.calcsize('iff')
        ip = open(boundfile, 'rb')
        try:
            packed = ip.read(readsize)
            # Stop at EOF (or a trailing partial record).  The original
            # compared against '' which never matches the bytes returned
            # under Python 3, so unpack() blew up at end of file.
            while len(packed) == readsize:
                data = struct.unpack('iff', packed)
                if data[0] == 0:
                    # Record separator: close the current polyline.
                    if len(poly) > 1:
                        boundaries.append(poly)
                    poly = []
                else:
                    poly.append([data[1], data[2]])
                packed = ip.read(readsize)
            # Flush a final polyline not followed by a separator record.
            if len(poly) > 1:
                boundaries.append(poly)
        finally:
            ip.close()
        return boundaries

    def plot_grid_array(self, fig, loc, points,
                        labels, units, cmap, norm, title,
                        ticks=None, invert_y=True):
        """Render the interpolated grid as an image in new axes at loc."""
        ax = fig.add_axes(loc, frameon=True)
        ax.set_title('%s' % (title))
        ax.set_xlabel('%s (%s)' % (labels[0], units[0]))
        ax.set_ylabel('%s (%s)' % (labels[1], units[1]))
        ax.imshow(points, cmap=cmap, norm=norm, interpolation='nearest', alpha=0.8)
        dims = points.shape
        # Pin the limits so the image fills the axes exactly.
        ax.set_xlim(0, dims[1])
        ax.set_ylim(0, dims[0])
        if invert_y:
            ax.invert_yaxis()
        if ticks is not None:
            plt.xticks(ticks[0][0], ticks[0][1])
            plt.yticks(ticks[1][0], ticks[1][1])
        return 0

    def plot_colorbar(self, loc, value_type, value_units, cmap, norm,
                      value_min, value_max, orient):
        """Draw a labeled colorbar with 11 evenly spaced ticks."""
        cax = plt.axes(loc)
        num_ticks = 10
        diff = (value_max - value_min) / float(num_ticks)
        ticks = [value_min + (i * diff) for i in range(num_ticks + 1)]
        cax.set_title("%s (%s)" % (value_type, value_units))
        mpl.colorbar.ColorbarBase(cax, cmap=cmap,
                                  norm=norm,
                                  ticks=ticks,
                                  format='%2.1f',
                                  orientation=orient)
        return 0

    def plot_site(self, fig, ax, label, point1, point2, sitetype, r):
        """Plot a single site marker and/or label at (point1, point2).

        sitetype selects the style: 'sta' station dot, 'hypo' hypocenter
        star with label, 'txt' text only, anything else a labeled city dot.
        The ax and r parameters are currently unused but kept for interface
        compatibility with existing callers.
        """
        grid_x = [point1]
        grid_y = [point2]
        if sitetype == 'sta':
            fig.plot(grid_x, grid_y, color=COLOR_STA, marker='.')
        elif sitetype == 'hypo':
            fig.plot(grid_x, grid_y, color=COLOR_HYPO, marker='*',
                     markersize=10, markeredgecolor='red')
            fig.text(point1, point2 + 2,
                     '%s' % (label), color=COLOR_HYPO,
                     horizontalalignment='center')
        elif sitetype == 'txt':
            fig.text(point1, point2,
                     '%s' % (label), color=COLOR_TXT,
                     horizontalalignment='center')
        else:
            fig.plot(grid_x, grid_y, color=COLOR_CITY, marker='.')
            fig.text(point1, point2 + 2,
                     '%s' % (label), color=COLOR_CITY,
                     horizontalalignment='center')
        return

    def plot_boundaries(self, fig, boundfile):
        """Overlay coast line polylines on the map axes."""
        if boundfile is None:
            # build_coastline found no coast line data; skip the overlay
            # (the original crashed on open(None) here).
            return
        boundaries = self.get_boundary_lines(boundfile)
        for poly in boundaries:
            prevline = []
            for line in poly:
                if prevline != []:
                    grid_x = [prevline[0], line[0]]
                    grid_y = [prevline[1], line[1]]
                    cline = lines.Line2D(grid_x, grid_y, lw=1., color='k')
                    fig.add_line(cline)
                prevline = line
        return

    def do_plot(self, datalist, title, outfile):
        """Interpolate datalist, render the map with overlays, and save it."""
        plot_x_size = 8
        plot_y_size = 6
        fig = plt.figure(figsize=(plot_x_size, plot_y_size))
        # Get plot points
        points = self.get_plot_points(datalist)
        if points is None:
            print("ERROR (plot_valu_map): Failed to get plot points!")
            sys.exit(-1)
        value_min = points.min()
        value_max = points.max()
        # Use a fixed 0-100 color scale when the data allows it.
        if value_min >= 0:
            value_min = 0
        if value_max <= 100:
            value_max = 100
        cmap = cm.gist_rainbow_r
        norm = mcolors.Normalize(vmin=value_min, vmax=value_max)
        # Tick labels every 50 km, expressed in km rather than grid cells.
        ticks = [[[], []], [[], []]]
        for i in range(0, self.dim[0] * int(self.spacing[0]) + 1, 50000):
            ticks[0][0].append(i / self.spacing[0])
            ticks[0][1].append('%d' % (i / 1000))
        for i in range(0, self.dim[1] * int(self.spacing[1]) + 1, 50000):
            ticks[1][0].append(i / self.spacing[1])
            ticks[1][1].append('%d' % (i / 1000))
        origin_x = 0.05
        origin_y = 0.18
        x_length = 0.85
        y_length = 0.74
        loc = [origin_x, origin_y, x_length, y_length]
        # Plot the value map
        invert_y = True
        self.plot_grid_array(fig, loc, points,
                             ['X', 'Y'], ['km', 'km'],
                             cmap, norm, title,
                             ticks, invert_y)
        # Overlay stations (grid y runs downward, hence the sign flip).
        radius = 1.0 * float(self.dx) / 1000.0
        ax = fig.get_axes()
        for site in self.stats:
            self.plot_site(plt, ax[0], "", site[0], (-1 * site[1]), 'sta', radius)
        # Overlay the coast line.
        self.plot_boundaries(ax[0], self.boundfile)
        # Overlay the hypocenter.
        if self.hypo != [] and self.hypo is not None:
            self.plot_site(plt, ax[0], "", self.hypo[0],
                           (-1 * self.hypo[1]), 'hypo', radius * 10.0)
        # Horizontal colorbar below the map.
        value_units = '%'
        c_orient = "horizontal"
        if c_orient == "horizontal":
            cloc = [origin_x + (x_length * 0.1), origin_y - 0.13,
                    (x_length * 0.8), 0.02]
        # Sanitize only the file name, never the directory part: str.replace
        # returns a new string (the original discarded the results), and a
        # blanket '/'->'_' replacement would mangle the output path.
        out_dir = os.path.dirname(outfile)
        out_name = os.path.basename(outfile).replace(' ', '_')
        outfile = os.path.join(out_dir, out_name)
        self.plot_colorbar(cloc, self.value, value_units, cmap, norm,
                           value_min, value_max, c_orient)
        print("Saving plot file %s" % (outfile))
        plt.savefig(outfile, dpi=plot_config.dpi)
        plt.show()
        return 0

    def run(self, data, label):
        """Plot the values in data (one record per station) and save the
        map as <outdir>/<sim_id>_<label>_map.png.

        data -- sequence aligned with self.stats; data[i][0] is the value
                plotted for station i.  NOTE(review): the __main__ driver
                passes sys.argv[2] (a raw string) here - presumably callers
                are expected to pass a parsed list of records; verify.
        """
        self.label = label
        self.data = data
        a_outdir = os.path.join(self.install.A_OUT_DATA_DIR, str(self.sim_id))
        outfile = "%s/%d_%s_map.png" % (a_outdir, self.sim_id, label)
        title = "Run %d - %s Map" % (self.sim_id, label)
        self.log = "%s/%d/%d.plot_value_map.log" % (self.install.A_OUT_LOG_DIR,
                                                    self.sim_id, self.sim_id)
        if len(data) != len(self.stats):
            print("ERROR (plot_valu_map): Data file length "
                  "(%d) is not equal to number of stations (%d)!"
                  % (len(data), len(self.stats)))
            sys.exit(-1)
        datalist = []
        for iindex, value in enumerate(self.stats):
            datalist.append((float(value[0]), float(value[1]), data[iindex][0]))
        self.do_plot(datalist, title, outfile)
if __name__ == '__main__':
    # Command line: station_file data label validation_event_name sim_id
    STATION_FILE = sys.argv[1]
    # NOTE(review): DATA is the raw argv string, but PlotValueMap.run()
    # indexes it like a sequence of records (data[i][0]) - presumably a
    # parsed data structure was intended here; verify against callers.
    DATA = sys.argv[2]
    LABEL = sys.argv[3]
    VNAME = sys.argv[4]
    print("Validation Event - %s" % (VNAME))
    HYPO = []
    # NOTE(review): VNAME comes from sys.argv so it is always a string and
    # never None - this guard is effectively always true; confirm intent
    # (perhaps an empty string / "None" sentinel was meant).
    if VNAME is not None:
        # Look up the validation event and pull the hypocenter from its SRF.
        VAL_OBJ = validation_cfg.VE_EVENTS.get_event_by_name(VNAME)
        SRF_FILE = VAL_OBJ.get_input("GP", "srf")
        HYPO = fault_utils.get_hypocenter(SRF_FILE)
    PVM = PlotValueMap(STATION_FILE, int(sys.argv[5]), HYPO)
    PVM.run(DATA, LABEL)
| |
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.compute.rpcapi
"""
import contextlib
import mock
from oslo.config import cfg
from oslo.serialization import jsonutils
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova.objects import block_device as objects_block_dev
from nova.objects import network_request as objects_network_request
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit.fake_instance import fake_instance_obj
CONF = cfg.CONF
class ComputeRpcAPITestCase(test.TestCase):
def setUp(self):
    """Build the fake instance and volume BDM shared by all the tests."""
    super(ComputeRpcAPITestCase, self).setUp()
    self.context = context.get_admin_context()
    self.fake_instance_obj = fake_instance_obj(self.context,
                                               host='fake_host',
                                               instance_type_id=1)
    # Primitive (dict) form of the instance, as it travels over RPC.
    self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
    bdm_dict = fake_block_device.FakeDbBlockDeviceDict({
        'source_type': 'volume',
        'destination_type': 'volume',
        'instance_uuid': self.fake_instance['uuid'],
        'volume_id': 'fake-volume-id',
    })
    self.fake_volume_bdm = jsonutils.to_primitive(bdm_dict)
def test_serialized_instance_has_name(self):
    # jsonutils.to_primitive() must keep the computed 'name' property in
    # the serialized instance dict.
    self.assertIn('name', self.fake_instance)
def _test_compute_api(self, method, rpc_method, **kwargs):
    """Invoke a compute RPC API method with mocked transport and verify
    the cast/call it produces.

    method     -- name of the ComputeAPI method under test
    rpc_method -- 'cast' or 'call', the transport primitive expected
    kwargs     -- arguments forwarded to the method; special keys:
                  rpcapi_class (alternate API class), version (expected
                  RPC version cap), host_param/host/destination (server
                  routing), return_bdm_object (mock a BDM return value).
    """
    ctxt = context.RequestContext('fake_user', 'fake_project')

    rpcapi = kwargs.pop('rpcapi_class', compute_rpcapi.ComputeAPI)()
    self.assertIsNotNone(rpcapi.client)
    self.assertEqual(rpcapi.client.target.topic, CONF.compute_topic)

    # Keep the real prepare() so can_send_version below reflects reality.
    orig_prepare = rpcapi.client.prepare
    expected_version = kwargs.pop('version', rpcapi.client.target.version)

    expected_kwargs = kwargs.copy()
    # At version 3.23 NetworkRequest objects are downgraded to the legacy
    # (network_id, address, port_id) tuple form on the wire.
    if ('requested_networks' in expected_kwargs and
            expected_version == '3.23'):
        expected_kwargs['requested_networks'] = []
        for requested_network in kwargs['requested_networks']:
            expected_kwargs['requested_networks'].append(
                (requested_network.network_id,
                 str(requested_network.address),
                 requested_network.port_id))
    # host_param is the 'host' argument of the API method itself; a bare
    # 'host'/'destination' kwarg is only routing info and is not sent.
    if 'host_param' in expected_kwargs:
        expected_kwargs['host'] = expected_kwargs.pop('host_param')
    else:
        expected_kwargs.pop('host', None)
    expected_kwargs.pop('destination', None)

    # These two methods choose cast vs call through a flag argument.
    cast_and_call = ['confirm_resize', 'stop_instance']
    if rpc_method == 'call' and method in cast_and_call:
        if method == 'confirm_resize':
            kwargs['cast'] = False
        else:
            kwargs['do_cast'] = False
    # Work out which compute host the message should be routed to.
    if 'host' in kwargs:
        host = kwargs['host']
    elif 'destination' in kwargs:
        host = kwargs['destination']
    elif 'instances' in kwargs:
        host = kwargs['instances'][0]['host']
    else:
        host = kwargs['instance']['host']

    with contextlib.nested(
        mock.patch.object(rpcapi.client, rpc_method),
        mock.patch.object(rpcapi.client, 'prepare'),
        mock.patch.object(rpcapi.client, 'can_send_version'),
    ) as (
        rpc_mock, prepare_mock, csv_mock
    ):
        prepare_mock.return_value = rpcapi.client
        if 'return_bdm_object' in kwargs:
            del kwargs['return_bdm_object']
            rpc_mock.return_value = objects_block_dev.BlockDeviceMapping()
        elif rpc_method == 'call':
            rpc_mock.return_value = 'foo'
        else:
            rpc_mock.return_value = None
        # Delegate version negotiation to the real (unmocked) client.
        csv_mock.side_effect = (
            lambda v: orig_prepare(version=v).can_send_version())

        retval = getattr(rpcapi, method)(ctxt, **kwargs)
        self.assertEqual(retval, rpc_mock.return_value)

        prepare_mock.assert_called_once_with(version=expected_version,
                                             server=host)
        rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs)
def test_add_aggregate_host(self):
    # add_aggregate_host is cast to the target host with slave info.
    self._test_compute_api(
        'add_aggregate_host', 'cast',
        slave_info={},
        aggregate={'id': 'fake_id'},
        host_param='host',
        host='host')
def test_add_fixed_ip_to_instance(self):
    # RPC cast, pinned at version 3.12.
    self._test_compute_api(
        'add_fixed_ip_to_instance', 'cast',
        network_id='id',
        instance=self.fake_instance_obj,
        version='3.12')
def test_attach_interface(self):
    # Synchronous call, pinned at version 3.17.
    self._test_compute_api(
        'attach_interface', 'call',
        version='3.17',
        requested_ip='192.168.1.50',
        port_id='id2',
        network_id='id',
        instance=self.fake_instance_obj)
def test_attach_volume(self):
self._test_compute_api('attach_volume', 'cast',
instance=self.fake_instance_obj, volume_id='id',
mountpoint='mp', bdm=self.fake_volume_bdm, version='3.16')
def test_change_instance_metadata(self):
self._test_compute_api('change_instance_metadata', 'cast',
instance=self.fake_instance_obj, diff={}, version='3.7')
def test_check_can_live_migrate_destination(self):
self._test_compute_api('check_can_live_migrate_destination', 'call',
instance=self.fake_instance_obj,
destination='dest', block_migration=True,
disk_over_commit=True, version='3.32')
def test_check_can_live_migrate_source(self):
self._test_compute_api('check_can_live_migrate_source', 'call',
instance=self.fake_instance_obj,
dest_check_data={"test": "data"}, version='3.32')
def test_check_instance_shared_storage(self):
self._test_compute_api('check_instance_shared_storage', 'call',
instance=self.fake_instance_obj, data='foo',
version='3.29')
def test_confirm_resize_cast(self):
self._test_compute_api('confirm_resize', 'cast',
instance=self.fake_instance_obj, migration={'id': 'foo'},
host='host', reservations=list('fake_res'))
def test_confirm_resize_call(self):
self._test_compute_api('confirm_resize', 'call',
instance=self.fake_instance_obj, migration={'id': 'foo'},
host='host', reservations=list('fake_res'))
def test_detach_interface(self):
self._test_compute_api('detach_interface', 'cast',
version='3.17', instance=self.fake_instance_obj,
port_id='fake_id')
def test_detach_volume(self):
self._test_compute_api('detach_volume', 'cast',
instance=self.fake_instance_obj, volume_id='id',
version='3.25')
    # Resize-finish and console-related RPCs.
    def test_finish_resize(self):
        self._test_compute_api('finish_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'foo'},
                image='image', disk_info='disk_info', host='host',
                reservations=list('fake_res'))

    def test_finish_revert_resize(self):
        self._test_compute_api('finish_revert_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'fake_id'},
                host='host', reservations=list('fake_res'))

    def test_get_console_output(self):
        self._test_compute_api('get_console_output', 'call',
                instance=self.fake_instance_obj, tail_length='tl',
                version='3.28')

    def test_get_console_pool_info(self):
        self._test_compute_api('get_console_pool_info', 'call',
                console_type='type', host='host')

    def test_get_console_topic(self):
        self._test_compute_api('get_console_topic', 'call', host='host')

    def test_get_diagnostics(self):
        self._test_compute_api('get_diagnostics', 'call',
                instance=self.fake_instance_obj, version='3.18')

    # NOTE: uses the plain fake_instance dict, not fake_instance_obj.
    def test_get_instance_diagnostics(self):
        self._test_compute_api('get_instance_diagnostics', 'call',
                instance=self.fake_instance, version='3.31')

    def test_get_vnc_console(self):
        self._test_compute_api('get_vnc_console', 'call',
                instance=self.fake_instance_obj, console_type='type',
                version='3.2')

    def test_get_spice_console(self):
        self._test_compute_api('get_spice_console', 'call',
                instance=self.fake_instance_obj, console_type='type',
                version='3.1')

    def test_get_rdp_console(self):
        self._test_compute_api('get_rdp_console', 'call',
                instance=self.fake_instance_obj, console_type='type',
                version='3.10')

    def test_get_serial_console(self):
        self._test_compute_api('get_serial_console', 'call',
                instance=self.fake_instance, console_type='serial',
                version='3.34')

    def test_validate_console_port(self):
        self._test_compute_api('validate_console_port', 'call',
                instance=self.fake_instance_obj, port="5900",
                console_type="novnc", version='3.3')

    def test_host_maintenance_mode(self):
        self._test_compute_api('host_maintenance_mode', 'call',
                host_param='param', mode='mode', host='host')

    def test_host_power_action(self):
        self._test_compute_api('host_power_action', 'call', action='action',
                host='host')

    def test_inject_network_info(self):
        self._test_compute_api('inject_network_info', 'cast',
                instance=self.fake_instance_obj)
    # Live-migration, resize and rebuild RPCs.
    def test_live_migration(self):
        self._test_compute_api('live_migration', 'cast',
                instance=self.fake_instance_obj, dest='dest',
                block_migration='blockity_block', host='tsoh',
                migrate_data={}, version='3.26')

    def test_post_live_migration_at_destination(self):
        self._test_compute_api('post_live_migration_at_destination', 'cast',
                instance=self.fake_instance_obj,
                block_migration='block_migration', host='host', version='3.14')

    def test_pause_instance(self):
        self._test_compute_api('pause_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_soft_delete_instance(self):
        self._test_compute_api('soft_delete_instance', 'cast',
                instance=self.fake_instance_obj,
                reservations=['uuid1', 'uuid2'])

    def test_swap_volume(self):
        self._test_compute_api('swap_volume', 'cast',
                instance=self.fake_instance_obj, old_volume_id='oldid',
                new_volume_id='newid')

    def test_restore_instance(self):
        self._test_compute_api('restore_instance', 'cast',
                instance=self.fake_instance_obj, version='3.20')

    def test_pre_live_migration(self):
        self._test_compute_api('pre_live_migration', 'call',
                instance=self.fake_instance_obj,
                block_migration='block_migration', disk='disk', host='host',
                migrate_data=None, version='3.19')

    def test_prep_resize(self):
        self._test_compute_api('prep_resize', 'cast',
                instance=self.fake_instance_obj, instance_type='fake_type',
                image='fake_image', host='host',
                reservations=list('fake_res'),
                request_spec='fake_spec',
                filter_properties={'fakeprop': 'fakeval'},
                node='node')

    def test_reboot_instance(self):
        # Show the full diff for the (large) expected-kwargs dict on failure.
        self.maxDiff = None
        self._test_compute_api('reboot_instance', 'cast',
                instance=self.fake_instance_obj,
                block_device_info={},
                reboot_type='type')

    def test_rebuild_instance(self):
        self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
                injected_files='None', image_ref='None', orig_image_ref='None',
                bdms=[], instance=self.fake_instance_obj, host='new_host',
                orig_sys_metadata=None, recreate=True, on_shared_storage=True,
                preserve_ephemeral=True, version='3.21')

    # return_bdm_object makes the mocked call() hand back a real
    # BlockDeviceMapping object (see _test_compute_api).
    def test_reserve_block_device_name(self):
        self._test_compute_api('reserve_block_device_name', 'call',
                instance=self.fake_instance_obj, device='device',
                volume_id='id', disk_bus='ide', device_type='cdrom',
                version='3.35', return_bdm_object=True)
def refresh_provider_fw_rules(self):
self._test_compute_api('refresh_provider_fw_rules', 'cast',
host='host')
    # Security-group RPCs go through the separate SecurityGroupAPI client.
    def test_refresh_security_group_rules(self):
        self._test_compute_api('refresh_security_group_rules', 'cast',
                rpcapi_class=compute_rpcapi.SecurityGroupAPI,
                security_group_id='id', host='host')

    def test_refresh_security_group_members(self):
        self._test_compute_api('refresh_security_group_members', 'cast',
                rpcapi_class=compute_rpcapi.SecurityGroupAPI,
                security_group_id='id', host='host')

    def test_remove_aggregate_host(self):
        self._test_compute_api('remove_aggregate_host', 'cast',
                aggregate={'id': 'fake_id'}, host_param='host', host='host',
                slave_info={})

    def test_remove_fixed_ip_from_instance(self):
        self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
                instance=self.fake_instance_obj, address='addr',
                version='3.13')

    def test_remove_volume_connection(self):
        self._test_compute_api('remove_volume_connection', 'call',
                instance=self.fake_instance, volume_id='id', host='host',
                version='3.30')

    def test_rescue_instance(self):
        # Pin the compute upgrade level so the older 3.9 signature
        # (no rescue_image_ref) is exercised.
        self.flags(compute='3.9', group='upgrade_levels')
        self._test_compute_api('rescue_instance', 'cast',
                instance=self.fake_instance_obj, rescue_password='pw',
                version='3.9')

    def test_rescue_instance_with_rescue_image_ref_passed(self):
        self._test_compute_api('rescue_instance', 'cast',
                instance=self.fake_instance_obj, rescue_password='pw',
                rescue_image_ref='fake_image_ref', version='3.24')

    def test_reset_network(self):
        self._test_compute_api('reset_network', 'cast',
                instance=self.fake_instance_obj)

    def test_resize_instance(self):
        self._test_compute_api('resize_instance', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'fake_id'},
                image='image', instance_type={'id': 1},
                reservations=list('fake_res'))

    def test_resume_instance(self):
        self._test_compute_api('resume_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_revert_resize(self):
        self._test_compute_api('revert_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'fake_id'},
                host='host', reservations=list('fake_res'))

    def test_rollback_live_migration_at_destination(self):
        self._test_compute_api('rollback_live_migration_at_destination',
                'cast', instance=self.fake_instance_obj, host='host',
                destroy_disks=True, migrate_data=None, version='3.32')
    # Instance lifecycle RPCs (run/stop/suspend/shelve/...).
    def test_run_instance(self):
        self._test_compute_api('run_instance', 'cast',
                instance=self.fake_instance_obj, host='fake_host',
                request_spec='fake_spec', filter_properties={},
                requested_networks='networks', injected_files='files',
                admin_password='pw', is_first_time=True, node='node',
                legacy_bdm_in_spec=False, version='3.27')

    def test_set_admin_password(self):
        self._test_compute_api('set_admin_password', 'call',
                instance=self.fake_instance_obj, new_pass='pw',
                version='3.8')

    def test_set_host_enabled(self):
        self._test_compute_api('set_host_enabled', 'call',
                enabled='enabled', host='host')

    def test_get_host_uptime(self):
        self._test_compute_api('get_host_uptime', 'call', host='host')

    def test_backup_instance(self):
        self._test_compute_api('backup_instance', 'cast',
                instance=self.fake_instance_obj, image_id='id',
                backup_type='type', rotation='rotation')

    def test_snapshot_instance(self):
        self._test_compute_api('snapshot_instance', 'cast',
                instance=self.fake_instance_obj, image_id='id')

    def test_start_instance(self):
        self._test_compute_api('start_instance', 'cast',
                instance=self.fake_instance_obj)

    # stop_instance is exercised as both cast and call; the 'call' flavor
    # triggers the do_cast=False kwarg handling in _test_compute_api.
    def test_stop_instance_cast(self):
        self._test_compute_api('stop_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_stop_instance_call(self):
        self._test_compute_api('stop_instance', 'call',
                instance=self.fake_instance_obj)

    def test_suspend_instance(self):
        self._test_compute_api('suspend_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_terminate_instance(self):
        self._test_compute_api('terminate_instance', 'cast',
                instance=self.fake_instance_obj, bdms=[],
                reservations=['uuid1', 'uuid2'], version='3.22')

    def test_unpause_instance(self):
        self._test_compute_api('unpause_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_unrescue_instance(self):
        self._test_compute_api('unrescue_instance', 'cast',
                instance=self.fake_instance_obj, version='3.11')

    def test_shelve_instance(self):
        self._test_compute_api('shelve_instance', 'cast',
                instance=self.fake_instance_obj, image_id='image_id')

    def test_shelve_offload_instance(self):
        self._test_compute_api('shelve_offload_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_unshelve_instance(self):
        self._test_compute_api('unshelve_instance', 'cast',
                instance=self.fake_instance_obj, host='host', image='image',
                filter_properties={'fakeprop': 'fakeval'}, node='node',
                version='3.15')
    # Volume snapshots, external events and build_and_run_instance.
    def test_volume_snapshot_create(self):
        self._test_compute_api('volume_snapshot_create', 'cast',
                instance=self.fake_instance, volume_id='fake_id',
                create_info={}, version='3.6')

    def test_volume_snapshot_delete(self):
        self._test_compute_api('volume_snapshot_delete', 'cast',
                instance=self.fake_instance_obj, volume_id='fake_id',
                snapshot_id='fake_id2', delete_info={}, version='3.6')

    def test_external_instance_event(self):
        self._test_compute_api('external_instance_event', 'cast',
                instances=[self.fake_instance_obj],
                events=['event'],
                version='3.23')

    def test_build_and_run_instance(self):
        self._test_compute_api('build_and_run_instance', 'cast',
                instance=self.fake_instance_obj, host='host', image='image',
                request_spec={'request': 'spec'}, filter_properties=[],
                admin_password='passwd', injected_files=None,
                requested_networks=['network1'], security_groups=None,
                block_device_mapping=None, node='node', limits=[],
                version='3.33')

    # Icehouse compatibility: a NetworkRequestList must be downgraded to the
    # legacy tuple form (see the requested_networks handling in
    # _test_compute_api) when pinned to the older 3.23 version.
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_build_and_run_instance_icehouse_compat(self, is_neutron):
        self.flags(compute='icehouse', group='upgrade_levels')
        self._test_compute_api('build_and_run_instance', 'cast',
                instance=self.fake_instance_obj, host='host', image='image',
                request_spec={'request': 'spec'}, filter_properties=[],
                admin_password='passwd', injected_files=None,
                requested_networks= objects_network_request.NetworkRequestList(
                    objects=[objects_network_request.NetworkRequest(
                        network_id="fake_network_id", address="10.0.0.1",
                        port_id="fake_port_id")]),
                security_groups=None,
                block_device_mapping=None, node='node', limits=[],
                version='3.23')
| |
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Android Java code.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
This presubmit checks for the following:
- No new calls to Notification.Builder or NotificationCompat.Builder
constructors. Callers should use ChromeNotificationBuilder instead.
- No new calls to AlertDialog.Builder. Callers should use ModalDialogView
instead.
"""
import re
# Matches constructor calls like "new Notification.Builder" or
# "new NotificationCompat.Builder".
NEW_NOTIFICATION_BUILDER_RE = re.compile(
    r'\bnew\sNotification(Compat)?\.Builder\b')

# Matches an import of the AppCompat AlertDialog class.
IMPORT_APP_COMPAT_ALERTDIALOG_RE = re.compile(
    r'\bimport\sandroid\.support\.v7\.app\.AlertDialog;')

# Matches "new CompatibleAlertDialogBuilder", optionally qualified with
# "UiUtils.".
NEW_COMPATIBLE_ALERTDIALOG_BUILDER_RE = re.compile(
    r'\bnew\s+(UiUtils\s*\.)?CompatibleAlertDialogBuilder\b')

# Matches "new AlertDialog.Builder".
NEW_ALERTDIALOG_BUILDER_RE = re.compile(
    r'\bnew\sAlertDialog\.Builder\b')

# Matches lines that start (or continue) a // or /* ... */ Java comment.
COMMENT_RE = re.compile(r'^\s*(//|/\*|\*)')

# Common path prefix of the browser Java sources checked below.
BROWSER_ROOT = 'chrome/android/java/src/org/chromium/chrome/browser/'
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point run on `git cl upload`."""
  return _CommonChecks(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point run at commit (CQ) time."""
  return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  checks = (
      _CheckNotificationConstructors,
      _CheckAlertDialogBuilder,
      _CheckCompatibleAlertDialogBuilder,
      # Add more checks here
  )
  results = []
  for check in checks:
    results.extend(check(input_api, output_api))
  return results
def _CheckNotificationConstructors(input_api, output_api):
  """Flags new Notification.Builder / NotificationCompat.Builder calls."""
  # "Blacklist" because the following files are excluded from the check.
  # (These two files are the sanctioned wrappers and may call the raw
  # constructors.)
  blacklist = (
      'chrome/android/java/src/org/chromium/chrome/browser/notifications/'
      'NotificationBuilder.java',
      'chrome/android/java/src/org/chromium/chrome/browser/notifications/'
      'NotificationCompatBuilder.java'
  )
  error_msg = '''
  Android Notification Construction Check failed:
  Your new code added one or more calls to the Notification.Builder and/or
  NotificationCompat.Builder constructors, listed below.
  This is banned, please construct notifications using
  NotificationBuilderFactory.createChromeNotificationBuilder instead,
  specifying a channel for use on Android O.
  See https://crbug.com/678670 for more information.
  '''
  return _CheckReIgnoreComment(input_api, output_api, error_msg, blacklist,
                               NEW_NOTIFICATION_BUILDER_RE)
def _CheckAlertDialogBuilder(input_api, output_api):
  """Flags new AlertDialog.Builder calls (and AppCompat imports thereof).

  Returns a list of presubmit errors: one listing every new
  AlertDialog.Builder call outside the excluded files, plus one listing
  affected files that import the AppCompat AlertDialog (which should use
  CompatibleAlertDialogBuilder instead).
  """
  # "Blacklist" because the following files are excluded from the check. In
  # general, preference and FRE related UIs are not relevant to VR mode.
  blacklist = (
      BROWSER_ROOT + 'browserservices/ClearDataDialogActivity.java',
      BROWSER_ROOT + 'browsing_data/ConfirmImportantSitesDialogFragment.java',
      BROWSER_ROOT + 'browsing_data/OtherFormsOfHistoryDialogFragment.java',
      BROWSER_ROOT + 'datareduction/settings/DataReductionStatsPreference.java',
      BROWSER_ROOT + 'password_manager/AccountChooserDialog.java',
      BROWSER_ROOT + 'password_manager/AutoSigninFirstRunDialog.java',
      BROWSER_ROOT + r'settings[\\\/].*',
      BROWSER_ROOT + 'signin/AccountPickerDialogFragment.java',
      BROWSER_ROOT + 'signin/AccountSigninView.java',
      BROWSER_ROOT + 'signin/ConfirmImportSyncDataDialog.java',
      BROWSER_ROOT + 'signin/ConfirmManagedSyncDataDialog.java',
      BROWSER_ROOT + 'signin/ConfirmSyncDataStateMachineDelegate.java',
      BROWSER_ROOT + 'signin/SigninFragmentBase.java',
      BROWSER_ROOT + 'signin/SignOutDialogFragment.java',
      BROWSER_ROOT + 'site_settings/AddExceptionPreference.java',
      BROWSER_ROOT + 'site_settings/ChosenObjectSettings.java',
      # BUG FIX: ManageSpaceActivity.java used to be listed twice.
      BROWSER_ROOT + 'site_settings/ManageSpaceActivity.java',
      BROWSER_ROOT + 'site_settings/SingleCategorySettings.java',
      BROWSER_ROOT + 'site_settings/SingleWebsiteSettings.java',
      BROWSER_ROOT + 'sync/settings/ManageSyncSettings.java',
      BROWSER_ROOT + 'sync/settings/SyncAndServicesSettings.java',
      BROWSER_ROOT + 'sync/ui/PassphraseCreationDialogFragment.java',
      BROWSER_ROOT + 'sync/ui/PassphraseDialogFragment.java',
      BROWSER_ROOT + 'sync/ui/PassphraseTypeDialogFragment.java',
  )
  # Typo fix: "pereference" -> "preference".
  error_msg = '''
  AlertDialog.Builder Check failed:
  Your new code added one or more calls to the AlertDialog.Builder, listed
  below.
  We recommend you use ModalDialogProperties to show a dialog whenever possible
  to support VR mode. You could only keep the AlertDialog if you are certain
  that your new AlertDialog is not used in VR mode (e.g. preference, FRE)
  If you are in doubt, contact
  //src/chrome/android/java/src/org/chromium/chrome/browser/vr/VR_JAVA_OWNERS
  '''
  error_files = []
  result = _CheckReIgnoreComment(input_api, output_api, error_msg, blacklist,
                                 NEW_ALERTDIALOG_BUILDER_RE, error_files)

  wrong_builder_errors = []
  wrong_builder_error_msg = '''
  Android Use of AppCompat AlertDialog.Builder Check failed:
  Your new code added one or more calls to the AppCompat AlertDialog.Builder,
  file listed below.
  If you are keeping the new AppCompat AlertDialog.Builder, please use
  CompatibleAlertDialogBuilder instead to work around support library issues.
  See https://crbug.com/966101 for more information.
  '''
  # Of the files with a new AlertDialog.Builder call, also flag those that
  # resolve AlertDialog to the AppCompat class.
  for f in error_files:
    contents = input_api.ReadFile(f)
    if IMPORT_APP_COMPAT_ALERTDIALOG_RE.search(contents):
      wrong_builder_errors.append('  %s' % (f.LocalPath()))

  if wrong_builder_errors:
    result.extend([output_api.PresubmitError(
        wrong_builder_error_msg, wrong_builder_errors)])
  return result
def _CheckCompatibleAlertDialogBuilder(input_api, output_api):
  """Flags new CompatibleAlertDialogBuilder constructor calls."""
  # "Blacklist" because the following files are excluded from the check.
  blacklist = (
      BROWSER_ROOT + 'LoginPrompt.java',
      BROWSER_ROOT + 'SSLClientCertificateRequest.java',
      BROWSER_ROOT + 'autofill/AutofillPopupBridge.java',
      BROWSER_ROOT + 'autofill/keyboard_accessory/'
                     'AutofillKeyboardAccessoryBridge.java',
      BROWSER_ROOT + 'dom_distiller/DistilledPagePrefsView.java',
      BROWSER_ROOT + 'dom_distiller/DomDistillerUIUtils.java',
      BROWSER_ROOT + 'download/DownloadController.java',
      BROWSER_ROOT + 'download/OMADownloadHandler.java',
      BROWSER_ROOT + 'externalnav/ExternalNavigationDelegateImpl.java',
      BROWSER_ROOT + 'payments/AndroidPaymentApp.java',
      BROWSER_ROOT + 'permissions/AndroidPermissionRequester.java',
      BROWSER_ROOT + 'share/ShareDelegateImpl.java',
      BROWSER_ROOT + 'util/AccessibilityUtil.java',
      BROWSER_ROOT + 'webapps/AddToHomescreenDialog.java',
      BROWSER_ROOT + 'webapps/WebappOfflineDialog.java',
  )
  # Typo fix: "pereference" -> "preference".
  error_msg = '''
  Android Use of CompatibleAlertDialogBuilder Check failed:
  Your new code added one or more calls to the CompatibleAlertDialogBuilder
  constructors, listed below.
  We recommend you use ModalDialogProperties to show a dialog whenever possible
  to support VR mode. You could only keep the AlertDialog if you are certain
  that your new AlertDialog is not used in VR mode (e.g. preference, FRE)
  If you are in doubt, contact
  //src/chrome/android/java/src/org/chromium/chrome/browser/vr/VR_JAVA_OWNERS
  '''
  return _CheckReIgnoreComment(input_api, output_api, error_msg, blacklist,
                               NEW_COMPATIBLE_ALERTDIALOG_BUILDER_RE)
def _CheckReIgnoreComment(input_api, output_api, error_msg, blacklist,
                          regular_expression, error_files=None):
  """Reports changed Java lines matching regular_expression as errors.

  Lines that look like Java comments are ignored.  The regex is also retried
  on each pair of consecutive changed lines so a call split across a line
  break is still caught.  If error_files is a list, affected file objects
  are appended to it as a side channel for the caller.
  """
  def CheckLine(current_file, line_number, line, problems, error_files):
    """Returns a boolean whether the line contains an error."""
    if (regular_expression.search(line) and not COMMENT_RE.search(line)):
      if error_files is not None:
        error_files.append(current_file)
      problems.append(
        '  %s:%d\n    \t%s' %
        (current_file.LocalPath(), line_number, line.strip()))
      return True
    return False

  problems = []
  # Only examine .java files that are not excluded by the blacklist.
  sources = lambda x: input_api.FilterSourceFile(
    x, white_list=(r'.*\.java$',), black_list=blacklist)
  for f in input_api.AffectedFiles(include_deletes=False,
                                   file_filter=sources):
    previous_line = ''
    for line_number, line in f.ChangedContents():
      if not CheckLine(f, line_number, line, problems, error_files):
        if previous_line:
          # Retry on the two-line window to catch wrapped constructor calls.
          two_lines = '\n'.join([previous_line, line])
          CheckLine(f, line_number, two_lines, problems, error_files)
        previous_line = line
      else:
        previous_line = ''
  if problems:
    return [output_api.PresubmitError(error_msg, problems)]
  return []
| |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Templates for constructing various sorts of invalid transactions.
These templates (or an iterator over all of them) can be reused in different
contexts to test using a number of invalid transaction types.
Hopefully this makes it easier to get coverage of a full variety of tx
validation checks through different interfaces (AcceptBlock, AcceptToMemPool,
etc.) without repeating ourselves.
Invalid tx cases not covered here can be found by running:
$ diff \
<(grep -IREho "bad-txns[a-zA-Z-]+" src | sort -u) \
<(grep -IEho "bad-txns[a-zA-Z-]+" test/functional/data/invalid_txs.py | sort -u)
"""
import abc
import copy

from typing import Optional
from test_framework import script as sc
from test_framework.blocktools import create_tx_with_script
from test_framework.messages import (
MAX_MONEY,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
)
from test_framework.script import (
OP_2DIV,
OP_2MUL,
OP_INVERT,
OP_LSHIFT,
OP_MUL,
OP_RSHIFT,
CScript,
)
from test_framework.txtools import pad_tx
# A minimal P2SH scriptPubKey (HASH160 <hash160(OP_0)> EQUAL) used as a
# generic output script by the templates below.
basic_p2sh = sc.CScript(
    [sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL])
class BadTxTemplate(metaclass=abc.ABCMeta):
    """Allows simple construction of a certain kind of invalid tx. Base class to be subclassed.

    BUG FIX: the class previously set ``__metaclass__ = abc.ABCMeta``, which
    is Python 2 syntax and has no effect under Python 3, so ``get_tx`` was
    never actually enforced as abstract.  Declaring the metaclass via the
    class keyword restores that enforcement.
    """
    # The expected error code given by bitcoind upon submission of the tx.
    reject_reason: Optional[str] = ""

    # Only specified if it differs from mempool acceptance error.
    block_reject_reason = ""

    # Do we expect to be disconnected after submitting this tx?
    expect_disconnect = False

    # Is this tx considered valid when included in a block, but not for acceptance into
    # the mempool (i.e. does it violate policy but not consensus)?
    valid_in_block = False

    def __init__(self, *, spend_tx=None, spend_block=None):
        # Spend either the given transaction or the coinbase of the block.
        self.spend_tx = spend_block.vtx[0] if spend_block else spend_tx
        self.spend_avail = sum(o.nValue for o in self.spend_tx.vout)
        # An otherwise-valid input spending output 0 of spend_tx; reused by
        # most of the subclasses below.
        self.valid_txin = CTxIn(
            COutPoint(
                self.spend_tx.sha256,
                0),
            b"",
            0xffffffff)

    @abc.abstractmethod
    def get_tx(self, *args, **kwargs):
        """Return a CTransaction that is invalid per the subclass."""
        pass
class OutputMissing(BadTxTemplate):
    """A transaction with a valid input but an empty vout."""
    reject_reason = "bad-txns-vout-empty"
    expect_disconnect = True

    def get_tx(self):
        tx = CTransaction()
        tx.vin = [self.valid_txin]
        tx.calc_sha256()
        return tx
class InputMissing(BadTxTemplate):
    """A transaction with outputs but an empty vin."""
    reject_reason = "bad-txns-vin-empty"
    expect_disconnect = True

    def get_tx(self):
        tx = CTransaction()
        # 100 OP_TRUEs keep the tx above the minimum size check.
        tx.vout = [CTxOut(0, sc.CScript([sc.OP_TRUE] * 100))]
        tx.calc_sha256()
        return tx
class SizeTooSmall(BadTxTemplate):
    """A transaction below the minimum serialized size (policy-only)."""
    reject_reason = "bad-txns-undersize"
    expect_disconnect = False
    valid_in_block = True

    def get_tx(self):
        tx = CTransaction()
        tx.vin = [self.valid_txin]
        tx.vout = [CTxOut(0, sc.CScript([sc.OP_TRUE]))]
        tx.calc_sha256()
        return tx
class BadInputOutpointIndex(BadTxTemplate):
    """Spends an out-of-range output index of an existing transaction."""
    # Won't be rejected - nonexistent outpoint index is treated as an orphan since the coins
    # database can't distinguish between spent outpoints and outpoints which
    # never existed.
    reject_reason = None
    expect_disconnect = False

    def get_tx(self):
        # Any index safely past the end of spend_tx's outputs.
        num_indices = len(self.spend_tx.vin)
        bad_idx = num_indices + 100

        tx = CTransaction()
        tx.vin.append(
            CTxIn(
                COutPoint(
                    self.spend_tx.sha256,
                    bad_idx),
                b"",
                0xffffffff))
        tx.vout.append(CTxOut(0, basic_p2sh))
        tx.calc_sha256()
        return tx
class DuplicateInput(BadTxTemplate):
    """Spends the same outpoint twice within one transaction."""
    reject_reason = 'bad-txns-inputs-duplicate'
    expect_disconnect = True

    def get_tx(self):
        tx = CTransaction()
        tx.vin = [self.valid_txin, self.valid_txin]
        tx.vout = [CTxOut(1, basic_p2sh)]
        tx.calc_sha256()
        return tx
class NonexistentInput(BadTxTemplate):
    """Spends a txid that does not exist (sha256 of spend_tx plus one)."""
    # Added as an orphan tx.
    reject_reason = None
    expect_disconnect = False

    def get_tx(self):
        tx = CTransaction()
        tx.vin.append(
            CTxIn(
                COutPoint(
                    self.spend_tx.sha256 +
                    1,
                    0),
                b"",
                0xffffffff))
        # A second, valid input keeps the tx otherwise well-formed.
        tx.vin.append(self.valid_txin)
        tx.vout.append(CTxOut(1, basic_p2sh))
        tx.calc_sha256()
        return tx
class SpendTooMuch(BadTxTemplate):
    """Creates one more satoshi of output value than the input provides."""
    reject_reason = 'bad-txns-in-belowout'
    expect_disconnect = True

    def get_tx(self):
        too_much = self.spend_avail + 1
        return create_tx_with_script(
            self.spend_tx, 0, script_pub_key=basic_p2sh, amount=too_much)
class CreateNegative(BadTxTemplate):
    """Creates a single output with a negative value."""
    reject_reason = 'bad-txns-vout-negative'
    expect_disconnect = True

    def get_tx(self):
        return create_tx_with_script(self.spend_tx, 0, amount=-1)
class CreateTooLarge(BadTxTemplate):
    """Creates a single output exceeding MAX_MONEY."""
    reject_reason = 'bad-txns-vout-toolarge'
    expect_disconnect = True

    def get_tx(self):
        return create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY + 1)
class CreateSumTooLarge(BadTxTemplate):
    """Each output is within range, but together they exceed MAX_MONEY."""
    reject_reason = 'bad-txns-txouttotal-toolarge'
    expect_disconnect = True

    def get_tx(self):
        tx = create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY)
        # Duplicate the MAX_MONEY output so the total overflows.
        tx.vout = [tx.vout[0]] * 2
        tx.calc_sha256()
        return tx
class InvalidOPIFConstruction(BadTxTemplate):
    """scriptSig of repeated 0x64 bytes with no matching OP_ENDIF."""
    reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)"
    expect_disconnect = True
    valid_in_block = True

    def get_tx(self):
        return create_tx_with_script(
            self.spend_tx, 0, script_sig=b'\x64' * 35,
            amount=(self.spend_avail // 2))
def getDisabledOpcodeTemplate(opcode):
    """ Creates disabled opcode tx template class"""
    def get_tx(self):
        tx = CTransaction()
        # BUG FIX: copy the shared input before mutating it.  The original
        # assigned to self.valid_txin.scriptSig directly, leaking the
        # disabled opcode into every later use of that shared CTxIn.
        vin = copy.deepcopy(self.valid_txin)
        vin.scriptSig = CScript([opcode])
        tx.vin.append(vin)
        tx.vout.append(CTxOut(1, basic_p2sh))
        pad_tx(tx)
        tx.calc_sha256()
        return tx

    return type('DisabledOpcode_' + str(opcode), (BadTxTemplate,), {
        'reject_reason': "disabled opcode",
        'expect_disconnect': True,
        'get_tx': get_tx,
        'valid_in_block': True
    })
# Disabled opcode tx templates (CVE-2010-5137)
# One template per historically-disabled script opcode; each yields a tx
# whose scriptSig uses that opcode and must therefore be rejected.
DisabledOpcodeTemplates = [getDisabledOpcodeTemplate(opcode) for opcode in [
    OP_INVERT,
    OP_2MUL,
    OP_2DIV,
    OP_MUL,
    OP_LSHIFT,
    OP_RSHIFT]]
def iter_all_templates():
    """Iterate through all bad transaction template types."""
    # Direct subclasses only; the generated DisabledOpcode_* classes qualify
    # because type() creates them with BadTxTemplate as their base.
    return BadTxTemplate.__subclasses__()
| |
import sys
import scipy.sparse as sp
import scipy.sparse.linalg as lin
import numpy as np
from bisect import bisect
import itertools as it
from diffusion import *
# Hypercube MinMax Ant Optimization #
def optimize (pool, s, nodes, sparseMat):
    """Run the hypercube MinMax ant optimization.

    For each of s["runs"] restarts: reset pheromones and state, then loop
    until either convergence (s["c"] drops below the threshold s["ct"]) or
    maxIters is hit.  Returns the state dict s with the bests recorded.
    """
    # for each new convergence #
    for run in xrange(s["runs"]):
        print "RUN: " + str(run)
        # prepare for this reset-run #
        nodes = resetNodes(nodes)
        s = resetState(s)
        iters = 0; maxIters = 100000;
        while iters < maxIters and s["c"] > s["ct"]:
            # generate the probability distribution #
            ps = genProbs(s,nodes)
            # for each new ant, generate a solution, local search, and score
            (bestSoln,bestScore) = antWork(pool, s, np.copy(ps), sparseMat, nodes)
            # update the best/resetbest/iterbests #
            s = updateSolutionSet(s, bestSoln, bestScore, iters)
            # update the pheromones #
            nodes = updatePheromones(s, nodes)
            # check for convergence #
            s = checkConvergence(s, nodes)
            iters += 1
    s["iters"] = iters
    return(s)
def resetNodes(nodes):
    """Restore every node's pheromone level (4th tuple slot) to 0.5."""
    for key, (a, b, c, _old, e) in list(nodes.items()):
        nodes[key] = (a, b, c, 0.5, e)
    return nodes
def resetState(s):
    """Clear the per-restart bests and convergence factor in the state dict."""
    s.update({
        "bestRest": (0.0, 0.0, []),
        "bestIter": (0.0, 0.0, []),
        "c": 1.0,
    })
    return s
def genProbs(s, nodes):
    """Return the unnormalized selection weight for every node.

    Weight = w**alph * p**beta, where w and p are the 3rd and 4th slots of
    each node tuple and the node keys index directly into the array.
    """
    alph = s["alph"]
    beta = s["beta"]
    ps = np.zeros(len(nodes))
    for key, (_a, _b, w, p, _e) in nodes.items():
        ps[key] = (w ** alph) * (p ** beta)
    return ps
def antWork(pool, s, ps, sparseMat, nodes):
    """Run s["ants"] ants in the worker pool; return the best (soln, score)."""
    # for the solns
    nants = s["ants"]
    seeds = [np.random.randint(1000000) for i in xrange(nants)] # seed each thread
    # generate the list of data for each ant; everything but the seed is
    # identical across ants, so it.repeat shares the same objects
    antdat = (it.izip(xrange(nants), it.repeat(s, nants), it.repeat(ps, nants),
              it.repeat(sparseMat, nants), seeds, it.repeat(nodes, nants)))
    # send it to a pooled fun party
    solns = pool.map(poolParty, antdat)
    # return the best
    return(scoreMax(solns, s))
def poolParty( (i, s, ps, sparseMat, seedi, nodes) ):
    """Worker entry point: one ant builds, refines and scores a solution.

    NOTE: uses a Python 2 tuple-unpacking parameter so it can be handed a
    single packed tuple by pool.map.
    """
    # start with a possible solution; reseed so each worker diverges
    np.random.seed(seedi)
    soln = genSoln(i,s,np.copy(ps))
    # then do a local search
    (soln2, score) = localSearch(s, np.copy(ps), soln, sparseMat, nodes)
    # return the best
    return(soln2, score)
def genSoln(i, s, ps):
    """Probabilistically draw a solution of s["k"] distinct node indices.

    Each pick renormalizes ps, samples an index from the cumulative
    distribution, then zeroes that index so it cannot be drawn again.
    (xrange was replaced with range: identical semantics here, and keeps
    the module importable under Python 3.)
    """
    soln = []
    for ki in range(int(s["k"])):
        ps = ps/(sum(ps))       # after removing one ... renorm the probs
        cs = ps.cumsum()
        solni = bisect(cs,np.random.random()) # should skip over the 0'ed ones...
        soln.append(solni)
        ps[solni] = 0           # never pick the same node twice
    return(soln)
def localSearch(s, ps, bestSoln, sparseMat, nodes):
    """Hill-climb around bestSoln for s["local"] steps.

    Each step swaps one member of the solution for a fresh node drawn from
    the (normalized) distribution ps, keeping the swap only if it improves
    the criterion selected by s["opton"] ("score" or "touch").
    s["local"] == -1 disables the search.  Returns (soln, (score, touch)).
    """
    (bestScore, bestTouch) = scoreSoln(bestSoln, s, sparseMat, nodes)
    # soln now doesn't include the edge weights
    if s["local"] == -1:  # none requested
        # go back! go back!
        return (bestSoln, (bestScore, bestTouch))

    # hill climbing for a certain number of steps
    newSoln = list(bestSoln)
    newScore = bestScore
    newTouch = bestTouch
    ps = ps/(sum(ps))
    cs = ps.cumsum()
    n = s["local"]  # the number of tries to make
    testsoln = list(newSoln)
    for i in range(n):
        # BUG FIX: the original indexed the list with
        # np.random.randint(0, len(testsoln), 1) -- a size-1 ndarray --
        # which is not a valid list index under modern numpy/Python.
        remr = testsoln[np.random.randint(0, len(testsoln))]  # the one to remove
        solnr = [xi for xi in testsoln if xi != remr]  # fragment list
        solni = testsoln[0]  # pick a new one, not in the list already
        while solni in testsoln:
            solni = bisect(cs, np.random.random())  # the one to add, based on ps
        testsoln = list(solnr + [solni])  # the new soln list
        score = scoreSoln(testsoln, s, sparseMat, nodes)  # score it
        if s["opton"] == "touch":
            if score[1] > newTouch:
                newScore = score[0]  # if better: keep it
                newTouch = score[1]
                newSoln = list(testsoln)
            else:
                testsoln = list(newSoln)  # else: return to previous soln
        elif s["opton"] == "score":
            if score[0] > newScore:
                newScore = score[0]  # if better: keep it
                newTouch = score[1]
                newSoln = list(testsoln)
            else:
                testsoln = list(newSoln)  # else: return to previous soln
        else:
            print("This opton mode is not implemented yet!, use score or touch.")
            sys.exit(1)
    return (newSoln, (newScore, newTouch))
def combo(a, b):
    """Combined objective: the product of the two criteria."""
    return a * b
def updateSolutionSet(s, bestSoln, score_touch, iters):
    """Fold the iteration-best solution into the tracked best slots.

    score_touch is the (score, touch) pair for bestSoln.  Each of
    s["bestEver"], s["bestRest"], s["bestIter"] is a (score, touch, soln)
    triple, replaced whenever bestSoln beats it on the criterion chosen by
    s["opton"] ("score", "touch" or "combo").  iters is currently unused.
    (The former py2-only tuple-unpacking parameter is unpacked in the body
    instead; callers still pass a single tuple, so the call is unchanged.)
    """
    (bestScore, bestTouch) = score_touch
    if s["opton"] == "score":
        if bestScore > s["bestEver"][0]:
            s["bestEver"] = (bestScore, bestTouch, bestSoln)
        if bestScore > s["bestRest"][0]:
            s["bestRest"] = (bestScore, bestTouch, bestSoln)
        if bestScore > s["bestIter"][0]:
            s["bestIter"] = (bestScore, bestTouch, bestSoln)
    elif s["opton"] == "touch":
        if bestTouch > s["bestEver"][1]:
            s["bestEver"] = (bestScore, bestTouch, bestSoln)
        if bestTouch > s["bestRest"][1]:
            s["bestRest"] = (bestScore, bestTouch, bestSoln)
        if bestTouch > s["bestIter"][1]:
            s["bestIter"] = (bestScore, bestTouch, bestSoln)
    elif s["opton"] == "combo":
        if combo(bestTouch, bestScore) > combo(s["bestEver"][1], s["bestEver"][0]):
            s["bestEver"] = (bestScore, bestTouch, bestSoln)
        if combo(bestTouch, bestScore) > combo(s["bestRest"][1], s["bestRest"][0]):
            s["bestRest"] = (bestScore, bestTouch, bestSoln)
        # BUG FIX: this comparison used to call combo(bestTouch*bestScore)
        # with a single argument, raising TypeError in "combo" mode.
        if combo(bestTouch, bestScore) > combo(s["bestIter"][1], s["bestIter"][0]):
            s["bestIter"] = (bestScore, bestTouch, bestSoln)
    else:
        print("Update Solution Error! config option 'opton' must be score, touch, or combo")
        sys.exit(1)
    return s
def updatePheromones(s, nodes):
    """Evaporate and deposit pheromone on every node.

    The deposit for a node is a weighted vote over whether it appears in the
    iteration-best, restart-best and best-ever solutions; pheromone moves
    toward the deposit at rate s["evap"] and is clamped by bounded().  The
    5th tuple slot accumulates a membership tally.
    """
    (iterp, restp, bestp) = pheroProportions(s)
    restartSoln = s["bestRest"][2]
    iterateSoln = s["bestIter"][2]
    bestSoln = s["bestEver"][2]
    for key in list(nodes.keys()):
        (a, b, w, p, ch) = nodes[key]
        inRest = int(key in restartSoln)
        inIter = int(key in iterateSoln)
        inBest = int(key in bestSoln)
        deposit = inIter * iterp + inRest * restp + inBest * bestp
        updated = bounded(p + s["evap"] * (deposit - p))
        tally = ch + (inIter + inRest + inBest) / 3.0
        nodes[key] = (a, b, w, updated, tally)
    return nodes
def pheroProportions(s):
    """Return the mixing proportions (iteration, restart, best) used when
    depositing pheromone, driven by the convergence factor s["c"].

    High convergence (fresh start) favours the iteration-best solution;
    as convergence drops, weight shifts to the restart-best and finally
    to the best-ever solution.

    :param s: state dict; only s["c"] is read
    :return: (iteration, restart, best) proportion triple
    """
    sc = s["c"]
    # BUG FIX: the first branch previously used `sc > 0.8` while the next
    # tested `sc < 0.8`, leaving sc == 0.8 uncovered, and the [0.2, 0.4)
    # branch tested `sc > 0.4` instead of `sc < 0.4`, which made it
    # unreachable (earlier branches already handle sc >= 0.4) and sent
    # sc == 0.8 into the wrong bracket.
    if sc >= 0.8:
        x = (1.0, 0.0, 0.0)  # just started out, use iteration best
    elif sc >= 0.6:
        x = (0.6669, 0.3331, 0.0)
    elif sc >= 0.4:
        x = (0.3331, 0.6669, 0.0)  # nearing the end - move to restart
    elif sc >= 0.2:
        x = (0.0, 0.6669, 0.3331)
    elif sc >= 0.1:
        x = (0.0, 0.3331, 0.6669)  # nearing the end - move to best ever
    else:
        x = (0.0, 0.0, 1.0)
    return x
def bounded(x):
    """Clamp a pheromone value into the range [0.001, 0.999]."""
    lower = 0.001
    upper = 0.999
    return min(max(x, lower), upper)
def checkConvergence(s, nodes):
    """Recompute the convergence factor s["c"] and the pheromone-to-k
    ratio s["pTokRatio"] from the nodes' current pheromone levels."""
    span = 0.999 - 0.001  # width of the bounded pheromone range
    normfactor = span * len(nodes)
    pheromones = [value[3] for value in nodes.values()]
    saturation = sum(map(convNum, pheromones)) / normfactor
    s["c"] = 1.0 - 2.0 * (saturation - 0.5)
    s["pTokRatio"] = sum(pheromones) / (span * s["k"])
    return s
def divn(x, n):
    """Divide *x* by *n* and round to the nearest integer.

    BUG FIX: the original computed round(x/n) but never returned it,
    so the function always returned None.

    :param x: dividend
    :param n: divisor
    :return: x/n rounded to the nearest integer
    """
    return round(x / n)
def convNum(x):
    """Distance of *x* from the farther edge of the [0.001, 0.999]
    pheromone interval (larger means closer to a bound -> more converged)."""
    to_upper = 0.999 - x
    to_lower = x - 0.001
    if to_upper >= to_lower:
        return to_upper
    return to_lower
| |
import os
from neo4j.exceptions import ConfigurationError
from neo4j.graph import Relationship
import provdbconnector.db_adapters.neo4j.cypher_commands as cypher_commands
from provdbconnector.db_adapters.baseadapter import BaseAdapter
from provdbconnector.db_adapters.baseadapter import METADATA_KEY_PROV_TYPE, METADATA_KEY_TYPE_MAP, \
METADATA_KEY_IDENTIFIER, METADATA_KEY_NAMESPACES
from provdbconnector.exceptions.database import InvalidOptionsException, AuthException, \
DatabaseException, CreateRecordException, NotFoundException, CreateRelationException, MergeException
from neo4j import GraphDatabase, basic_auth
from prov.constants import PROV_N_MAP
from collections import namedtuple
from provdbconnector.utils.serializer import encode_string_value_to_primitive, encode_dict_values_to_primitive, \
split_into_formal_and_other_attributes
import logging
# Silence the driver's verbose bolt-protocol logging.
logging.getLogger("neo4j.bolt").setLevel(logging.WARN)
log = logging.getLogger(__name__)
# Connection defaults; each can be overridden via the environment.
NEO4J_USER = os.environ.get('NEO4J_USERNAME', 'neo4j')
NEO4J_PASS = os.environ.get('NEO4J_PASSWORD', 'neo4jneo4j')
NEO4J_HOST = os.environ.get('NEO4J_HOST', 'localhost')
NEO4J_BOLT_PORT = os.environ.get('NEO4J_BOLT_PORT', '7687')
NEO4J_HTTP_PORT = os.environ.get('NEO4J_HTTP_PORT', '7474')
# Key prefix that marks a node/relation property as adapter metadata
# rather than a PROV attribute (see _prefix_metadata and
# _split_attributes_metadata_from_node).
NEO4J_META_PREFIX = "meta:"
class Neo4jAdapter(BaseAdapter):
    """
    This is the neo4j adapter to store prov. data in a neo4j database
    """
    def __init__(self, *args):
        """
        Setup the class
        :param args: None
        """
        super(Neo4jAdapter, self).__init__()
        # The bolt driver is created lazily in connect(); until then no
        # database operation is possible.
        self.driver = None
        pass
    def _create_session(self):
        """
        Get a session from the driver
        :return: Session
        :rtype: Session
        :raises: AuthException
        """
        try:
            session = self.driver.session()
        except OSError as e:
            # Connection-level failures surface as OSError from the bolt
            # driver; report them as the adapter's auth/connection error.
            raise AuthException(e)
        return session
    def connect(self, authentication_options):
        """
        The connect method to create a new instance of the db_driver
        :param authentication_options: Username, password, host and encrypted option
        :return: None
        :rtype: None
        :raises: InvalidOptionsException
        """
        if authentication_options is None:
            raise InvalidOptionsException()
        user_name = authentication_options.get("user_name")
        user_pass = authentication_options.get("user_password")
        encrypted = authentication_options.get("encrypted")
        host = authentication_options.get("host")
        # Encryption defaults to off when not specified.
        if encrypted is None:
            encrypted = False
        if user_name is None or user_pass is None or host is None:
            raise InvalidOptionsException()
        try:
            self.driver = GraphDatabase.driver("bolt://{}".format(host), encrypted=encrypted, auth=basic_auth(user_name, user_pass))
        except ConfigurationError as e:
            raise InvalidOptionsException(e)
        # Open (and discard) one session so bad credentials fail fast here.
        self._create_session()
    @staticmethod
    def _prefix_metadata(metadata):
        """
        Prefix all keys of a dict with NEO4J_META_PREFIX, only for the neo4j adapter
        :param metadata: plain metadata dict
        :return: A dict with prefixed keys
        """
        prefixed_metadata = dict()
        for key, value in metadata.items():
            prefixed_metadata["{meta_prefix}{key}".format(key=key, meta_prefix=NEO4J_META_PREFIX)] = value
        return prefixed_metadata
    @staticmethod
    def _parse_to_primitive_attributes(attributes, prefixed_metadata):
        """
        Convert the dict values and keys into a neo4j friendly type (dict=>json, list,int,float, QualifiedName=>str, datetime=>str)
        :param attributes: PROV attributes dict
        :param prefixed_metadata: metadata dict with already-prefixed keys
        :return: one merged dict with primitive keys and values
        """
        # merge attributes and metadata into one dict for the db
        all_attributes = attributes.copy()
        all_attributes.update(prefixed_metadata)
        db_attributes = dict()
        # transform values
        for key, value in all_attributes.items():
            key_primitive = encode_string_value_to_primitive(key)
            value_primitive = encode_string_value_to_primitive(value)
            db_attributes.update({key_primitive: value_primitive})
        return db_attributes
    @staticmethod
    def _get_attributes_identifiers_cypher_string(key_list):
        """
        This function return a cypher string with all keys as cypher parameters
        :param key_list: iterable of property names
        :return: comma separated "`key`: {`key`}" pairs (old-style cypher parameter syntax)
        """
        db_attributes_identifiers = map(lambda key: "`{}`: {{`{}`}}".format(key, key), key_list)
        return ",".join(db_attributes_identifiers)
    @staticmethod
    def _get_attributes_set_cypher_string(key_list, cypher_template=cypher_commands.NEO4J_CREATE_NODE_SET_PART):
        """
        Returns a set cypher command for all keys of the keylist
        :param key_list: iterable of property names
        :param cypher_template: template with an ``attr_name`` placeholder
        :return: space separated SET fragments
        """
        statements = list()
        for key in key_list:
            statements.append(cypher_template.format(attr_name=key))
        return " ".join(statements)
    def save_element(self, attributes, metadata):
        """
        Saves a single record
        :param attributes: The attributes dict
        :type attributes: dict
        :param metadata: The metadata dict
        :type metadata: dict
        :return: The id of the record
        :rtype: str
        :raises: CreateRecordException, MergeException
        """
        metadata = metadata.copy()
        prefixed_metadata = self._prefix_metadata(metadata)
        # setup merge attributes
        # The identifier plus the formal attributes decide whether an
        # existing node is reused (MERGE) or a new one is created.
        (formal_attributes, other_attributes) = split_into_formal_and_other_attributes(attributes, metadata)
        merge_relevant_keys = list()
        merge_relevant_keys.append("meta:{}".format(METADATA_KEY_IDENTIFIER))
        merge_relevant_keys = merge_relevant_keys + list(formal_attributes.keys())
        other_db_attribute_keys = list()
        other_db_attribute_keys = other_db_attribute_keys + list(other_attributes.keys())
        other_db_attribute_keys = other_db_attribute_keys + list(prefixed_metadata.keys())
        # get set statement for non formal attributes
        # Namespaces and the type map are excluded here and merged into
        # lists by a separate SET fragment below.
        attr_for_simple_set = other_db_attribute_keys.copy()
        attr_for_simple_set.remove("meta:" + METADATA_KEY_NAMESPACES)
        attr_for_simple_set.remove("meta:" + METADATA_KEY_TYPE_MAP)
        cypher_set_statement = self._get_attributes_set_cypher_string(attr_for_simple_set)
        attr_for_list_merge = list()
        attr_for_list_merge.append("meta:" + METADATA_KEY_NAMESPACES)
        attr_for_list_merge.append("meta:" + METADATA_KEY_TYPE_MAP)
        cypher_set_statement += self._get_attributes_set_cypher_string(attr_for_list_merge,
                                                                       cypher_commands.NEO4J_CREATE_NODE_SET_PART_MERGE_ATTR)
        # get CASE WHEN ... statement to check if a attribute is different
        cypher_merge_check_statement = self._get_attributes_set_cypher_string(attr_for_simple_set,
                                                                              cypher_commands.NEO4J_CREATE_NODE_MERGE_CHECK_PART)
        # get cypher string for the merge relevant attributes
        cypher_merge_relevant_str = self._get_attributes_identifiers_cypher_string(merge_relevant_keys)
        # get prov type
        provtype = metadata[METADATA_KEY_PROV_TYPE]
        # get db_attributes as dict
        db_attributes = self._parse_to_primitive_attributes(attributes, prefixed_metadata)
        session = self._create_session()
        command = cypher_commands.NEO4J_CREATE_NODE_RETURN_ID.format(label=provtype.localpart,
                                                                     formal_attributes=cypher_merge_relevant_str,
                                                                     set_statement=cypher_set_statement,
                                                                     merge_check_statement=cypher_merge_check_statement)
        with session.begin_transaction() as tx:
            result = tx.run(command, dict(db_attributes))
            record_id = None
            merge_success = 0
            for record in result:
                record_id = record["ID"]
                merge_success = record["check"]
            if record_id is None:
                raise CreateRecordException("No ID property returned by database for the command {}".format(command))
            # A non-zero "check" means an existing property differed, so
            # the merge would overwrite data -> roll the transaction back.
            if merge_success == 0:
                tx.success = True
            else:
                tx.success = False
                raise MergeException(
                    "The attributes {other} could not merged into the existing node, All attributes: {all} ".format(
                        other=other_db_attribute_keys, all=db_attributes))
        return str(record_id)
    def save_relation(self, from_node, to_node, attributes, metadata):
        """
        Save a single relation
        :param from_node: The from node as QualifiedName
        :type from_node: QualifiedName
        :param to_node: The to node as QualifiedName
        :type to_node: QualifiedName
        :param attributes: The attributes dict
        :type attributes: dict
        :param metadata: The metadata dict
        :type metadata: dict
        :return: Id of the relation
        :rtype: str
        :raises: CreateRelationException, MergeException
        """
        metadata = metadata.copy()
        prefixed_metadata = self._prefix_metadata(metadata)
        # setup merge attributes
        (formal_attributes, other_attributes) = split_into_formal_and_other_attributes(attributes, metadata)
        merge_relevant_keys = list()
        merge_relevant_keys.append("meta:{}".format(METADATA_KEY_IDENTIFIER))
        merge_relevant_keys = merge_relevant_keys + list(formal_attributes.keys())
        other_db_attribute_keys = list()
        other_db_attribute_keys = other_db_attribute_keys + list(other_attributes.keys())
        other_db_attribute_keys = other_db_attribute_keys + list(prefixed_metadata.keys())
        # get set statement for non formal attributes
        # Remove namespace and type_map from the direct set statement, because this attributes need to be merged
        attr_for_simple_set = other_db_attribute_keys.copy()
        attr_for_simple_set.remove("meta:" + METADATA_KEY_NAMESPACES)
        attr_for_simple_set.remove("meta:" + METADATA_KEY_TYPE_MAP)
        cypher_set_statement = self._get_attributes_set_cypher_string(attr_for_simple_set)
        # Add separate cypher command to merge the namespaces and type map into a list
        attr_for_list_merge = list()
        attr_for_list_merge.append("meta:" + METADATA_KEY_NAMESPACES)
        attr_for_list_merge.append("meta:" + METADATA_KEY_TYPE_MAP)
        cypher_set_statement += self._get_attributes_set_cypher_string(attr_for_list_merge,
                                                                       cypher_commands.NEO4J_CREATE_NODE_SET_PART_MERGE_ATTR)
        # get CASE WHEN ... statement to check if a attribute is different
        cypher_merge_check_statement = self._get_attributes_set_cypher_string(attr_for_simple_set,
                                                                              cypher_commands.NEO4J_CREATE_NODE_MERGE_CHECK_PART)
        # get cypher string for the merge relevant attributes
        cypher_merge_relevant_str = self._get_attributes_identifiers_cypher_string(merge_relevant_keys)
        # get db_attributes as dict
        db_attributes = self._parse_to_primitive_attributes(attributes, prefixed_metadata)
        with self._create_session() as session:
            # Map the PROV relation type onto its PROV-N name for the label.
            relationtype = PROV_N_MAP[metadata[METADATA_KEY_PROV_TYPE]]
            command = cypher_commands.NEO4J_CREATE_RELATION_RETURN_ID.format(from_identifier=str(from_node),
                                                                             to_identifier=str(to_node),
                                                                             relation_type=relationtype,
                                                                             formal_attributes=cypher_merge_relevant_str,
                                                                             merge_check_statement=cypher_merge_check_statement,
                                                                             set_statement=cypher_set_statement
                                                                             )
            with session.begin_transaction() as tx:
                result = tx.run(command, dict(db_attributes))
                record_id = None
                merge_success = 0
                for record in result:
                    record_id = record["ID"]
                    merge_success = record["check"]
                if record_id is None:
                    raise CreateRelationException("No ID property returned by database for the command {}".format(command))
                # Same merge-conflict protocol as save_element: commit only
                # when no existing property value differed.
                if merge_success == 0:
                    tx.success = True
                else:
                    tx.success = False
                    raise MergeException("The attributes {other} could not merged into the existing node ".format(
                        other=other_db_attribute_keys))
        return str(record_id)
    @staticmethod
    def _split_attributes_metadata_from_node(db_node):
        """
        This functions splits a db node back into attributes and metadata, based on the prefix
        :param db_node:
        :type db_node: dict
        :return: namedTuple(attributes,metadata)
        """
        record = namedtuple('Record', 'attributes, metadata')
        # split data
        # NOTE(review): reads the driver-private ``_properties`` attribute
        # of the node/relationship object; tied to this driver version.
        metadata = {k.replace(NEO4J_META_PREFIX, ""): v for k, v in db_node._properties.items() if
                    k.startswith(NEO4J_META_PREFIX, 0, len(NEO4J_META_PREFIX))}
        attributes = {k: v for k, v in db_node._properties.items() if
                      not k.startswith(NEO4J_META_PREFIX, 0, len(NEO4J_META_PREFIX))}
        # convert a list of namespace into a string if it is only one item
        # @todo Kind of a hack to pass all test, it is also allowed to return a list of JSON encoded strings
        namespaces = metadata[METADATA_KEY_NAMESPACES]
        if isinstance(namespaces, list):
            # If len is 1 return only the raw JSON string
            if len(namespaces) == 1:
                metadata.update({METADATA_KEY_NAMESPACES: namespaces.pop()})
        # convert a list of namespace into a string if it is only one item
        # @todo Kind of a hack to pass all test, it is also allowed to return a list of JSON encoded strings
        # @todo Find out what I hacked here in 2015...
        type_map = metadata[METADATA_KEY_TYPE_MAP]
        if isinstance(type_map, list):
            # If len is 1 return only the raw JSON string
            if len(type_map) == 1:
                metadata.update({METADATA_KEY_TYPE_MAP: type_map.pop()})
        record = record(attributes, metadata)
        return record
    def _get_cypher_filter_params(self, properties_dict, metadata_dict):
        """
        This functions returns a tuple with the cypher_str for the cypher filter and the right parameter names
        :param properties_dict: Search dict
        :param metadata_dict: Search dict
        :return: Tuple(Keys with metadata prefix (if necessary), cypher filter str )
        """
        metadata_dict_prefixed = {"meta:{}".format(k): v for k, v in metadata_dict.items()}
        # Merge the 2 dicts into one
        filter = properties_dict.copy()
        filter.update(metadata_dict_prefixed)
        encoded_params = encode_dict_values_to_primitive(filter)
        cypher_str = self._get_attributes_identifiers_cypher_string(filter.keys())
        return encoded_params, cypher_str
    def get_records_by_filter(self, attributes_dict=None, metadata_dict=None):
        """
        Return the records by a certain filter
        :param attributes_dict: Filter dict
        :type attributes_dict: dict
        :param metadata_dict: Filter dict for metadata
        :type metadata_dict: dict
        :return: list of all nodes and relations that fit the conditions
        :rtype: list(DbRecord and DbRelation)
        """
        if attributes_dict is None:
            attributes_dict = dict()
        if metadata_dict is None:
            metadata_dict = dict()
        (encoded_params, cypher_str) = self._get_cypher_filter_params(attributes_dict, metadata_dict)
        session = self._create_session()
        records = list()
        result_set = session.run(cypher_commands.NEO4J_GET_RECORDS_BY_PROPERTY_DICT.format(filter_dict=cypher_str), encoded_params)
        for result in result_set:
            record = result["re"]
            if record is None:
                raise DatabaseException("Record response should not be None")
            relation_record = self._split_attributes_metadata_from_node(record)
            records.append(relation_record)
        return records
    def get_records_tail(self, attributes_dict=None, metadata_dict=None, depth=None):
        """
        Return all connected nodes from the origin.
        :param attributes_dict: Filter dict
        :type attributes_dict: dict
        :param metadata_dict: Filter dict for metadata
        :type metadata_dict: dict
        :param depth: Max steps
        :return: list of all nodes and relations that fit the conditions
        :rtype: list(DbRecord and DbRelation)
        """
        if attributes_dict is None:
            attributes_dict = dict()
        if metadata_dict is None:
            metadata_dict = dict()
        (encoded_params, cypher_str) = self._get_cypher_filter_params(attributes_dict, metadata_dict)
        # An empty depth string means "unbounded" in the cypher template.
        depth_str = ""
        if depth is not None:
            depth_str = "1..{max}".format(max=depth)
        session = self._create_session()
        result_set = session.run(cypher_commands.NEO4J_GET_RECORDS_TAIL_BY_FILTER.format(filter_dict=cypher_str, depth=depth_str),
                                 encoded_params)
        records = list()
        for result in result_set:
            record = result["re"]
            if record is None:
                raise DatabaseException("Record response should not be None")
            relation_record = self._split_attributes_metadata_from_node(record)
            records.append(relation_record)
        return records
    def get_bundle_records(self, bundle_identifier):
        """
        Return all records and relations for the bundle
        :param bundle_identifier: identifier of the bundle node
        :return: list of DbRecord/DbRelation belonging to the bundle
        """
        session = self._create_session()
        result_set = session.run(cypher_commands.NEO4J_GET_BUNDLE_RECORDS,
                                 {'meta:{}'.format(METADATA_KEY_IDENTIFIER): str(bundle_identifier)})
        records = list()
        for result in result_set:
            record = result["re"]
            if record is None:
                raise DatabaseException("Record response should not be None")
            relation_record = self._split_attributes_metadata_from_node(record)
            records.append(relation_record)
        return records
    def get_record(self, record_id):
        """
        Try to find the record in the database
        :param record_id: internal neo4j node id
        :return: DbRecord
        :rtype: DbRecord
        :raises: DatabaseException, NotFoundException
        """
        session = self._create_session()
        result_set = session.run(cypher_commands.NEO4J_GET_RECORD_RETURN_NODE, {"record_id": int(record_id)})
        node = None
        # The id is unique, so the query must yield exactly one row.
        for result in result_set:
            if node is not None:
                raise DatabaseException(
                    "get_record should return only one node for the id {}, command {}".format(record_id,
                                                                                              cypher_commands.NEO4J_GET_RECORD_RETURN_NODE))
            node = result["node"]
        if node is None:
            raise NotFoundException("We cant find the node with the id: {}, database command {}".format(record_id,
                                                                                                        cypher_commands.NEO4J_GET_RECORD_RETURN_NODE))
        return self._split_attributes_metadata_from_node(node)
    def get_relation(self, relation_id):
        """
        Get a relation
        :param relation_id: internal neo4j relationship id
        :return: The relation
        :rtype: DbRelation
        :raises: DatabaseException, NotFoundException
        """
        session = self._create_session()
        result_set = session.run(cypher_commands.NEO4J_GET_RELATION_RETURN_NODE, {"relation_id": int(relation_id)})
        relation = None
        for result in result_set:
            if not isinstance(result["relation"], Relationship):
                raise DatabaseException(
                    " should return only relationship {}, command {}".format(relation_id, cypher_commands.NEO4J_GET_RECORD_RETURN_NODE))
            relation = result["relation"]
        if relation is None:
            raise NotFoundException("We cant find the relation with the id: {}, database command {}".format(relation_id,
                                                                                                            cypher_commands.NEO4J_GET_RECORD_RETURN_NODE))
        return self._split_attributes_metadata_from_node(relation)
    def delete_records_by_filter(self, attributes_dict=None, metadata_dict=None):
        """
        Delete records and relations by a filter
        :param attributes_dict: Filter dict
        :param metadata_dict: Filter dict for metadata
        :return: True on completion
        """
        if attributes_dict is None:
            attributes_dict = dict()
        if metadata_dict is None:
            metadata_dict = dict()
        (encoded_params, cypher_str) = self._get_cypher_filter_params(attributes_dict, metadata_dict)
        session = self._create_session()
        session.run(cypher_commands.NEO4J_DELETE_NODE_BY_PROPERTIES.format(filter_dict=cypher_str), encoded_params)
        return True
    def delete_record(self, record_id):
        """
        Delete a single record
        :param record_id: internal neo4j node id
        :return: True on completion
        """
        session = self._create_session()
        session.run(cypher_commands.NEO4J_DELETE__NODE_BY_ID, {"node_id": int(record_id)})
        return True
    def delete_relation(self, relation_id):
        """
        Delete a single relation
        :param relation_id: internal neo4j relationship id
        :return: True on completion
        """
        session = self._create_session()
        session.run(cypher_commands.NEO4J_DELETE_RELATION_BY_ID, {"relation_id": int(relation_id)})
        return True
| |
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Datasets."""
import six
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud.exceptions import NotFound
from google.cloud.bigquery.table import Table
from google.cloud.iterator import HTTPIterator
class AccessGrant(object):
    """Represent grant of an access role to an entity.

    Every entry in the access list will have exactly one of
    ``userByEmail``, ``groupByEmail``, ``domain``, ``specialGroup`` or
    ``view`` set. And if anything but ``view`` is set, it'll also have a
    ``role`` specified. ``role`` is omitted for a ``view``, since
    ``view`` s are always read-only.

    See https://cloud.google.com/bigquery/docs/reference/v2/datasets.

    :type role: str
    :param role: Role granted to the entity. One of

        * ``'OWNER'``
        * ``'WRITER'``
        * ``'READER'``

        May also be ``None`` if the ``entity_type`` is ``view``.

    :type entity_type: str
    :param entity_type: Type of entity being granted the role. One of
                        :attr:`ENTITY_TYPES`.

    :type entity_id: str
    :param entity_id: ID of entity being granted the role.

    :raises: :class:`ValueError` if the ``entity_type`` is not among
             :attr:`ENTITY_TYPES`, or if a ``view`` has ``role`` set or
             a non ``view`` **does not** have a ``role`` set.
    """
    ENTITY_TYPES = frozenset(['userByEmail', 'groupByEmail', 'domain',
                              'specialGroup', 'view'])
    """Allowed entity types."""

    def __init__(self, role, entity_type, entity_id):
        if entity_type not in self.ENTITY_TYPES:
            message = 'Entity type %r not among: %s' % (
                entity_type, ', '.join(self.ENTITY_TYPES))
            raise ValueError(message)
        if entity_type == 'view':
            if role is not None:
                raise ValueError('Role must be None for a view. Received '
                                 'role: %r' % (role,))
        else:
            if role is None:
                raise ValueError('Role must be set for entity '
                                 'type %r' % (entity_type,))
        self.role = role
        self.entity_type = entity_type
        self.entity_id = entity_id

    def __eq__(self, other):
        # BUG FIX: comparing against a non-AccessGrant used to raise
        # AttributeError; return NotImplemented so Python can fall back
        # to its default comparison.
        if not isinstance(other, AccessGrant):
            return NotImplemented
        return (
            self.role == other.role and
            self.entity_type == other.entity_type and
            self.entity_id == other.entity_id)

    def __ne__(self, other):
        # Required for Python 2 (this module supports it via ``six``):
        # defining __eq__ alone does not provide ``!=`` there.
        return not self == other

    def __repr__(self):
        return '<AccessGrant: role=%s, %s=%s>' % (
            self.role, self.entity_type, self.entity_id)
class Dataset(object):
"""Datasets are containers for tables.
See:
https://cloud.google.com/bigquery/docs/reference/v2/datasets
:type name: str
:param name: the name of the dataset
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: A client which holds credentials and project configuration
for the dataset (which requires a project).
:type access_grants: list of :class:`AccessGrant`
:param access_grants: roles granted to entities for this dataset
:type project: str
:param project: (Optional) project ID for the dataset (defaults to
the project of the client).
"""
_access_grants = None
def __init__(self, name, client, access_grants=(), project=None):
self.name = name
self._client = client
self._properties = {}
# Let the @property do validation.
self.access_grants = access_grants
self._project = project or client.project
@property
def project(self):
"""Project bound to the dataset.
:rtype: str
:returns: the project (derived from the client).
"""
return self._project
@property
def path(self):
"""URL path for the dataset's APIs.
:rtype: str
:returns: the path based on project and dataste name.
"""
return '/projects/%s/datasets/%s' % (self.project, self.name)
@property
def access_grants(self):
"""Dataset's access grants.
:rtype: list of :class:`AccessGrant`
:returns: roles granted to entities for this dataset
"""
return list(self._access_grants)
@access_grants.setter
def access_grants(self, value):
"""Update dataset's access grants
:type value: list of :class:`AccessGrant`
:param value: roles granted to entities for this dataset
:raises: TypeError if 'value' is not a sequence, or ValueError if
any item in the sequence is not an AccessGrant
"""
if not all(isinstance(field, AccessGrant) for field in value):
raise ValueError('Values must be AccessGrant instances')
self._access_grants = tuple(value)
@property
def created(self):
"""Datetime at which the dataset was created.
:rtype: ``datetime.datetime``, or ``NoneType``
:returns: the creation time (None until set from the server).
"""
creation_time = self._properties.get('creationTime')
if creation_time is not None:
# creation_time will be in milliseconds.
return _datetime_from_microseconds(1000.0 * creation_time)
@property
def dataset_id(self):
"""ID for the dataset resource.
:rtype: str, or ``NoneType``
:returns: the ID (None until set from the server).
"""
return self._properties.get('id')
@property
def etag(self):
"""ETag for the dataset resource.
:rtype: str, or ``NoneType``
:returns: the ETag (None until set from the server).
"""
return self._properties.get('etag')
@property
def modified(self):
"""Datetime at which the dataset was last modified.
:rtype: ``datetime.datetime``, or ``NoneType``
:returns: the modification time (None until set from the server).
"""
modified_time = self._properties.get('lastModifiedTime')
if modified_time is not None:
# modified_time will be in milliseconds.
return _datetime_from_microseconds(1000.0 * modified_time)
@property
def self_link(self):
"""URL for the dataset resource.
:rtype: str, or ``NoneType``
:returns: the URL (None until set from the server).
"""
return self._properties.get('selfLink')
@property
def default_table_expiration_ms(self):
"""Default expiration time for tables in the dataset.
:rtype: int, or ``NoneType``
:returns: The time in milliseconds, or None (the default).
"""
return self._properties.get('defaultTableExpirationMs')
@default_table_expiration_ms.setter
def default_table_expiration_ms(self, value):
"""Update default expiration time for tables in the dataset.
:type value: int
:param value: (Optional) new default time, in milliseconds
:raises: ValueError for invalid value types.
"""
if not isinstance(value, six.integer_types) and value is not None:
raise ValueError("Pass an integer, or None")
self._properties['defaultTableExpirationMs'] = value
@property
def description(self):
"""Description of the dataset.
:rtype: str, or ``NoneType``
:returns: The description as set by the user, or None (the default).
"""
return self._properties.get('description')
@description.setter
def description(self, value):
"""Update description of the dataset.
:type value: str
:param value: (Optional) new description
:raises: ValueError for invalid value types.
"""
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties['description'] = value
@property
def friendly_name(self):
"""Title of the dataset.
:rtype: str, or ``NoneType``
:returns: The name as set by the user, or None (the default).
"""
return self._properties.get('friendlyName')
@friendly_name.setter
def friendly_name(self, value):
"""Update title of the dataset.
:type value: str
:param value: (Optional) new title
:raises: ValueError for invalid value types.
"""
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties['friendlyName'] = value
@property
def location(self):
"""Location in which the dataset is hosted.
:rtype: str, or ``NoneType``
:returns: The location as set by the user, or None (the default).
"""
return self._properties.get('location')
@location.setter
def location(self, value):
"""Update location in which the dataset is hosted.
:type value: str
:param value: (Optional) new location
:raises: ValueError for invalid value types.
"""
if not isinstance(value, six.string_types) and value is not None:
raise ValueError("Pass a string, or None")
self._properties['location'] = value
@classmethod
def from_api_repr(cls, resource, client):
"""Factory: construct a dataset given its API representation
:type resource: dict
:param resource: dataset resource representation returned from the API
:type client: :class:`google.cloud.bigquery.client.Client`
:param client: Client which holds credentials and project
configuration for the dataset.
:rtype: :class:`google.cloud.bigquery.dataset.Dataset`
:returns: Dataset parsed from ``resource``.
"""
if ('datasetReference' not in resource or
'datasetId' not in resource['datasetReference']):
raise KeyError('Resource lacks required identity information:'
'["datasetReference"]["datasetId"]')
name = resource['datasetReference']['datasetId']
dataset = cls(name, client=client)
dataset._set_properties(resource)
return dataset
def _require_client(self, client):
"""Check client or verify over-ride.
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
:rtype: :class:`google.cloud.bigquery.client.Client`
:returns: The client passed in or the currently bound client.
"""
if client is None:
client = self._client
return client
@staticmethod
def _parse_access_grants(access):
"""Parse a resource fragment into a set of access grants.
``role`` augments the entity type and present **unless** the entity
type is ``view``.
:type access: list of mappings
:param access: each mapping represents a single access grant.
:rtype: list of :class:`AccessGrant`
:returns: a list of parsed grants.
:raises: :class:`ValueError` if a grant in ``access`` has more keys
than ``role`` and one additional key.
"""
result = []
for grant in access:
grant = grant.copy()
role = grant.pop('role', None)
entity_type, entity_id = grant.popitem()
if len(grant) != 0:
raise ValueError('Grant has unexpected keys remaining.', grant)
result.append(
AccessGrant(role, entity_type, entity_id))
return result
def _set_properties(self, api_response):
"""Update properties from resource in body of ``api_response``
:type api_response: httplib2.Response
:param api_response: response returned from an API call.
"""
self._properties.clear()
cleaned = api_response.copy()
access = cleaned.pop('access', ())
self.access_grants = self._parse_access_grants(access)
if 'creationTime' in cleaned:
cleaned['creationTime'] = float(cleaned['creationTime'])
if 'lastModifiedTime' in cleaned:
cleaned['lastModifiedTime'] = float(cleaned['lastModifiedTime'])
if 'defaultTableExpirationMs' in cleaned:
cleaned['defaultTableExpirationMs'] = int(
cleaned['defaultTableExpirationMs'])
self._properties.update(cleaned)
def _build_access_resource(self):
"""Generate a resource fragment for dataset's access grants."""
result = []
for grant in self.access_grants:
info = {grant.entity_type: grant.entity_id}
if grant.role is not None:
info['role'] = grant.role
result.append(info)
return result
def _build_resource(self):
"""Generate a resource for ``create`` or ``update``."""
resource = {
'datasetReference': {
'projectId': self.project, 'datasetId': self.name},
}
if self.default_table_expiration_ms is not None:
value = self.default_table_expiration_ms
resource['defaultTableExpirationMs'] = value
if self.description is not None:
resource['description'] = self.description
if self.friendly_name is not None:
resource['friendlyName'] = self.friendly_name
if self.location is not None:
resource['location'] = self.location
if len(self.access_grants) > 0:
resource['access'] = self._build_access_resource()
return resource
def create(self, client=None):
"""API call: create the dataset via a PUT request.
See:
https://cloud.google.com/bigquery/docs/reference/v2/tables/insert
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
"""
client = self._require_client(client)
path = '/projects/%s/datasets' % (self.project,)
api_response = client._connection.api_request(
method='POST', path=path, data=self._build_resource())
self._set_properties(api_response)
def exists(self, client=None):
"""API call: test for the existence of the dataset via a GET request
See
https://cloud.google.com/bigquery/docs/reference/v2/datasets/get
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
:rtype: bool
:returns: Boolean indicating existence of the dataset.
"""
client = self._require_client(client)
try:
client._connection.api_request(method='GET', path=self.path,
query_params={'fields': 'id'})
except NotFound:
return False
else:
return True
def reload(self, client=None):
"""API call: refresh dataset properties via a GET request.
See
https://cloud.google.com/bigquery/docs/reference/v2/datasets/get
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
"""
client = self._require_client(client)
api_response = client._connection.api_request(
method='GET', path=self.path)
self._set_properties(api_response)
def patch(self, client=None, **kw):
    """API call: update individual dataset properties via a PATCH request.

    See
    https://cloud.google.com/bigquery/docs/reference/v2/datasets/patch

    :type client: :class:`~google.cloud.bigquery.client.Client` or
                  ``NoneType``
    :param client: the client to use.  If not passed, falls back to the
                   ``client`` stored on the current dataset.

    :type kw: ``dict``
    :param kw: properties to be patched.

    :raises: ValueError for invalid value types.
    """
    client = self._require_client(client)
    partial = {}
    # default_table_expiration_ms is the only property needing validation.
    if 'default_table_expiration_ms' in kw:
        value = kw['default_table_expiration_ms']
        if not isinstance(value, six.integer_types) and value is not None:
            raise ValueError("Pass an integer, or None")
        partial['defaultTableExpirationMs'] = value
    # Remaining properties are copied straight through, renamed to the
    # camelCase keys the API expects.
    for keyword, api_field in (('description', 'description'),
                               ('friendly_name', 'friendlyName'),
                               ('location', 'location')):
        if keyword in kw:
            partial[api_field] = kw[keyword]
    api_response = client._connection.api_request(
        method='PATCH', path=self.path, data=partial)
    self._set_properties(api_response)
def update(self, client=None):
    """API call: update dataset properties via a PUT request.

    See
    https://cloud.google.com/bigquery/docs/reference/v2/datasets/update

    :type client: :class:`~google.cloud.bigquery.client.Client` or
                  ``NoneType``
    :param client: the client to use.  If not passed, falls back to the
                   ``client`` stored on the current dataset.
    """
    client = self._require_client(client)
    # PUT replaces the whole resource, so send the full local state.
    resource = self._build_resource()
    api_response = client._connection.api_request(
        method='PUT', path=self.path, data=resource)
    self._set_properties(api_response)
def delete(self, client=None):
    """API call: delete the dataset via a DELETE request.

    See:
    https://cloud.google.com/bigquery/docs/reference/v2/datasets/delete

    :type client: :class:`~google.cloud.bigquery.client.Client` or
                  ``NoneType``
    :param client: the client to use.  If not passed, falls back to the
                   ``client`` stored on the current dataset.
    """
    client = self._require_client(client)
    # No response body is expected for a successful DELETE.
    client._connection.api_request(method='DELETE', path=self.path)
def list_tables(self, max_results=None, page_token=None):
    """List tables for the project associated with this client.

    See:
    https://cloud.google.com/bigquery/docs/reference/v2/tables/list

    :type max_results: int
    :param max_results: (Optional) Maximum number of tables to return.
                        If not passed, defaults to a value set by the API.

    :type page_token: str
    :param page_token: (Optional) Opaque marker for the next "page" of
                       datasets. If not passed, the API will return the
                       first page of datasets.

    :rtype: :class:`~google.cloud.iterator.Iterator`
    :returns: Iterator of :class:`~google.cloud.bigquery.table.Table`
              contained within the current dataset.
    """
    tables_path = '/projects/%s/datasets/%s/tables' % (
        self.project, self.name)
    iterator = HTTPIterator(
        client=self._client, path=tables_path,
        item_to_value=_item_to_table, items_key='tables',
        page_token=page_token, max_results=max_results)
    # _item_to_table needs the owning dataset to construct Table objects.
    iterator.dataset = self
    return iterator
def table(self, name, schema=()):
    """Construct a table bound to this dataset.

    :type name: str
    :param name: Name of the table.

    :type schema: list of :class:`google.cloud.bigquery.table.SchemaField`
    :param schema: The table's schema

    :rtype: :class:`google.cloud.bigquery.table.Table`
    :returns: a new ``Table`` instance
    """
    return Table(name, schema=schema, dataset=self)
def _item_to_table(iterator, resource):
    """Convert a JSON table to the native object.

    :type iterator: :class:`~google.cloud.iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type resource: dict
    :param resource: An item to be converted to a table.

    :rtype: :class:`~google.cloud.bigquery.table.Table`
    :returns: The next table in the page.
    """
    # list_tables() attaches the owning dataset to the iterator.
    dataset = iterator.dataset
    return Table.from_api_repr(resource, dataset)
| |
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tool for generating a client library.
Relevant links:
https://developers.google.com/discovery/v1/reference/apis#resource
"""
import datetime
from apitools.gen import command_registry
from apitools.gen import message_registry
from apitools.gen import service_registry
from apitools.gen import util
def _ApitoolsVersion():
"""Returns version of the currently installed google-apitools package."""
try:
import pkg_resources
except ImportError:
return 'X.X.X'
try:
return pkg_resources.get_distribution('google-apitools').version
except pkg_resources.DistributionNotFound:
return 'X.X.X'
def _StandardQueryParametersSchema(discovery_doc):
"""Sets up dict of standard query parameters."""
standard_query_schema = {
'id': 'StandardQueryParameters',
'type': 'object',
'description': 'Query parameters accepted by all methods.',
'properties': discovery_doc.get('parameters', {}),
}
# We add an entry for the trace, since Discovery doesn't.
standard_query_schema['properties']['trace'] = {
'type': 'string',
'description': ('A tracing token of the form "token:<tokenid>" '
'to include in api requests.'),
'location': 'query',
}
return standard_query_schema
class DescriptorGenerator(object):
    """Code generator for a given discovery document.

    Builds, in order, a message registry (schemas), a command registry
    (CLI commands) and a service registry (API resources/methods) from the
    discovery document, then exposes Write* methods that emit the
    generated library files via a pretty-printer.
    """

    def __init__(self, discovery_doc, client_info, names, root_package, outdir,
                 base_package, protorpc_package, generate_cli=False,
                 init_wildcards_file=True,
                 use_proto2=False, unelidable_request_methods=None,
                 apitools_version=''):
        self.__discovery_doc = discovery_doc
        self.__client_info = client_info
        self.__outdir = outdir
        self.__use_proto2 = use_proto2
        self.__description = util.CleanDescription(
            self.__discovery_doc.get('description', ''))
        self.__package = self.__client_info.package
        self.__version = self.__client_info.version
        # Discovery revision; used as the micro version of the package.
        self.__revision = discovery_doc.get('revision', '1')
        self.__generate_cli = generate_cli
        self.__init_wildcards_file = init_wildcards_file
        self.__root_package = root_package
        self.__base_files_package = base_package
        self.__protorpc_package = protorpc_package
        self.__names = names
        # Order is important here: we need the schemas before we can
        # define the services.
        self.__message_registry = message_registry.MessageRegistry(
            self.__client_info, self.__names, self.__description,
            self.__root_package, self.__base_files_package,
            self.__protorpc_package)
        # sorted() keeps generation deterministic across runs.
        schemas = self.__discovery_doc.get('schemas', {})
        for schema_name, schema in sorted(schemas.items()):
            self.__message_registry.AddDescriptorFromSchema(
                schema_name, schema)
        # We need to add one more message type for the global parameters.
        standard_query_schema = _StandardQueryParametersSchema(
            self.__discovery_doc)
        self.__message_registry.AddDescriptorFromSchema(
            standard_query_schema['id'], standard_query_schema)
        # Now that we know all the messages, we need to correct some
        # fields from MessageFields to EnumFields.
        self.__message_registry.FixupMessageFields()
        self.__command_registry = command_registry.CommandRegistry(
            self.__package, self.__version, self.__client_info,
            self.__message_registry, self.__root_package,
            self.__base_files_package, self.__protorpc_package,
            self.__names)
        self.__command_registry.AddGlobalParameters(
            self.__message_registry.LookupDescriptorOrDie(
                'StandardQueryParameters'))
        self.__services_registry = service_registry.ServiceRegistry(
            self.__client_info,
            self.__message_registry,
            self.__command_registry,
            self.__names,
            self.__root_package,
            self.__base_files_package,
            unelidable_request_methods or [])
        services = self.__discovery_doc.get('resources', {})
        for service_name, methods in sorted(services.items()):
            self.__services_registry.AddServiceFromResource(
                service_name, methods)
        # We might also have top-level methods.
        api_methods = self.__discovery_doc.get('methods', [])
        if api_methods:
            self.__services_registry.AddServiceFromResource(
                'api', {'methods': api_methods})
        # pylint: disable=protected-access
        # Fill in the scopes discovered while registering the services.
        self.__client_info = self.__client_info._replace(
            scopes=self.__services_registry.scopes)
        # The apitools version that will be used in prerequisites for the
        # generated packages.
        self.__apitools_version = (
            apitools_version if apitools_version else _ApitoolsVersion())

    # Read-only accessors over the constructor-derived state.
    @property
    def client_info(self):
        """ClientInfo for the generated client, with scopes filled in."""
        return self.__client_info

    @property
    def discovery_doc(self):
        """The discovery document this generator was built from."""
        return self.__discovery_doc

    @property
    def names(self):
        """Naming helper used when generating identifiers."""
        return self.__names

    @property
    def outdir(self):
        """Output directory for the generated files."""
        return self.__outdir

    @property
    def package(self):
        """Package name of the generated client."""
        return self.__package

    @property
    def use_proto2(self):
        """Whether to generate proto2-style output."""
        return self.__use_proto2

    @property
    def apitools_version(self):
        """apitools version recorded in generated package requirements."""
        return self.__apitools_version

    def _GetPrinter(self, out):
        """Return a pretty-printer writing to the stream ``out``."""
        printer = util.SimplePrettyPrinter(out)
        return printer

    def WriteInit(self, out):
        """Write a simple __init__.py for the generated client."""
        printer = self._GetPrinter(out)
        if self.__init_wildcards_file:
            printer('"""Common imports for generated %s client library."""',
                    self.__client_info.package)
            printer('# pylint:disable=wildcard-import')
        else:
            printer('"""Package marker file."""')
        printer()
        printer('import pkgutil')
        printer()
        if self.__init_wildcards_file:
            printer('from %s import *', self.__base_files_package)
            # '.' means the client lives at the root, so no import prefix.
            if self.__root_package == '.':
                import_prefix = ''
            else:
                import_prefix = '%s.' % self.__root_package
            if self.__generate_cli:
                printer('from %s%s import *',
                        import_prefix, self.__client_info.cli_rule_name)
            printer('from %s%s import *',
                    import_prefix, self.__client_info.client_rule_name)
            printer('from %s%s import *',
                    import_prefix, self.__client_info.messages_rule_name)
            printer()
        printer('__path__ = pkgutil.extend_path(__path__, __name__)')

    def WriteIntermediateInit(self, out):
        """Write a simple __init__.py for an intermediate directory."""
        printer = self._GetPrinter(out)
        printer('#!/usr/bin/env python')
        printer('"""Shared __init__.py for apitools."""')
        printer()
        printer('from pkgutil import extend_path')
        printer('__path__ = extend_path(__path__, __name__)')

    def WriteSetupPy(self, out):
        """Write a setup.py for upload to PyPI."""
        printer = self._GetPrinter(out)
        year = datetime.datetime.now().year
        printer('# Copyright %s Google Inc. All Rights Reserved.' % year)
        printer('#')
        # NOTE(review): the two adjacent literals below concatenate to
        # '...(the"License");' with no space before the quote — looks like
        # a dropped space; confirm against upstream before changing.
        printer('# Licensed under the Apache License, Version 2.0 (the'
                '"License");')
        printer('# you may not use this file except in compliance with '
                'the License.')
        printer('# You may obtain a copy of the License at')
        printer('#')
        printer('# http://www.apache.org/licenses/LICENSE-2.0')
        printer('#')
        printer('# Unless required by applicable law or agreed to in writing, '
                'software')
        printer('# distributed under the License is distributed on an "AS IS" '
                'BASIS,')
        printer('# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either '
                'express or implied.')
        printer('# See the License for the specific language governing '
                'permissions and')
        printer('# limitations under the License.')
        printer()
        printer('import setuptools')
        printer('REQUIREMENTS = [')
        with printer.Indent(indent=' '):
            # Pin google-apitools to the current major.minor series.
            parts = self.apitools_version.split('.')
            major = parts.pop(0)
            minor = parts.pop(0)
            printer('"google-apitools>=%s,~=%s.%s",',
                    self.apitools_version, major, minor)
            printer('"httplib2>=0.9",')
            printer('"oauth2client>=1.4.12",')
        printer(']')
        printer('_PACKAGE = "apitools.clients.%s"' % self.__package)
        printer()
        printer('setuptools.setup(')
        # TODO(craigcitro): Allow customization of these options.
        with printer.Indent(indent=' '):
            printer('name="google-apitools-%s-%s",',
                    self.__package, self.__version)
            printer('version="%s.%s",',
                    self.apitools_version, self.__revision)
            printer('description="Autogenerated apitools library for %s",' % (
                self.__package,))
            printer('url="https://github.com/google/apitools",')
            printer('author="Craig Citro",')
            printer('author_email="craigcitro@google.com",')
            printer('packages=setuptools.find_packages(),')
            printer('install_requires=REQUIREMENTS,')
            printer('classifiers=[')
            with printer.Indent(indent=' '):
                printer('"Programming Language :: Python :: 2.7",')
                printer('"License :: OSI Approved :: Apache Software '
                        'License",')
            printer('],')
            printer('license="Apache 2.0",')
            printer('keywords="apitools apitools-%s %s",' % (
                self.__package, self.__package))
        printer(')')

    def WriteMessagesFile(self, out):
        """Write the generated messages module to ``out``."""
        self.__message_registry.WriteFile(self._GetPrinter(out))

    def WriteMessagesProtoFile(self, out):
        """Write the messages as a .proto file to ``out``."""
        self.__message_registry.WriteProtoFile(self._GetPrinter(out))

    def WriteServicesProtoFile(self, out):
        """Write the services as a .proto file to ``out``."""
        self.__services_registry.WriteProtoFile(self._GetPrinter(out))

    def WriteClientLibrary(self, out):
        """Write the generated client module to ``out``."""
        self.__services_registry.WriteFile(self._GetPrinter(out))

    def WriteCli(self, out):
        """Write the generated CLI module to ``out``."""
        self.__command_registry.WriteFile(self._GetPrinter(out))
| |
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Ioan Sucan
from geometry_msgs.msg import Pose, PoseStamped
from moveit_msgs.msg import RobotTrajectory, Grasp, PlaceLocation, Constraints
from sensor_msgs.msg import JointState
import rospy
import tf
from moveit_ros_planning_interface import _moveit_move_group_interface
from exception import MoveItCommanderException
import conversions
class MoveGroupCommander(object):
"""
Execution of simple commands for a particular group
"""
def __init__(self, name, robot_description="robot_description"):
    """ Specify the group name for which to construct this commander instance. Throws an exception if there is an initialization error. """
    # All work is delegated to the wrapped C++ MoveGroup interface;
    # robot_description is the parameter name holding the URDF.
    self._g = _moveit_move_group_interface.MoveGroup(name, robot_description)
# Thin delegating accessors over the wrapped C++ MoveGroup interface.
def get_name(self):
    """ Get the name of the group this instance was initialized for """
    return self._g.get_name()

def stop(self):
    """ Stop the current execution, if any """
    self._g.stop()

def get_active_joints(self):
    """ Get the active joints of this group """
    return self._g.get_active_joints()

def get_joints(self):
    """ Get the joints of this group """
    return self._g.get_joints()

def get_variable_count(self):
    """ Return the number of variables used to parameterize a state in this group (larger or equal to number of DOF)"""
    return self._g.get_variable_count()

def has_end_effector_link(self):
    """ Check if this group has a link that is considered to be an end effector """
    # An empty link name from the interface means "no end effector".
    return len(self._g.get_end_effector_link()) > 0

def get_end_effector_link(self):
    """ Get the name of the link that is considered to be an end-effector. Return an empty string if there is no end-effector. """
    return self._g.get_end_effector_link()
def set_end_effector_link(self, link_name):
""" Set the name of the link to be considered as an end effector """
if not self._g.set_end_effector_link(link_name):
raise MoveItCommanderException("Unable to set end efector link")
# Frame and joint-state accessors; all delegate to the C++ interface.
def get_pose_reference_frame(self):
    """ Get the reference frame assumed for poses of end-effectors """
    return self._g.get_pose_reference_frame()

def set_pose_reference_frame(self, reference_frame):
    """ Set the reference frame to assume for poses of end-effectors """
    self._g.set_pose_reference_frame(reference_frame)

def get_planning_frame(self):
    """ Get the name of the frame where all planning is performed """
    return self._g.get_planning_frame()

def get_current_joint_values(self):
    """ Get the current configuration of the group as a list (these are values published on /joint_states) """
    return self._g.get_current_joint_values()
def get_current_pose(self, end_effector_link = ""):
    """ Get the current pose of the end-effector of the group as a PoseStamped. Throws an exception if there is no end-effector. """
    # Guard clause: an explicit link or a configured end-effector is required.
    if not end_effector_link and not self.has_end_effector_link():
        raise MoveItCommanderException("There is no end effector to get the pose of")
    pose_values = self._g.get_current_pose(end_effector_link)
    return conversions.list_to_pose_stamped(pose_values, self.get_planning_frame())
def get_current_rpy(self, end_effector_link = ""):
""" Get a list of 3 elements defining the [roll, pitch, yaw] of the end-effector. Throws an exception if there is not end-effector. """
if len(end_effector_link) > 0 or self.has_end_effector_link():
return self._g.get_current_rpy(end_effector_link)
else:
raise MoveItCommanderException("There is no end effector to get the rpy of")
def get_random_joint_values(self):
    """ Get a random set of joint values for the group (delegates to the underlying MoveGroup interface). """
    return self._g.get_random_joint_values()

def get_random_pose(self, end_effector_link = ""):
    """ Get a random end-effector pose as a PoseStamped in the planning frame. Throws an exception if there is no end-effector. """
    if len(end_effector_link) > 0 or self.has_end_effector_link():
        return conversions.list_to_pose_stamped(self._g.get_random_pose(end_effector_link), self.get_planning_frame())
    else:
        raise MoveItCommanderException("There is no end effector to get the pose of")
def set_start_state_to_current_state(self):
    """ Use the robot's current state as the start state for planning. """
    self._g.set_start_state_to_current_state()

def set_start_state(self, msg):
    """
    Specify a start state for the group.

    Parameters
    ----------
    msg : moveit_msgs/RobotState

    Examples
    --------
    >>> from moveit_msgs.msg import RobotState
    >>> from sensor_msgs.msg import JointState
    >>> joint_state = JointState()
    >>> joint_state.header = Header()
    >>> joint_state.header.stamp = rospy.Time.now()
    >>> joint_state.name = ['joint_a', 'joint_b']
    >>> joint_state.position = [0.17, 0.34]
    >>> moveit_robot_state = RobotState()
    >>> moveit_robot_state.joint_state = joint_state
    >>> group.set_start_state(moveit_robot_state)
    """
    # The message is serialized to its wire format for the C++ side.
    self._g.set_start_state(conversions.msg_to_string(msg))
def get_joint_value_target(self):
    """ Return the currently set joint value target (delegates to the underlying MoveGroup interface). """
    return self._g.get_joint_value_target()
def set_joint_value_target(self, arg1, arg2 = None, arg3 = None):
    """
    Specify a target joint configuration for the group.
    - if the type of arg1 is one of the following: dict, list, JointState message, then no other arguments should be provided.
    The dict should specify pairs of joint variable names and their target values, the list should specify all the variable values
    for the group. The JointState message specifies the positions of some single-dof joints.
    - if the type of arg1 is string, then arg2 is expected to be defined and be either a real value or a list of real values. This is
    interpreted as setting a particular joint to a particular value.
    - if the type of arg1 is Pose or PoseStamped, both arg2 and arg3 could be defined. If arg2 or arg3 are defined, their types must
    be either string or bool. The string type argument is interpreted as the end-effector the pose is specified for (default is to use
    the default end-effector), and the bool is used to decide whether the pose specified is approximate (default is false). This situation
    allows setting the joint target of the group by calling IK. This does not send a pose to the planner and the planner will do no IK.
    Instead, one IK solution will be computed first, and that will be sent to the planner.
    """
    # Dispatch on the runtime type of arg1 (see docstring for each case).
    if type(arg1) is JointState:
        if (arg2 != None or arg3 != None):
            raise MoveItCommanderException("Too many arguments specified")
        if not self._g.set_joint_value_target_from_joint_state_message(conversions.msg_to_string(arg1)):
            raise MoveItCommanderException("Error setting joint target. Is the target within bounds?")
    elif (type(arg1) is str):
        # Single joint name + value(s).
        if (arg2 == None):
            raise MoveItCommanderException("Joint value expected when joint name specified")
        if (arg3 != None):
            raise MoveItCommanderException("Too many arguments specified")
        if not self._g.set_joint_value_target(arg1, arg2):
            raise MoveItCommanderException("Error setting joint target. Is the target within bounds?")
    elif (type(arg1) is PoseStamped) or (type(arg1) is Pose):
        # Pose target resolved to a joint target via IK; arg2/arg3 may be
        # the end-effector name (str) and/or the "approximate" flag (bool).
        approx = False
        eef = ""
        if (arg2 != None):
            if type(arg2) is str:
                eef = arg2
            else:
                if type(arg2) is bool:
                    approx = arg2
                else:
                    raise MoveItCommanderException("Unexpected type")
        if (arg3 != None):
            if type(arg3) is str:
                eef = arg3
            else:
                if type(arg3) is bool:
                    approx = arg3
                else:
                    raise MoveItCommanderException("Unexpected type")
        r = False
        if type(arg1) is PoseStamped:
            r = self._g.set_joint_value_target_from_pose_stamped(conversions.msg_to_string(arg1), eef, approx)
        else:
            r = self._g.set_joint_value_target_from_pose(conversions.msg_to_string(arg1), eef, approx)
        if not r:
            if approx:
                raise MoveItCommanderException("Error setting joint target. Does your IK solver support approximate IK?")
            else:
                raise MoveItCommanderException("Error setting joint target. Is IK running?")
    elif (hasattr(arg1, '__iter__')):
        # Any iterable (list, dict, ...) of joint values.
        if (arg2 != None or arg3 != None):
            raise MoveItCommanderException("Too many arguments specified")
        if not self._g.set_joint_value_target(arg1):
            raise MoveItCommanderException("Error setting joint target. Is the target within bounds?")
    else:
        raise MoveItCommanderException("Unsupported argument of type %s" % type(arg1))
def set_rpy_target(self, rpy, end_effector_link = ""):
""" Specify a target orientation for the end-effector. Any position of the end-effector is acceptable."""
if len(end_effector_link) > 0 or self.has_end_effector_link():
if len(rpy) == 3:
if not self._g.set_rpy_target(rpy[0], rpy[1], rpy[2], end_effector_link):
raise MoveItCommanderException("Unable to set orientation target")
else:
raise MoveItCommanderException("Expected [roll, pitch, yaw]")
else:
raise MoveItCommanderException("There is no end effector to set the pose for")
def set_orientation_target(self, q, end_effector_link = ""):
""" Specify a target orientation for the end-effector. Any position of the end-effector is acceptable."""
if len(end_effector_link) > 0 or self.has_end_effector_link():
if len(q) == 4:
if not self._g.set_orientation_target(q[0], q[1], q[2], q[3], end_effector_link):
raise MoveItCommanderException("Unable to set orientation target")
else:
raise MoveItCommanderException("Expected [qx, qy, qz, qw]")
else:
raise MoveItCommanderException("There is no end effector to set the pose for")
def set_position_target(self, xyz, end_effector_link = ""):
""" Specify a target position for the end-effector. Any orientation of the end-effector is acceptable."""
if len(end_effector_link) > 0 or self.has_end_effector_link():
if not self._g.set_position_target(xyz[0], xyz[1], xyz[2], end_effector_link):
raise MoveItCommanderException("Unable to set position target")
else:
raise MoveItCommanderException("There is no end effector to set the pose for")
def set_pose_target(self, pose, end_effector_link = ""):
    """ Set the pose of the end-effector, if one is available. The expected input is a Pose message, a PoseStamped message or a list of 6 floats: [x, y, z, rot_x, rot_y, rot_z] or a list of 7 floats [x, y, z, qx, qy, qz, qw] """
    # NOTE: the second bare string literal that followed the docstring was a
    # no-op statement; its text has been merged into the docstring above.
    if len(end_effector_link) > 0 or self.has_end_effector_link():
        ok = False
        if type(pose) is PoseStamped:
            # Temporarily adopt the frame of the stamped pose, then restore
            # the previous reference frame.
            old = self.get_pose_reference_frame()
            self.set_pose_reference_frame(pose.header.frame_id)
            ok = self._g.set_pose_target(conversions.pose_to_list(pose.pose), end_effector_link)
            self.set_pose_reference_frame(old)
        elif type(pose) is Pose:
            ok = self._g.set_pose_target(conversions.pose_to_list(pose), end_effector_link)
        else:
            # Assume a plain list of 6 (xyz+rpy) or 7 (xyz+quaternion) floats.
            ok = self._g.set_pose_target(pose, end_effector_link)
        if not ok:
            raise MoveItCommanderException("Unable to set target pose")
    else:
        raise MoveItCommanderException("There is no end effector to set the pose for")
def set_pose_targets(self, poses, end_effector_link = ""):
    """ Set the pose of the end-effector, if one is available. The expected input is a list of poses. Each pose can be a Pose message, a list of 6 floats: [x, y, z, rot_x, rot_y, rot_z] or a list of 7 floats [x, y, z, qx, qy, qz, qw] """
    if len(end_effector_link) > 0 or self.has_end_effector_link():
        # Pose messages are converted to lists; list inputs pass through.
        if not self._g.set_pose_targets([conversions.pose_to_list(p) if type(p) is Pose else p for p in poses], end_effector_link):
            raise MoveItCommanderException("Unable to set target poses")
    else:
        raise MoveItCommanderException("There is no end effector to set poses for")
def shift_pose_target(self, axis, value, end_effector_link = ""):
    """ Get the current pose of the end effector, add value to the corresponding axis (0..5: X, Y, Z, R, P, Y) and set the new pose as the pose target """
    if len(end_effector_link) > 0 or self.has_end_effector_link():
        pose = self._g.get_current_pose(end_effector_link)
        # by default we get orientation as a quaternion list
        # if we are updating a rotation axis however, we convert the orientation to RPY
        if axis > 2:
            (r, p, y) = tf.transformations.euler_from_quaternion(pose[3:])
            pose = [pose[0], pose[1], pose[2], r, p, y]
        if axis >= 0 and axis < 6:
            # Shift the selected component and re-target the shifted pose.
            pose[axis] = pose[axis] + value
            self.set_pose_target(pose, end_effector_link)
        else:
            raise MoveItCommanderException("An axis value between 0 and 5 expected")
    else:
        raise MoveItCommanderException("There is no end effector to set poses for")
# Target-clearing helpers; each delegates to the C++ interface.
def clear_pose_target(self, end_effector_link):
    """ Clear the pose target for a particular end-effector """
    self._g.clear_pose_target(end_effector_link)

def clear_pose_targets(self):
    """ Clear all known pose targets """
    self._g.clear_pose_targets()

def set_random_target(self):
    """ Set a random joint configuration target """
    self._g.set_random_target()
def set_named_target(self, name):
    """ Set a joint configuration by name. The name can be a name previously remembered with remember_joint_values() or a configuration specified in the SRDF. """
    if not self._g.set_named_target(name):
        raise MoveItCommanderException("Unable to set target %s. Is the target within bounds?" % name)
def remember_joint_values(self, name, values = None):
""" Record the specified joint configuration of the group under the specified name. If no values are specified, the current state of the group is recorded. """
if values == None:
values = self.get_current_joint_values()
self._g.remember_joint_values(name, values)
def get_remembered_joint_values(self):
    """ Get a dictionary that maps names to joint configurations for the group """
    return self._g.get_remembered_joint_values()

def forget_joint_values(self, name):
    """ Forget a stored joint configuration """
    self._g.forget_joint_values(name)
def get_goal_tolerance(self):
    """ Return a tuple of goal tolerances: joint, position and orientation. """
    return (self.get_goal_joint_tolerance(), self.get_goal_position_tolerance(), self.get_goal_orientation_tolerance())

def get_goal_joint_tolerance(self):
    """ Get the tolerance for achieving a joint goal (distance for each joint variable) """
    return self._g.get_goal_joint_tolerance()

def get_goal_position_tolerance(self):
    """ When moving to a position goal or to a pose goal, the tolerance for the goal position is specified as the radius a sphere around the target origin of the end-effector """
    return self._g.get_goal_position_tolerance()

def get_goal_orientation_tolerance(self):
    """ When moving to an orientation goal or to a pose goal, the tolerance for the goal orientation is specified as the distance (roll, pitch, yaw) to the target origin of the end-effector """
    return self._g.get_goal_orientation_tolerance()
def set_goal_tolerance(self, value):
    """ Set the joint, position and orientation goal tolerances simultaneously """
    self._g.set_goal_tolerance(value)

def set_goal_joint_tolerance(self, value):
    """ Set the tolerance for a target joint configuration """
    self._g.set_goal_joint_tolerance(value)

def set_goal_position_tolerance(self, value):
    """ Set the tolerance for a target end-effector position """
    self._g.set_goal_position_tolerance(value)

def set_goal_orientation_tolerance(self, value):
    """ Set the tolerance for a target end-effector orientation """
    self._g.set_goal_orientation_tolerance(value)
def allow_looking(self, value):
    """ Enable/disable looking around for motion planning """
    self._g.allow_looking(value)

def allow_replanning(self, value):
    """ Enable/disable replanning """
    self._g.allow_replanning(value)

def get_known_constraints(self):
    """ Get a list of names for the constraints specific for this group, as read from the warehouse """
    return self._g.get_known_constraints()
def get_path_constraints(self):
    """ Get the actual path constraints in form of a moveit_msgs.msg.Constraints """
    c = Constraints()
    # Deserialize the wire-format string from the C++ side into the message.
    c_str = self._g.get_path_constraints()
    conversions.msg_from_string(c,c_str)
    return c
def set_path_constraints(self, value):
    """ Specify the path constraints to be used (as read from the database) """
    # `is None` replaces the non-idiomatic `== None`; the nested else/if is
    # flattened into an elif chain with identical behavior.
    if value is None:
        self.clear_path_constraints()
    elif type(value) is Constraints:
        # A Constraints message is serialized and passed through directly.
        self._g.set_path_constraints_from_msg(conversions.msg_to_string(value))
    elif not self._g.set_path_constraints(value):
        # Otherwise value is treated as a constraints name from the database.
        raise MoveItCommanderException("Unable to set path constraints " + value)
def clear_path_constraints(self):
    """ Specify that no path constraints are to be used during motion planning """
    self._g.clear_path_constraints()

def set_constraints_database(self, host, port):
    """ Specify which database to connect to for loading possible path constraints """
    self._g.set_constraints_database(host, port)

def set_planning_time(self, seconds):
    """ Specify the amount of time to be used for motion planning. """
    self._g.set_planning_time(seconds)

def get_planning_time(self):
    """ Get the amount of time used for motion planning. """
    return self._g.get_planning_time()

def set_planner_id(self, planner_id):
    """ Specify which planner to use when motion planning """
    self._g.set_planner_id(planner_id)

def set_num_planning_attempts(self, num_planning_attempts):
    """ Set the number of times the motion plan is to be computed from scratch before the shortest solution is returned. The default value is 1. """
    self._g.set_num_planning_attempts(num_planning_attempts)
def set_workspace(self, ws):
""" Set the workspace for the robot as either [], [minX, minY, maxX, maxY] or [minX, minY, minZ, maxX, maxY, maxZ] """
if len(ws) == 0:
self._g.set_workspace(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
else:
if len(ws) == 4:
self._g.set_workspace(ws[0], ws[1], 0.0, ws[2], ws[3], 0.0)
else:
if len(ws) == 6:
self._g.set_workspace(ws[0], ws[1], ws[2], ws[3], ws[4], ws[5])
else:
raise MoveItCommanderException("Expected 0, 4 or 6 values in list specifying workspace")
def set_max_velocity_scaling_factor(self, value):
""" Set a scaling factor for optionally reducing the maximum joint velocity. Allowed values are in (0,1]. """
if value > 0 and value <= 1:
self._g.set_max_velocity_scaling_factor(value)
else:
raise MoveItCommanderException("Expected value in the range from 0 to 1 for scaling factor" )
def set_max_acceleration_scaling_factor(self, value):
""" Set a scaling factor for optionally reducing the maximum joint acceleration. Allowed values are in (0,1]. """
if value > 0 and value <= 1:
self._g.set_max_acceleration_scaling_factor(value)
else:
raise MoveItCommanderException("Expected value in the range from 0 to 1 for scaling factor" )
def go(self, joints = None, wait = True):
""" Set the target of the group and then move the group to the specified target """
if type(joints) is bool:
wait = joints
joints = None
elif type(joints) is JointState:
self.set_joint_value_target(joints)
elif type(joints) is Pose:
self.set_pose_target(joints)
elif not joints == None:
try:
self.set_joint_value_target(self.get_remembered_joint_values()[joints])
except:
self.set_joint_value_target(joints)
if wait:
return self._g.move()
else:
return self._g.async_move()
def plan(self, joints = None):
""" Return a motion plan (a RobotTrajectory) to the set goal state (or specified by the joints argument) """
if type(joints) is JointState:
self.set_joint_value_target(joints)
elif type(joints) is Pose:
self.set_pose_target(joints)
elif not joints == None:
try:
self.set_joint_value_target(self.get_remembered_joint_values()[joints])
except:
self.set_joint_value_target(joints)
plan = RobotTrajectory()
plan.deserialize(self._g.compute_plan())
return plan
def compute_cartesian_path(self, waypoints, eef_step, jump_threshold, avoid_collisions = True):
""" Compute a sequence of waypoints that make the end-effector move in straight line segments that follow the poses specified as waypoints. Configurations are computed for every eef_step meters; The jump_threshold specifies the maximum distance in configuration space between consecutive points in the resultingpath. The return value is a tuple: a fraction of how much of the path was followed, the actual RobotTrajectory. """
(ser_path, fraction) = self._g.compute_cartesian_path([conversions.pose_to_list(p) for p in waypoints], eef_step, jump_threshold, avoid_collisions)
path = RobotTrajectory()
path.deserialize(ser_path)
return (path, fraction)
def execute(self, plan_msg, wait = True):
"""Execute a previously planned path"""
if wait:
return self._g.execute(conversions.msg_to_string(plan_msg))
else:
return self._g.async_execute(conversions.msg_to_string(plan_msg))
def attach_object(self, object_name, link_name = "", touch_links = []):
""" Given the name of an object existing in the planning scene, attach it to a link. The link used is specified by the second argument. If left unspecified, the end-effector link is used, if one is known. If there is no end-effector link, the first link in the group is used. If no link is identified, failure is reported. True is returned if an attach request was succesfully sent to the move_group node. This does not verify that the attach request also was successfuly applied by move_group."""
return self._g.attach_object(object_name, link_name, touch_links)
    def detach_object(self, name = ""):
        """ Given the name of a link, detach the object(s) from that link. If no such link exists, the name is interpreted as an object name. If there is no name specified, an attempt is made to detach all objects attached to any link in the group.
        Returns whatever result the underlying move_group interface reports.
        """
        return self._g.detach_object(name)
def pick(self, object_name, grasp = []):
"""Pick the named object. A grasp message, or a list of Grasp messages can also be specified as argument."""
if type(grasp) is Grasp:
return self._g.pick(object_name, conversions.msg_to_string(grasp))
else:
return self._g.pick(object_name, [conversions.msg_to_string(x) for x in grasp])
    def place(self, object_name, location=None):
        """Place the named object at a particular location in the environment or somewhere safe in the world if location is not provided.
        location may be a Pose, a PoseStamped or a PlaceLocation message;
        raises MoveItCommanderException for any other type.
        """
        result = False
        if location is None:
            result = self._g.place(object_name)
        elif type(location) is PoseStamped:
            # Temporarily switch the pose reference frame to the frame the
            # stamped pose is expressed in, then restore the previous frame.
            # The save/place/restore order here is intentional.
            old = self.get_pose_reference_frame()
            self.set_pose_reference_frame(location.header.frame_id)
            result = self._g.place(object_name, conversions.pose_to_list(location.pose))
            self.set_pose_reference_frame(old)
        elif type(location) is Pose:
            result = self._g.place(object_name, conversions.pose_to_list(location))
        elif type(location) is PlaceLocation:
            result = self._g.place(object_name, conversions.msg_to_string(location))
        else:
            raise MoveItCommanderException("Parameter location must be a Pose, PoseStamped or PlaceLocation object")
        return result
    def set_support_surface_name(self, value):
        """ Set the support surface name for a place operation.
        value -- name of the scene object the placed object is allowed to touch
        """
        self._g.set_support_surface_name(value)
def retime_trajectory(self, ref_state_in, traj_in, velocity_scaling_factor):
ser_ref_state_in = conversions.msg_to_string(ref_state_in)
ser_traj_in = conversions.msg_to_string(traj_in)
ser_traj_out = self._g.retime_trajectory(ser_ref_state_in, ser_traj_in, velocity_scaling_factor)
traj_out = RobotTrajectory()
traj_out.deserialize(ser_traj_out)
return traj_out
| |
#
# Copyright (C) 2006-2016 Greg Landrum
# All Rights Reserved
#
import os
import re
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
from rdkit.Chem.Draw.rdMolDraw2D import *
from rdkit.six import iteritems
def _getCanvas():
  """Pick the best available drawing canvas.

  Honours the RDKIT_CANVAS environment variable ('cairo', 'agg', anything
  else selects sping); when unset, tries cairo, then agg, then sping.

  Returns a (useAGG, useCairo, Canvas) tuple.
  """
  useAGG = useCairo = useSping = False
  Canvas = None
  requested = os.environ.get('RDKIT_CANVAS', '')
  if not requested:
    try:
      from rdkit.Chem.Draw.cairoCanvas import Canvas
      useCairo = True
    except ImportError:
      try:
        from rdkit.Chem.Draw.aggCanvas import Canvas
        useAGG = True
      except ImportError:
        from rdkit.Chem.Draw.spingCanvas import Canvas
        useSping = True
  else:
    requested = requested.lower()
    if requested == 'cairo':
      from rdkit.Chem.Draw.cairoCanvas import Canvas
      useCairo = True
    elif requested == 'agg':
      from rdkit.Chem.Draw.aggCanvas import Canvas
      useAGG = True
    else:
      from rdkit.Chem.Draw.spingCanvas import Canvas
      useSping = True
  if useSping:
    # <- the sping canvas doesn't support unicode well
    DrawingOptions.radicalSymbol = '.'
  return useAGG, useCairo, Canvas
def _createCanvas(size):
  """Create an (image, canvas) pair of the requested pixel size."""
  useAGG, useCairo, Canvas = _getCanvas()
  if not (useAGG or useCairo):
    # The sping canvas manages its own image object.
    from rdkit.Chem.Draw.spingCanvas import Canvas
    canvas = Canvas(size=size, name='MolToImageFile')
    return canvas._image, canvas
  try:
    import Image
  except ImportError:
    from PIL import Image
  img = Image.new("RGBA", size, (0, 0, 0, 0))
  return img, Canvas(img)
def MolToImage(mol, size=(300, 300), kekulize=True, wedgeBonds=True, fitImage=False, options=None,
               canvas=None, **kwargs):
  """Returns a PIL image containing a drawing of the molecule
  ARGUMENTS:
  - kekulize: run kekulization routine on input `mol` (default True)
  - size: final image size, in pixel (default (300,300))
  - wedgeBonds: draw wedge (stereo) bonds (default True)
  - highlightAtoms: list of atoms to highlight (default [])
  - highlightMap: dictionary of (atom, color) pairs (default None)
  - highlightBonds: list of bonds to highlight (default [])
  - highlightColor: RGB color as tuple (default [1, 0, 0])
  - fitImage: scale the drawing to the image size (default False)
  - options: a DrawingOptions instance (a default one is created if None)
  - canvas: an existing canvas to draw on; when provided no PIL image is
    created and None is returned unless returnCanvas is set
  NOTE:
  use 'matplotlib.colors.to_rgb()' to convert string and
  HTML color codes into the RGB tuple representation, eg.
  from matplotlib.colors import ColorConverter
  img = Draw.MolToImage(m, highlightAtoms=[1,2], highlightColor=ColorConverter().to_rgb('aqua'))
  img.save("molecule.png")
  RETURNS:
  a PIL Image object
  """
  if not mol:
    raise ValueError('Null molecule provided')
  if canvas is None:
    img, canvas = _createCanvas(size)
  else:
    img = None
  options = options or DrawingOptions()
  if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  if 'highlightColor' in kwargs:
    # pop() so the option is not forwarded to drawer.AddMol below
    color = kwargs.pop('highlightColor', (1, 0, 0))
    options.selectColor = color
  drawer = MolDrawing(canvas=canvas, drawingOptions=options)
  if kekulize:
    from rdkit import Chem
    # work on a copy so the caller's molecule is not modified
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  if 'legend' in kwargs:
    legend = kwargs['legend']
    del kwargs['legend']
  else:
    legend = ''
  drawer.AddMol(mol, **kwargs)
  if legend:
    from rdkit.Chem.Draw.MolDrawing import Font
    bbox = drawer.boundingBoxes[mol]
    pos = size[0] / 2, int(.94 * size[1]), 0  # the 0.94 is extremely empirical
    # canvas.addCanvasPolygon(((bbox[0],bbox[1]),(bbox[2],bbox[1]),(bbox[2],bbox[3]),(bbox[0],bbox[3])),
    #                         color=(1,0,0),fill=False,stroke=True)
    # canvas.addCanvasPolygon(((0,0),(0,size[1]),(size[0],size[1]),(size[0],0) ),
    #                         color=(0,0,1),fill=False,stroke=True)
    font = Font(face='sans', size=12)
    canvas.addCanvasText(legend, pos, font)
  if kwargs.get('returnCanvas', False):
    return img, canvas, drawer
  else:
    canvas.flush()
    return img
def MolToFile(mol, fileName, size=(300, 300), kekulize=True, wedgeBonds=True, imageType=None,
              fitImage=False, options=None, **kwargs):
  """ Generates a drawing of a molecule and writes it to a file.
  If imageType is None it is derived from the file extension.
  """
  # original contribution from Uwe Hoffmann
  if not fileName:
    raise ValueError('no fileName provided')
  if not mol:
    raise ValueError('Null molecule provided')
  if imageType is None:
    # use the file extension (without the dot) as image type
    imageType = os.path.splitext(fileName)[1][1:]
  if options is None:
    options = DrawingOptions()
  useAGG, useCairo, Canvas = _getCanvas()
  if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  if useCairo or useAGG:
    canvas = Canvas(size=size, imageType=imageType, fileName=fileName)
  else:
    options.radicalSymbol = '.'  # <- the sping canvas doesn't support unicode well
    canvas = Canvas(size=size, name=fileName, imageType=imageType)
  drawer = MolDrawing(canvas=canvas, drawingOptions=options)
  if kekulize:
    from rdkit import Chem
    # work on a copy so the caller's molecule is not modified
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer.AddMol(mol, **kwargs)
  if useCairo or useAGG:
    canvas.flush()
  else:
    canvas.save()
def MolToImageFile(mol, filename, size=(300, 300), kekulize=True, wedgeBonds=True, **kwargs):
  """ DEPRECATED: please use MolToFile instead.
  Renders the molecule with MolToImage and saves the PIL image to filename.
  """
  img = MolToImage(mol, size=size, kekulize=kekulize, wedgeBonds=wedgeBonds, **kwargs)
  img.save(filename)
# Module-level Tkinter state reused by ShowMol() so repeated calls update a
# single window instead of opening a new one each time.
tkRoot = None
tkLabel = None
tkPI = None
def ShowMol(mol, size=(300, 300), kekulize=True, wedgeBonds=True, title='RDKit Molecule', **kwargs):
  """ Generates a picture of a molecule and displays it in a Tkinter window.
  The window (module-level tkRoot/tkLabel/tkPI) is created on the first call
  and re-used afterwards.
  """
  global tkRoot, tkLabel, tkPI
  try:
    import Tkinter
  except ImportError:
    import tkinter as Tkinter
  try:
    import ImageTk
  except ImportError:
    from PIL import ImageTk
  img = MolToImage(mol, size, kekulize, wedgeBonds, **kwargs)
  if not tkRoot:
    # first call: build the window and the label holding the image
    tkRoot = Tkinter.Tk()
    tkRoot.title(title)
    tkPI = ImageTk.PhotoImage(img)
    tkLabel = Tkinter.Label(tkRoot, image=tkPI)
    tkLabel.place(x=0, y=0, width=img.size[0], height=img.size[1])
  else:
    # subsequent calls: paste the new image into the existing PhotoImage
    tkPI.paste(img)
  tkRoot.geometry('%dx%d' % (img.size))
def MolToMPL(mol, size=(300, 300), kekulize=True, wedgeBonds=True, imageType=None, fitImage=False,
             options=None, **kwargs):
  """ Generates a drawing of a molecule on a matplotlib canvas and returns
  the matplotlib Figure. As a side effect, the drawn (rescaled) atom
  positions are stored on the input molecule as mol._atomPs (used e.g. by
  calcAtomGaussians).
  """
  if not mol:
    raise ValueError('Null molecule provided')
  from rdkit.Chem.Draw.mplCanvas import Canvas
  canvas = Canvas(size)
  if options is None:
    options = DrawingOptions()
    options.bgColor = None
  if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  drawer = MolDrawing(canvas=canvas, drawingOptions=options)
  # keep a handle on the original molecule: the copy made for kekulization
  # below is discarded, but _atomPs is attached to the caller's molecule
  omol = mol
  if kekulize:
    from rdkit import Chem
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer.AddMol(mol, **kwargs)
  omol._atomPs = drawer.atomPs[mol]
  for k, v in iteritems(omol._atomPs):
    omol._atomPs[k] = canvas.rescalePt(v)
  canvas._figure.set_size_inches(float(size[0]) / 100, float(size[1]) / 100)
  return canvas._figure
def calcAtomGaussians(mol, a=0.03, step=0.02, weights=None):
  """Return (X, Y, Z) grids with a 2D Gaussian centred on every atom.

  The molecule must have normalized atom positions available in mol._atomPs
  (set by MolToMPL). *a* is the standard deviation of each Gaussian, *step*
  the grid spacing on the unit square, *weights* an optional per-atom weight
  list (all 1.0 by default).

  useful things to do with these:
  fig.axes[0].imshow(z,cmap=cm.gray,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
  fig.axes[0].contour(x,y,z,20,colors='k')
  fig=Draw.MolToMPL(m);
  contribs=Crippen.rdMolDescriptors._CalcCrippenContribs(m)
  logps,mrs=zip(*contribs)
  x,y,z=Draw.calcAtomGaussians(m,0.03,step=0.01,weights=logps)
  fig.axes[0].imshow(z,cmap=cm.jet,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
  fig.axes[0].contour(x,y,z,20,colors='k',alpha=0.5)
  fig.savefig('coumlogps.colored.png',bbox_inches='tight')
  """
  import numpy

  def _gauss2d(X, Y, mux, muy, sigma):
    # Drop-in replacement for matplotlib.mlab.bivariate_normal (removed in
    # matplotlib 3.1) with sigmax == sigmay == sigma and zero covariance.
    return numpy.exp(-0.5 * (((X - mux) / sigma)**2 + ((Y - muy) / sigma)**2)) / (
      2. * numpy.pi * sigma * sigma)

  x = numpy.arange(0, 1, step)
  y = numpy.arange(0, 1, step)
  X, Y = numpy.meshgrid(x, y)
  if weights is None:
    weights = [1.] * mol.GetNumAtoms()
  Z = _gauss2d(X, Y, mol._atomPs[0][0], mol._atomPs[0][1], a) * weights[0]
  for i in range(1, mol.GetNumAtoms()):
    Zp = _gauss2d(X, Y, mol._atomPs[i][0], mol._atomPs[i][1], a)
    Z += Zp * weights[i]
  return X, Y, Z
def MolsToImage(mols, subImgSize=(200, 200), legends=None, **kwargs):
  """Return a single PIL image with all of *mols* drawn side by side."""
  try:
    import Image
  except ImportError:
    from PIL import Image
  if legends is None:
    legends = [None] * len(mols)
  res = Image.new("RGBA", (subImgSize[0] * len(mols), subImgSize[1]))
  for i, mol in enumerate(mols):
    tile = MolToImage(mol, subImgSize, legend=legends[i], **kwargs)
    res.paste(tile, (i * subImgSize[0], 0))
  return res
def _moltoimg(mol, sz, highlights, legend, **kwargs):
  """Draw one molecule to a PIL image of size sz (a (w, h) tuple).
  Uses the C++ MolDraw2DCairo renderer when available, otherwise the older
  Python MolDrawing implementation via MolToImage.
  """
  try:
    import Image
  except ImportError:
    from PIL import Image
  if not hasattr(rdMolDraw2D, 'MolDraw2DCairo'):
    img = MolToImage(mol, sz, legend=legend, highlightAtoms=highlights, **kwargs)
  else:
    nmol = rdMolDraw2D.PrepareMolForDrawing(mol, kekulize=kwargs.get('kekulize', True))
    d2d = rdMolDraw2D.MolDraw2DCairo(sz[0], sz[1])
    d2d.DrawMolecule(nmol, legend=legend, highlightAtoms=highlights)
    from io import BytesIO
    d2d.FinishDrawing()
    # the cairo drawer produces PNG bytes; load them back as a PIL image
    sio = BytesIO(d2d.GetDrawingText())
    img = Image.open(sio)
  return img
def _MolsToGridImage(mols, molsPerRow=3, subImgSize=(200, 200), legends=None,
                     highlightAtomLists=None, **kwargs):
  """ returns a PIL Image of the grid
  """
  try:
    import Image
  except ImportError:
    from PIL import Image
  if legends is None:
    legends = [''] * len(mols)
  # number of rows, rounding up for a partially filled last row
  nRows = len(mols) // molsPerRow
  if len(mols) % molsPerRow:
    nRows += 1
  res = Image.new("RGBA", (molsPerRow * subImgSize[0], nRows * subImgSize[1]), (255, 255, 255, 0))
  for i, mol in enumerate(mols):
    if mol is None:
      continue
    highlights = highlightAtomLists[i] if highlightAtomLists and highlightAtomLists[i] else None
    tile = _moltoimg(mol, subImgSize, highlights, legends[i], **kwargs)
    res.paste(tile, ((i % molsPerRow) * subImgSize[0], (i // molsPerRow) * subImgSize[1]))
  return res
def _MolsToGridSVG(mols, molsPerRow=3, subImgSize=(200, 200), legends=None, highlightAtomLists=None,
                   stripSVGNamespace=True, **kwargs):
  """ returns an SVG of the grid
  Each molecule is rendered to its own SVG; the per-molecule SVGs are split
  into header / background rect / drawing body with a regex, and the bodies
  are re-assembled into one document with translate() transforms.
  """
  # groups: (1) SVG header, (2) background <svg:rect>, (3) drawing body
  matcher = re.compile(r'^(<.*>\n)(<svg:rect .*</svg\:rect>\n)(.*)</svg\:svg>', re.DOTALL)
  if legends is None:
    legends = [''] * len(mols)
  hdr = ''
  ftr = '</svg:svg>'
  rect = ''
  nRows = len(mols) // molsPerRow
  if len(mols) % molsPerRow:
    nRows += 1
  blocks = [''] * (nRows * molsPerRow)
  fullSize = (molsPerRow * subImgSize[0], nRows * subImgSize[1])
  for i, mol in enumerate(mols):
    highlights = None
    if highlightAtomLists and highlightAtomLists[i]:
      highlights = highlightAtomLists[i]
    if mol is not None:
      nmol = rdMolDraw2D.PrepareMolForDrawing(mol, kekulize=kwargs.get('kekulize', True))
      d2d = rdMolDraw2D.MolDraw2DSVG(subImgSize[0], subImgSize[1])
      d2d.DrawMolecule(nmol, legend=legends[i], highlightAtoms=highlights)
      d2d.FinishDrawing()
      txt = d2d.GetDrawingText()
      h, r, b = matcher.match(txt).groups()
      if not hdr:
        # keep the first header, but rewrite its size to the full grid size
        hdr = h.replace("width='%dpx' height='%dpx' >" % subImgSize,
                        "width='%dpx' height='%dpx' >" % fullSize)
      if not rect:
        rect = r
      blocks[i] = b
  for i, elem in enumerate(blocks):
    row = i // molsPerRow
    col = i % molsPerRow
    elem = rect + elem
    blocks[i] = '<g transform="translate(%d,%d)" >%s</g>' % (col * subImgSize[0],
                                                             row * subImgSize[1], elem)
  res = hdr + '\n'.join(blocks) + ftr
  if stripSVGNamespace:
    res = res.replace('svg:', '')
  return res
def MolsToGridImage(mols, molsPerRow=3, subImgSize=(200, 200), legends=None,
                    highlightAtomLists=None, useSVG=False, **kwargs):
  """Draw molecules in a grid; returns SVG text when useSVG, else a PIL image."""
  renderer = _MolsToGridSVG if useSVG else _MolsToGridImage
  return renderer(mols, molsPerRow=molsPerRow, subImgSize=subImgSize, legends=legends,
                  highlightAtomLists=highlightAtomLists, **kwargs)
def ReactionToImage(rxn, subImgSize=(200, 200), **kwargs):
  """Return a PIL image of the reaction: reactant templates, an arrow tile,
  then product templates, drawn side by side.
  """
  try:
    import Image
  except ImportError:
    from PIL import Image
  # collect reactants, a None placeholder for the arrow, then products
  mols = []
  for i in range(rxn.GetNumReactantTemplates()):
    tmpl = rxn.GetReactantTemplate(i)
    tmpl.UpdatePropertyCache(False)
    mols.append(tmpl)
  mols.append(None)
  for i in range(rxn.GetNumProductTemplates()):
    tmpl = rxn.GetProductTemplate(i)
    tmpl.UpdatePropertyCache(False)
    mols.append(tmpl)
  res = Image.new("RGBA", (subImgSize[0] * len(mols), subImgSize[1]), (255, 255, 255, 0))
  for i, mol in enumerate(mols):
    if mol is not None:
      nimg = MolToImage(mol, subImgSize, kekulize=False, **kwargs)
    else:
      # the None placeholder becomes a right-pointing arrow tile
      nimg, canvas = _createCanvas(subImgSize)
      p0 = (10, subImgSize[1] // 2)
      p1 = (subImgSize[0] - 10, subImgSize[1] // 2)
      p3 = (subImgSize[0] - 20, subImgSize[1] // 2 - 10)
      p4 = (subImgSize[0] - 20, subImgSize[1] // 2 + 10)
      canvas.addCanvasLine(p0, p1, lineWidth=2, color=(0, 0, 0))
      canvas.addCanvasLine(p3, p1, lineWidth=2, color=(0, 0, 0))
      canvas.addCanvasLine(p4, p1, lineWidth=2, color=(0, 0, 0))
      if hasattr(canvas, 'flush'):
        canvas.flush()
      else:
        canvas.save()
    res.paste(nimg, (i * subImgSize[0], 0))
  return res
def MolToQPixmap(mol, size=(300, 300), kekulize=True, wedgeBonds=True, fitImage=False, options=None,
                 **kwargs):
  """ Generates a drawing of a molecule on a Qt QPixmap and returns the pixmap.
  """
  if not mol:
    raise ValueError('Null molecule provided')
  from rdkit.Chem.Draw.qtCanvas import Canvas
  canvas = Canvas(size)
  if options is None:
    options = DrawingOptions()
  options.bgColor = None
  if fitImage:
    options.dotsPerAngstrom = int(min(size) / 10)
  options.wedgeDashedBonds = wedgeBonds
  if kekulize:
    from rdkit import Chem
    # work on a copy so the caller's molecule is not modified
    mol = Chem.Mol(mol.ToBinary())
    Chem.Kekulize(mol)
  if not mol.GetNumConformers():
    from rdkit.Chem import AllChem
    AllChem.Compute2DCoords(mol)
  drawer = MolDrawing(canvas=canvas, drawingOptions=options)
  drawer.AddMol(mol, **kwargs)
  canvas.flush()
  return canvas.pixmap
| |
import copy
import sys
if sys.version_info < (3,):
    # Python 2 compatibility: use the lazy xrange as range, as in Python 3.
    range = xrange
import numpy as np
import pandas as pd
import scipy.stats as ss
from .. import families as fam
from .. import tsm as tsm
from .. import data_check as dc
from .kalman import *
class LLT(tsm.TSM):
""" Inherits time series methods from TSM class.
**** LOCAL LINEAR TREND MODEL ****
Parameters
----------
data : pd.DataFrame or np.array
Field to specify the time series data that will be used.
integ : int (default : 0)
Specifies how many time to difference the time series.
target : str (pd.DataFrame) or int (np.array)
Specifies which column name or array index to use. By default, first
column/array will be selected as the dependent variable.
"""
def __init__(self,data,integ=0,target=None):
# Initialize TSM object
super(LLT,self).__init__('LLT')
# Latent Variables
self.integ = integ
self.param_no = 3
self.max_lag = 0
self._z_hide = 0 # Whether to cutoff variance latent variables from results
self.supported_methods = ["MLE","PML","Laplace","M-H","BBVI"]
self.default_method = "MLE"
self.model_name = "LLT"
self.multivariate_model = False
# Format the data
self.data, self.data_name, self.is_pandas, self.index = dc.data_check(data,target)
self.data = self.data.astype(np.float)
self.data_original = self.data
# Difference data
for order in range(self.integ):
self.data = np.diff(self.data)
self.data_name = "Differenced " + self.data_name
self.data_length = self.data.shape[0]
self._create_latent_variables()
self.z_no = 3
    def _create_latent_variables(self):
        """ Creates model latent variables
        Returns
        ----------
        None (changes model attributes)
        """
        # One variance per disturbance; transform='exp' keeps each positive
        # while estimation happens on an unconstrained scale.
        self.latent_variables.add_z('Sigma^2 irregular', fam.Flat(transform='exp'), fam.Normal(0,3))
        self.latent_variables.add_z('Sigma^2 level', fam.Flat(transform='exp'), fam.Normal(0,3))
        self.latent_variables.add_z('Sigma^2 trend', fam.Flat(transform='exp'), fam.Normal(0,3))
    def _forecast_model(self,beta,h):
        """ Creates forecasted states and variances
        Parameters
        ----------
        beta : np.ndarray
            Contains untransformed starting values for latent variables
        h : int
            How many steps ahead to forecast
        Returns
        ----------
        a : np.ndarray
            Forecasted states
        P : np.ndarray
            Variance of forecasted states
        """
        T, Z, R, Q, H = self._ss_matrices(beta)
        return llt_univariate_kalman_fcst(self.data,Z,H,T,Q,R,0.0,h)
    def _model(self,data,beta):
        """ Creates the structure of the model (runs the Kalman filter)
        Parameters
        ----------
        data : np.array
            Contains the time series
        beta : np.array
            Contains untransformed starting values for latent variables
        Returns
        ----------
        a,P,K,F,v : np.array
            Filtered states, filtered variances, Kalman gains, F matrix, residuals
        """
        T, Z, R, Q, H = self._ss_matrices(beta)
        return llt_univariate_kalman(data,Z,H,T,Q,R,0.0)
def _ss_matrices(self,beta):
""" Creates the state space matrices required
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
T, Z, R, Q, H : np.array
State space matrices used in KFS algorithm
"""
T = np.identity(2)
T[0][1] = 1
Z = np.zeros(2)
Z[0] = 1
R = np.identity(2)
Q = np.identity(2)
H = np.identity(1)*self.latent_variables.z_list[0].prior.transform(beta[0])
Q[0][0] = self.latent_variables.z_list[1].prior.transform(beta[1])
Q[1][1] = self.latent_variables.z_list[2].prior.transform(beta[2])
return T, Z, R, Q, H
    def neg_loglik(self,beta):
        """ Creates the negative log likelihood of the model
        Parameters
        ----------
        beta : np.array
            Contains untransformed starting values for latent variables
        Returns
        ----------
        The negative of the log likelihood of the model
        """
        # Only the prediction-error decomposition terms (F, v) from the
        # Kalman filter are needed for the Gaussian likelihood.
        _, _, _, F, v = self._model(self.data,beta)
        loglik = 0.0
        for i in range(0,self.data.shape[0]):
            # log|F_t| + v_t' F_t^{-1} v_t  (pinv for numerical robustness)
            loglik += np.linalg.slogdet(F[:,:,i])[1] + np.dot(v[i],np.dot(np.linalg.pinv(F[:,:,i]),v[i]))
        return -(-((self.data.shape[0]/2)*np.log(2*np.pi))-0.5*loglik.T[0].sum())
    def mb_neg_loglik(self, beta, mini_batch):
        """ Creates the negative log likelihood of the model for a random mini batch
        Parameters
        ----------
        beta : np.array
            Contains untransformed starting values for latent variables
        mini_batch : int
            Size of each mini batch of data
        Returns
        ----------
        The negative of the log likelihood of the model (on the mini batch)
        """
        # Pick a random contiguous window of length mini_batch from the series.
        rand_int =  np.random.randint(low=0, high=self.data.shape[0]-mini_batch-self.max_lag+1)
        sample = np.arange(start=rand_int, stop=rand_int+mini_batch)
        _, _, _, F, v = self._model(self.data[sample],beta)
        loglik = 0.0
        for i in range(0,len(sample)):
            loglik += np.linalg.slogdet(F[:,:,i])[1] + np.dot(v[i],np.dot(np.linalg.pinv(F[:,:,i]),v[i]))
        return -(-((len(sample)/2)*np.log(2*np.pi))-0.5*loglik.T[0].sum())
    def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs):
        """ Makes forecast with the estimated model
        Parameters
        ----------
        h : int (default : 5)
            How many steps ahead would you like to forecast?
        past_values : int (default : 20)
            How many past observations to show on the forecast graph?
        intervals : Boolean
            Would you like to show prediction intervals for the forecast?
        Returns
        ----------
        - Plot of the forecast
        """
        import matplotlib.pyplot as plt
        import seaborn as sns
        figsize = kwargs.get('figsize',(10,7))
        nsims = kwargs.get('nsims', 200)
        if self.latent_variables.estimated is False:
            raise Exception("No latent variables estimated!")
        else:
            # Retrieve data, dates and (transformed) latent variables
            if self.latent_variables.estimation_method in ['M-H']:
                # Posterior-sampling case: average forecasts and intervals
                # over nsims draws of the latent variables.
                lower_final = 0
                upper_final = 0
                plot_values_final = 0
                date_index = self.shift_dates(h)
                plot_index = date_index[-h-past_values:]
                for i in range(nsims):
                    t_params = self.draw_latent_variables(nsims=1).T[0]
                    a, P = self._forecast_model(t_params, h)
                    plot_values = a[0][-h-past_values:]
                    forecasted_values = a[0][-h:]
                    # interval = forecast +/- 1.96 * sqrt(state var + obs var)
                    lower = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
                    upper = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
                    lower_final += np.append(plot_values[-h-1], lower)
                    upper_final += np.append(plot_values[-h-1], upper)
                    plot_values_final += plot_values
                plot_values_final = plot_values_final / nsims
                lower_final = lower_final / nsims
                upper_final = upper_final / nsims
                plt.figure(figsize=figsize)
                if intervals == True:
                    plt.fill_between(date_index[-h-1:], lower_final, upper_final, alpha=0.2)
                plt.plot(plot_index, plot_values_final)
                plt.title("Forecast for " + self.data_name)
                plt.xlabel("Time")
                plt.ylabel(self.data_name)
                plt.show()
            else:
                # Point-estimate case: a single forecast from the fitted values.
                a, P = self._forecast_model(self.latent_variables.get_z_values(),h)
                date_index = self.shift_dates(h)
                plot_values = a[0][-h-past_values:]
                forecasted_values = a[0][-h:]
                lower = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
                upper = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
                lower = np.append(plot_values[-h-1],lower)
                upper = np.append(plot_values[-h-1],upper)
                plot_index = date_index[-h-past_values:]
                plt.figure(figsize=figsize)
                if intervals == True:
                    plt.fill_between(date_index[-h-1:], lower, upper, alpha=0.2)
                plt.plot(plot_index,plot_values)
                plt.title("Forecast for " + self.data_name)
                plt.xlabel("Time")
                plt.ylabel(self.data_name)
                plt.show()
    def plot_fit(self,intervals=True,**kwargs):
        """ Plots the fit of the model
        Parameters
        ----------
        intervals : Boolean
            Whether to plot 95% confidence interval of states
        Returns
        ----------
        None (plots data and the fit: level, trend and irregular components)
        """
        import matplotlib.pyplot as plt
        import seaborn as sns
        figsize = kwargs.get('figsize',(10,7))
        series_type = kwargs.get('series_type','Smoothed')
        if self.latent_variables.estimated is False:
            raise Exception("No latent variables estimated!")
        else:
            date_index = copy.deepcopy(self.index)
            date_index = date_index[self.integ:self.data_original.shape[0]+1]
            # 'Smoothed' (default) or 'Filtered' state estimates
            if series_type == 'Smoothed':
                mu, V= self.smoothed_state(self.data,self.latent_variables.get_z_values())
            elif series_type == 'Filtered':
                mu, V, _, _, _ = self._model(self.data,self.latent_variables.get_z_values())
            else:
                mu, V = self.smoothed_state(self.data,self.latent_variables.get_z_values())
            # drop the one-step-ahead prediction at the end of the state series
            mu0 = mu[0][:-1]
            mu1 = mu[1][:-1]
            Vlev = V[0][0][:-1]
            Vtrend = V[0][1][:-1]
            plt.figure(figsize=figsize)
            plt.subplot(2, 2, 1)
            plt.title(self.data_name + " Raw and " + series_type)
            if intervals == True:
                alpha =[0.15*i/float(100) for i in range(50,12,-2)]
                plt.fill_between(date_index[2:], mu0[2:] + 1.98*np.sqrt(Vlev[2:]), mu0[2:] - 1.98*np.sqrt(Vlev[2:]), alpha=0.15,label='95% C.I.')
            plt.plot(date_index,self.data,label='Data')
            plt.plot(date_index,mu0,label=series_type,c='black')
            plt.legend(loc=2)
            plt.subplot(2, 2, 2)
            if intervals == True:
                alpha =[0.15*i/float(100) for i in range(50,12,-2)]
                plt.fill_between(date_index[2:], mu0[2:] + 1.98*np.sqrt(Vlev[2:]), mu0[2:] - 1.98*np.sqrt(Vlev[2:]), alpha=0.15,label='95% C.I.')
            plt.title(self.data_name + " Local Level")
            plt.plot(date_index,mu0,label='Local Level')
            plt.legend(loc=2)
            plt.subplot(2, 2, 3)
            if intervals == True:
                alpha =[0.15*i/float(100) for i in range(50,12,-2)]
                plt.fill_between(date_index[2:], mu1[2:] + 1.98*np.sqrt(Vtrend[2:]), mu1[2:] - 1.98*np.sqrt(Vtrend[2:]), alpha=0.15,label='95% C.I.')
            plt.title(self.data_name + " Trend")
            plt.plot(date_index,mu1,label='Stochastic Trend')
            plt.legend(loc=2)
            plt.subplot(2, 2, 4)
            plt.title("Measurement Noise")
            # irregular component = data minus the estimated level
            plt.plot(date_index[1:self.data.shape[0]],self.data[1:self.data.shape[0]]-mu0[1:self.data.shape[0]],label='Irregular term')
            plt.show()
    def predict(self, h=5, intervals=False, **kwargs):
        """ Makes forecast with the estimated model
        Parameters
        ----------
        h : int (default : 5)
            How many steps ahead would you like to forecast?
        intervals : boolean (default: False)
            Whether to return prediction intervals
        Returns
        ----------
        - pd.DataFrame with predictions (plus 1%/5%/95%/99% interval columns
          when intervals is True)
        """
        nsims = kwargs.get('nsims', 200)
        if self.latent_variables.estimated is False:
            raise Exception("No latent variables estimated!")
        else:
            # Retrieve data, dates and (transformed) latent variables
            if self.latent_variables.estimation_method in ['M-H']:
                # Posterior-sampling case: average forecasts/intervals over
                # nsims draws of the latent variables.
                lower_1_final = 0
                upper_99_final = 0
                lower_5_final = 0
                upper_95_final = 0
                forecasted_values_final = 0
                date_index = self.shift_dates(h)
                for i in range(nsims):
                    t_params = self.draw_latent_variables(nsims=1).T[0]
                    a, P = self._forecast_model(t_params, h)
                    forecasted_values = a[0][-h:]
                    # 1.96 / 2.575 are the normal quantiles for 95% / 99%
                    lower_5 = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
                    upper_95 = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
                    lower_5_final += lower_5
                    upper_95_final += upper_95
                    lower_1 = forecasted_values - 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
                    upper_99 = forecasted_values + 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(t_params[0]),0.5)
                    lower_1_final += lower_1
                    upper_99_final += upper_99
                    forecasted_values_final += forecasted_values
                forecasted_values_final = forecasted_values_final / nsims
                lower_1_final = lower_1_final / nsims
                lower_5_final = lower_5_final / nsims
                upper_95_final = upper_95_final / nsims
                upper_99_final = upper_99_final / nsims
                if intervals is False:
                    result = pd.DataFrame(forecasted_values_final)
                    result.rename(columns={0:self.data_name}, inplace=True)
                else:
                    prediction_05 = lower_5_final
                    prediction_95 = upper_95_final
                    prediction_01 = lower_1_final
                    prediction_99 = upper_99_final
                    result = pd.DataFrame([forecasted_values_final, prediction_01, prediction_05,
                        prediction_95, prediction_99]).T
                    result.rename(columns={0:self.data_name, 1: "1% Prediction Interval",
                        2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"},
                        inplace=True)
                result.index = date_index[-h:]
                return result
            else:
                # Retrieve data, dates and (transformed) latent variables
                a, P = self._forecast_model(self.latent_variables.get_z_values(),h)
                date_index = self.shift_dates(h)
                forecasted_values = a[0][-h:]
                if intervals is False:
                    result = pd.DataFrame(forecasted_values)
                    result.rename(columns={0:self.data_name}, inplace=True)
                else:
                    prediction_05 = forecasted_values - 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
                    prediction_95 = forecasted_values + 1.96*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
                    prediction_01 = forecasted_values - 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
                    prediction_99 = forecasted_values + 2.575*np.power(P[0][0][-h:] + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0]),0.5)
                    result = pd.DataFrame([forecasted_values, prediction_01, prediction_05,
                        prediction_95, prediction_99]).T
                    result.rename(columns={0:self.data_name, 1: "1% Prediction Interval",
                        2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"},
                        inplace=True)
                result.index = date_index[-h:]
                return result
    def predict_is(self, h=5, fit_once=True, fit_method='MLE', intervals=False):
        """ Makes dynamic in-sample predictions with the estimated model
        Parameters
        ----------
        h : int (default : 5)
            How many steps would you like to forecast?
        fit_once : boolean
            (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
        fit_method : string
            Which method to fit the model with
        intervals : boolean (default: False)
            Whether to return prediction intervals
        Returns
        ----------
        - pd.DataFrame with predicted values
        """
        predictions = []
        for t in range(0,h):
            # Refit (or reuse the first fit) on the data up to h-t points
            # before the end, then forecast one step ahead.
            x = LLT(integ=self.integ,data=self.data_original[:(-h+t)])
            if fit_once is False:
                x.fit(fit_method=fit_method, printer=False)
            if t == 0:
                if fit_once is True:
                    x.fit(fit_method=fit_method, printer=False)
                    saved_lvs = x.latent_variables
                predictions = x.predict(1, intervals=intervals)
            else:
                if fit_once is True:
                    x.latent_variables = saved_lvs
                predictions = pd.concat([predictions,x.predict(1, intervals=intervals)])
        predictions.rename(columns={0:self.data_name}, inplace=True)
        predictions.index = self.index[-h:]
        return predictions
    def plot_predict_is(self, h=5, fit_once=True, fit_method='MLE', **kwargs):
        """ Plots forecasts with the estimated model against data
        (Simulated prediction with data)
        Parameters
        ----------
        h : int (default : 5)
            How many steps to forecast
        fit_once : boolean
            (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
        fit_method : string
            Which method to fit the model with
        Returns
        ----------
        - Plot of the forecast against data
        """
        import matplotlib.pyplot as plt
        import seaborn as sns
        figsize = kwargs.get('figsize',(10,7))
        plt.figure(figsize=figsize)
        predictions = self.predict_is(h, fit_once=fit_once, fit_method=fit_method)
        data = self.data[-h:]
        plt.plot(predictions.index,data,label='Data')
        plt.plot(predictions.index,predictions,label='Predictions',c='black')
        plt.title(self.data_name)
        plt.legend(loc=2)
        plt.show()
def simulation_smoother(self,beta):
    """ Koopman's simulation smoother - simulates from states given
    model latent variables and observations

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    - A simulated state evolution
    """
    # State-space matrices for this parameter vector:
    # transition T, observation Z, selection R, state cov Q, obs variance H.
    T, Z, R, Q, H = self._ss_matrices(beta)
    # Generate e_t+ and n_t+
    # Observation noise draws (one extra for the final state).
    rnd_h = np.random.normal(0,np.sqrt(H),self.data.shape[0]+1)
    # Two-dimensional state noise (level + trend) with covariance Q.
    q_dist = ss.multivariate_normal([0.0, 0.0], Q)
    rnd_q = q_dist.rvs(self.data.shape[0]+1)
    # Generate a_t+ and y_t+
    a_plus = np.zeros((T.shape[0],self.data.shape[0]+1))
    # Initialize the level component from the first few observations.
    a_plus[0,0] = np.mean(self.data[0:5])
    y_plus = np.zeros(self.data.shape[0])
    for t in range(0,self.data.shape[0]+1):
        if t == 0:
            # First step uses the initial state itself in the transition.
            a_plus[:,t] = np.dot(T,a_plus[:,t]) + rnd_q[t,:]
            y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]
        else:
            # Skip the last index: y_plus has one fewer entry than a_plus.
            if t != self.data.shape[0]:
                a_plus[:,t] = np.dot(T,a_plus[:,t-1]) + rnd_q[t,:]
                y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]
    # Smooth both the real data and the simulated series, then combine
    # (Durbin & Koopman simulation-smoother recombination).
    alpha_hat,_ = self.smoothed_state(self.data,beta)
    alpha_hat_plus,_ = self.smoothed_state(y_plus,beta)
    alpha_tilde = alpha_hat - alpha_hat_plus + a_plus
    return alpha_tilde
def smoothed_state(self, data, beta):
    """ Runs the univariate Kalman filter and smoother for the model.

    Parameters
    ----------
    data : np.array
        Data to be smoothed
    beta : np.array
        Contains untransformed starting values for latent variables

    Returns
    ----------
    - Smoothed states and their variances
    """
    # Build the state-space matrices, then delegate to the KFS routine.
    T, Z, R, Q, H = self._ss_matrices(beta)
    return llt_univariate_KFS(data, Z, H, T, Q, R, 0.0)
def sample(self, nsims=1000):
    """ Samples from the posterior predictive distribution

    Parameters
    ----------
    nsims : int (default : 1000)
        How many draws from the posterior predictive distribution

    Returns
    ----------
    - np.ndarray of draws from the data
    """
    if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
        raise Exception("No latent variables estimated!")
    draws = self.draw_latent_variables(nsims=nsims)
    # Smoothed state means for each latent-variable draw (final state dropped).
    means = [self.smoothed_state(self.data, draws[:, i])[0][0][:-1]
             for i in range(nsims)]
    # Observation noise scale comes from the first latent variable of each draw.
    return np.array([np.random.normal(
        means[i],
        np.sqrt(self.latent_variables.z_list[0].prior.transform(draws[0, i])),
        means[i].shape[0]) for i in range(nsims)])
def plot_sample(self, nsims=10, plot_data=True, **kwargs):
    """
    Plots draws from the posterior predictive density against the data

    Parameters
    ----------
    nsims : int (default : 10)
        How many draws from the posterior predictive distribution
    plot_data : boolean
        Whether to plot the data or not
    """
    if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
        raise Exception("No latent variables estimated!")
    import matplotlib.pyplot as plt
    import seaborn as sns

    plt.figure(figsize=kwargs.get('figsize', (10, 7)))
    plt.plot(self.index, self.sample(nsims).T, label='Posterior Draws', alpha=1.0)
    if plot_data is True:
        plt.plot(self.index, self.data, label='Data', c='black', alpha=0.5,
                 linestyle='', marker='s')
    plt.title(self.data_name)
    plt.show()
def ppc(self, nsims=1000, T=np.mean):
    """ Computes posterior predictive p-value

    Parameters
    ----------
    nsims : int (default : 1000)
        How many draws for the PPC
    T : function
        A discrepancy measure - e.g. np.mean, np.std, np.max

    Returns
    ----------
    - float (posterior predictive p-value)
    """
    if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
        raise Exception("No latent variables estimated!")
    # The original also computed lv_draws/mus/data_draws here, but those
    # results were never used (self.sample() performs its own draws), so the
    # dead computation -- which doubled the cost of every call -- is removed.
    T_sims = T(self.sample(nsims=nsims), axis=1)
    T_actual = T(self.data)
    # Fraction of simulated discrepancies that exceed the observed one.
    return len(T_sims[T_sims > T_actual]) / nsims
def plot_ppc(self, nsims=1000, T=np.mean, **kwargs):
    """ Plots histogram of the discrepancy from draws of the posterior

    Parameters
    ----------
    nsims : int (default : 1000)
        How many draws for the PPC
    T : function
        A discrepancy measure - e.g. np.mean, np.std, np.max
    """
    if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
        raise Exception("No latent variables estimated!")
    import matplotlib.pyplot as plt
    import seaborn as sns
    figsize = kwargs.get('figsize', (10, 7))
    # The original also computed lv_draws/mus/data_draws here, but those
    # results were never used (self.sample() performs its own draws), so the
    # dead computation -- which doubled the cost of every call -- is removed.
    T_sim = T(self.sample(nsims=nsims), axis=1)
    T_actual = T(self.data)
    # Human-readable label for common discrepancy functions.
    if T == np.mean:
        description = " of the mean"
    elif T == np.max:
        description = " of the maximum"
    elif T == np.min:
        description = " of the minimum"
    elif T == np.median:
        description = " of the median"
    else:
        description = ""
    plt.figure(figsize=figsize)
    ax = plt.subplot()
    ax.axvline(T_actual)
    sns.distplot(T_sim, kde=False, ax=ax)
    ax.set(title='Posterior predictive' + description, xlabel='T(x)', ylabel='Frequency');
    plt.show()
| |
import datetime
import importlib
import io
import re
import select
import socket
import ssl
import time
from pyromancer import utils
class Pyromancer(object):
    """IRC bot entry point: loads settings, connects, and dispatches lines.

    The constructor only prepares state (settings, database, commands and
    timers); ``run()`` actually connects and starts the blocking loop.
    """

    def __init__(self, settings_path):
        self.settings = Settings(settings_path)
        self.setup_database()
        self.find_commands()
        self.find_timers()

    def run(self):
        """Connect to the configured server and enter the main loop."""
        self.connect()
        self.listen()

    def connect(self):
        """Open the connection and perform the IRC NICK/USER handshake."""
        self.connection = Connection(
            self.settings.host, self.settings.port, self.settings.encoding,
            self.settings.ssl)
        self.online = True
        self.connect_time = datetime.datetime.now()
        self.connection.write('NICK {}\n'.format(self.settings.nick))
        self.connection.write('USER {0} {1} {1} :{2}\n'.format(
            self.settings.nick, self.settings.host, self.settings.real_name))
        self.connection.me.nick = self.settings.nick

    def listen(self):
        """Main loop: read available lines, dispatch them, fire due timers."""
        self.online = True
        # Loop iterations per second; controls the sleep between ticks.
        ticks = self.settings.ticks
        while self.online:
            self.connection.read()
            for line in self.connection.buffer.lines():
                self.process(line)
            for timer in self.timers:
                timer.match(self.timers, self.connect_time, self.connection,
                            self.settings)
            time.sleep(1.0 / ticks)

    def process(self, line):
        """Parse one raw line, answer PINGs, and try every known command."""
        line = Line(line, self.connection)
        if line[0] == 'PING':
            self.connection.write('PONG {}\n'.format(line[1]))
        for c in self.commands:
            c.command.match(line, self.timers, self.connection, self.settings)

    def find_commands(self):
        """Collect functions carrying a ``command`` attribute from packages."""
        self.commands = []
        utils.find_functions(
            self.settings.packages, self.commands, 'commands',
            'disabled_commands', when=lambda f: hasattr(f, 'command'))

    def find_timers(self):
        """Collect functions carrying a ``timer`` attribute from packages."""
        self.timers = []
        utils.find_functions(
            self.settings.packages, self.timers, 'timers', 'disabled_timers',
            when=lambda f: hasattr(f, 'timer'), ret=lambda f: f.timer)

    def setup_database(self):
        """Import every package's models and create tables, if a DB is set."""
        if self.settings.database:
            from sqlalchemy import create_engine
            from pyromancer.database import Session, Base
            engine = create_engine(self.settings.database)
            Session.configure(bind=engine)
            for package in self.settings.packages:
                # Packages may be declared as (name, options) tuples.
                if isinstance(package, tuple):
                    package = package[0]
                module_name = '{}.models'.format(package)
                try:
                    importlib.import_module(module_name)
                except ImportError:
                    # Package has no models module; nothing to register.
                    continue
            Base.metadata.create_all(bind=engine)
class Settings(object):
    """Layered settings: attribute lookups fall through all installed packages.

    ``path`` is the dotted path of the main settings module (for example
    ``'mybot.settings'``); its top-level package is treated as the primary
    package and is prepended to ``packages`` when missing.
    """

    def __init__(self, path):
        main_settings = importlib.import_module(path)
        self.packages = getattr(main_settings, 'packages', [])
        self.package_settings = {}
        # 'mybot.settings' -> package name 'mybot'.
        self.package_name, _ = path.split('.', 1)
        if self.package_name not in self.packages:
            self.packages.insert(0, self.package_name)
        for package in self.packages:
            # Packages may be declared as (name, options) tuples.
            if isinstance(package, tuple):
                package = package[0]
            if package == self.package_name:
                module = main_settings
            else:
                module = importlib.import_module('{}.settings'.format(package))
            self.package_settings[package] = module
        # Fall back to pyromancer's bundled defaults unless pyromancer is
        # itself listed as a package (then it is searched like the others).
        self.global_settings = None
        if 'pyromancer' not in self.packages:
            self.global_settings = importlib.import_module(
                'pyromancer.settings')

    def __getattr__(self, item):
        # Only invoked for attributes not found normally: search each
        # package's settings module in order, then the global defaults.
        for package in self.packages:
            if isinstance(package, tuple):
                package = package[0]
            if hasattr(self.package_settings[package], item):
                return getattr(self.package_settings[package], item)
        # hasattr(None, item) is False, so this is safe when there are no
        # global settings loaded.
        if hasattr(self.global_settings, item):
            return getattr(self.global_settings, item)
        raise AttributeError('No such setting "{}" found in any of the '
                             'installed packages'.format(item))
class LineBuffer(object):
    """Accumulates raw bytes and yields complete, decoded lines.

    Based on the irc library's DecodingLineBuffer
    (https://bitbucket.org/jaraco/irc). Incomplete trailing data stays
    buffered until more bytes arrive.
    """

    # Lines are terminated by LF, optionally preceded by CR.
    line_sep_exp = re.compile(b'\r?\n')

    def __init__(self, encoding='utf-8'):
        self.buffer = b''
        self.encoding = encoding

    def feed(self, bytes):
        """Append raw bytes to the buffer."""
        self.buffer += bytes

    def lines(self):
        """Yield each complete line, decoded; keep the unfinished tail."""
        *complete, self.buffer = self.line_sep_exp.split(self.buffer)
        for raw in complete:
            yield raw.decode(self.encoding, 'strict')

    def __iter__(self):
        return self.lines()

    def __len__(self):
        return len(self.buffer)
class Connection(object):
    """A non-blocking (optionally TLS-wrapped) TCP connection to a server."""

    def __init__(self, host, port, encoding='utf8', use_ssl=False):
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((host, port))
        # We need non-blocking so it won't hang when there is no data.
        self.socket.setblocking(False)
        if use_ssl:
            # ssl.wrap_socket() was deprecated in Python 3.7 and removed in
            # 3.12; build an explicit SSLContext instead.  The legacy helper
            # performed no certificate or hostname verification, so both are
            # disabled here to keep behaviour unchanged.
            context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
            self.socket = context.wrap_socket(
                self.socket, do_handshake_on_connect=False)
            # We need to do the handshake manually, because we use a
            # non-blocking socket.
            # https://docs.python.org/3/library/ssl.html#ssl-nonblocking
            while True:
                try:
                    self.socket.do_handshake()
                    break
                except ssl.SSLWantReadError:
                    select.select([self.socket], [], [])
                except ssl.SSLWantWriteError:
                    select.select([], [self.socket], [])
        self.encoding = encoding
        self.buffer = LineBuffer(self.encoding)
        self.me = User('')

    @property
    def users(self):
        """All known users: ourselves plus everyone in our channels."""
        users = [self.me]
        for chan in self.me.channels:
            users.extend(chan.users)
        return list(set(users))

    @property
    def channels(self):
        """Channels we are currently in."""
        return self.me.channels

    def write(self, data):
        """Send one protocol line (newline appended, encoded)."""
        self.socket.send('{}\n'.format(data).encode(self.encoding))

    def read(self, bytes=4096):
        """Read available data into the line buffer; no-op when none ready."""
        try:
            self.buffer.feed(self.socket.recv(bytes))
        except (io.BlockingIOError, ssl.SSLWantReadError):
            pass

    def msg(self, target, msg):
        """Send a PRIVMSG to a channel or nick."""
        self.write('PRIVMSG {} :{}'.format(target, msg))
class User(object):
    """An IRC user (nick, optionally username/host) and their channels."""

    name = None
    host = None
    auth = None

    def __init__(self, str):
        # A full "Nick!Name@Host" string carries all three fields;
        # anything else is treated as a bare nick.
        if '@' not in str:
            self.nick = str
        else:
            self.nick, self.name, self.host = self.split_user_str(str)
        self.channels = []

    def __repr__(self):
        if not self.nick:
            return self.host
        return '{0.nick}@{0.host}'.format(self)

    @staticmethod
    def split_user_str(str):
        """Split 'Nick!Name@Host' into (nick, name, host); ':' stripped off nick."""
        nick, remainder = str.split('!', 1)
        name, host = remainder.split('@', 1)
        return nick.lstrip(':'), name, host

    @classmethod
    def get(cls, user_str, pool):
        """Find a known user by nick in `pool` (a Match, Connection or list)."""
        if isinstance(pool, Match):
            pool = pool.connection
        if isinstance(pool, Connection):
            pool = pool.users
        nick = cls.split_user_str(user_str)[0] if '@' in user_str else user_str
        for known in pool:
            if known.nick == nick:
                return known
class Channel(object):
    """An IRC channel and the users currently known to be in it."""

    def __init__(self, name):
        # Channel names arrive with a leading ':' from the protocol.
        self.name = name.lstrip(':')
        self.users = []

    def __repr__(self):
        return self.name

    @classmethod
    def get(cls, name, pool):
        """Find a known channel by name in `pool` (Match, Connection or list)."""
        if isinstance(pool, Match):
            pool = pool.connection
        if isinstance(pool, Connection):
            pool = pool.channels
        name = name.lstrip(':')
        for channel in pool:
            if channel.name == name:
                return channel
class Match(object):
    """
    Convenience layer handed to a command when its regex matches a line.

    One instance is created per matching command; it gives indexed access to
    the regex capture groups and shortcut helpers (currently messaging) so
    commands don't have to touch the connection objects directly.
    """

    def __init__(self, match, line, connection, settings=None):
        self.match = match
        self.line = line
        self.connection = connection
        self.settings = settings

    def __getitem__(self, item):
        # Missing capture groups read as the empty string instead of raising.
        try:
            return self.match.group(item)
        except IndexError:
            return ''

    def msg(self, message, *args, **kwargs):
        """Send `message` through the connection, optionally formatted.

        Without an explicit `target` keyword the reply goes back to where the
        triggering line came from: the sender for a PM, the channel otherwise.
        Unless `raw=True`, the message is formatted with the standard Python
        Format Mini-Language using the given args/kwargs plus shortcuts:
        `m` (this Match) and the IRC control characters `b` (bold),
        `c` (color) and `u` (underline).
        http://docs.python.org/3.3/library/string.html#format-string-syntax
        """
        target = kwargs.pop('target', None)
        raw = kwargs.pop('raw', False)
        if not target:
            if self.line.pm:
                target = self.line.sender.nick
            else:
                target = self.line.target
        if not raw:
            format_args = {
                'm': self,
                'b': chr(2),
                'c': chr(3),
                'u': chr(31),
            }
            format_args.update(kwargs)
            try:
                message = message.format(*args, **format_args)
            except IndexError:
                # Allow the convenience form: msg(text, [arg1, arg2], ...).
                if len(args) == 1 and isinstance(args[0], list):
                    message = message.format(*args[0], **format_args)
                else:
                    raise
        self.connection.msg(target, message)
# Numeric IRC server replies are exactly three digits (e.g. "001", "433").
CODE_PATTERN = re.compile(r'\d{3}')
# Word commands such as PING, JOIN, PRIVMSG are 4-5 uppercase letters.
COMMAND_PATTERN = re.compile(r'[A-Z]{4,5}')
class Line(object):
    """A single parsed IRC protocol line."""

    def __init__(self, data, connection):
        # Raw line with the leading ':' prefix marker removed.
        self.raw = data.lstrip(':')
        self.datetime = datetime.datetime.now()
        self.connection = connection
        self.parse()

    def __getitem__(self, item):
        # For user messages, index words of the message text; otherwise index
        # the raw protocol parts.  Out-of-range access yields ''.
        try:
            if self.usermsg:
                return self.full_msg.split(' ')[item]
            else:
                return self.parts[item]
        except IndexError:
            return ''

    def __repr__(self):
        return '{0.sender}: {0.full_msg}'.format(self) if self.usermsg else \
            self.raw

    def parse(self):
        """Split the raw line and derive message/code/command attributes."""
        self.parts = self.raw.split()
        self.privmsg = self.parts[1] == 'PRIVMSG'
        self.notice = self.parts[1] == 'NOTICE'
        self.usermsg = self.privmsg or self.notice
        if self.usermsg:
            # ":Nick!Name@Host PRIVMSG #Chan :Hello world!"
            self.target = self.parts[2]
            # Private message when the target is not a channel name.
            self.pm = self.target[0] != '#'
            # Message text re-joined, with the leading ':' dropped.
            self.full_msg = ' '.join(self.parts[3:])[1:]
        elif CODE_PATTERN.match(self.parts[1]):
            # Numeric server reply, e.g. 001.
            self.code = int(self.parts[1])
        elif COMMAND_PATTERN.match(self.parts[1]):
            # Word command, e.g. JOIN, PART.
            self.command = self.parts[1]
        if self.usermsg or getattr(self, 'command', None):
            # Resolve the sender against known users; create one if unknown.
            self.sender = User.get(self.parts[0], self.connection)
            if not self.sender:
                self.sender = User(self.parts[0])
            self.channel = None
            if not getattr(self, 'pm', False):
                self.channel = Channel.get(self.parts[2], self.connection)
                if not self.channel:
                    self.channel = Channel(self.parts[2])
class Timer(object):
    """A scheduled message or callback fired from the bot's main loop.

    ``scheduled`` may be a ``datetime.datetime`` (fire at that moment) or a
    ``datetime.timedelta`` (fire repeatedly at that interval); a timer may
    also be marked ``direct`` to fire on the very next tick.
    """

    def __init__(self, scheduled, msg_or_command=None, *args, **kwargs):
        self.scheduled = scheduled
        # Fire immediately on the next tick, regardless of the schedule.
        self.direct = kwargs.pop('direct', False)
        # Number of firings left before removal; 0 means unlimited.
        self.remaining = kwargs.pop('count', 0)
        target = kwargs.pop('target', None)
        self.msg_tuple = None
        self.function = None
        if callable(msg_or_command):
            self.function = msg_or_command
        else:
            # A plain message needs both a target and the message itself.
            if target is not None and msg_or_command is not None:
                self.msg_tuple = (target, msg_or_command, args, kwargs,)

    def __eq__(self, other):
        return (self.scheduled == other.scheduled and
                self.function is other.function and
                self.msg_tuple == other.msg_tuple)

    def match(self, timers, connect_time, connection, settings):
        """Fire the timer if due; counted timers remove themselves when spent."""
        if self.matches(connect_time):
            self.last_time = datetime.datetime.now()
            match = Match(None, None, connection, settings)
            if self.function is not None:
                result = self.function(match)
                if result is not None:
                    self.send_messages(result, match, timers)
            if self.msg_tuple is not None:
                self.send_messages(self.msg_tuple, match, timers)
            if self.remaining > 0:
                self.remaining -= 1
                if self.remaining == 0:
                    timers.remove(self)

    def matches(self, connect_time):
        """Return True when the timer is due to fire now."""
        if self.direct:
            # One-shot "fire immediately" flag; cleared after first use.
            self.direct = False
            return True
        if isinstance(self.scheduled, datetime.datetime):
            next_time = self.scheduled
        if isinstance(self.scheduled, datetime.timedelta):
            # Interval timers run relative to the last firing (or connect).
            if hasattr(self, 'last_time'):
                next_time = self.last_time + self.scheduled
            else:
                next_time = connect_time + self.scheduled
        # NOTE(review): if `scheduled` is neither a datetime nor a timedelta,
        # `next_time` is unbound here and this raises UnboundLocalError.
        return datetime.datetime.now() >= next_time

    def send_messages(self, result, match, timers):
        """Send produced messages via the match; new Timers are registered."""
        for r in utils.process_messages(result, with_target=True):
            if isinstance(r, Timer):
                timers.append(r)
            else:
                # r is (target, message, args, kwargs).
                match.msg(r[1], *r[2], target=r[0], **r[3])
| |
import omf
import numpy as np
import discretize
def ravel_data_array(arr, nx, ny, nz):
    """Reorder a flat data array from ``discretize`` to ``OMF`` ordering.

    ``discretize`` stores 3D cell data in a 1D array ordered along the
    x-axis first, then y, then z.  The open mining format (OMF) orders the
    same data along the z-axis first, then y, then x.  This function takes
    an array in discretize ordering and returns a flat copy in OMF ordering.

    Parameters
    ----------
    arr : numpy.ndarray
        A 1D vector or nD array ordered along the x, then y, then z axes
    nx : int
        Number of cells along the x-axis
    ny : int
        Number of cells along the y-axis
    nz : int
        Number of cells along the z-axis

    Returns
    -------
    numpy.ndarray (n_cells)
        A flattened 1D array ordered according to the open mining format
    """
    # Interpret the flat input as (nz, ny, nx) in C order, then flatten in
    # Fortran order to obtain the OMF (z fastest-varying -> x slowest) layout.
    return np.asarray(arr).reshape((nz, ny, nx), order="C").ravel(order="F")
def unravel_data_array(arr, nx, ny, nz):
    """Reorder a flat data array from ``OMF`` to ``discretize`` ordering.

    This is the inverse of ``ravel_data_array``: OMF stores 3D cell data
    ordered along the z-axis first, then y, then x, whereas ``discretize``
    orders it along x first, then y, then z.

    Parameters
    ----------
    arr : numpy.ndarray
        A 1D vector or nD array ordered along the z, then y, then x axes
    nx : int
        Number of cells along the x-axis
    ny : int
        Number of cells along the y-axis
    nz : int
        Number of cells along the z-axis

    Returns
    -------
    (n_cells) numpy.ndarray
        A flattened 1D array ordered according to the discretize format
    """
    # Undo the OMF layout: read the flat input as (nz, ny, nx) in Fortran
    # order, then flatten in C order to restore discretize's x-fastest layout.
    return np.asarray(arr).reshape((nz, ny, nx), order="F").ravel(order="C")
class InterfaceOMF(object):
    """
    The ``InterfaceOMF`` class was designed for easy conversion between
    ``discretize`` objects and `open mining format <https://www.seequent.com/the-open-mining-format/>`__ (OMF) objects.
    Examples include: meshes, models and data arrays.
    """

    # NOTE(review): the ``*_to_omf`` converters below take the mesh as their
    # first argument and are looked up through the class, so they behave like
    # static methods even though they are not decorated as such -- confirm
    # this matches how the mixin is consumed elsewhere.
    def _tensor_mesh_to_omf(mesh, models=None):
        """
        Constructs an :class:`omf.VolumeElement` object of this tensor mesh and
        the given models as cell data of that grid.

        Parameters
        ----------
        mesh : discretize.TensorMesh
            The tensor mesh to convert to a :class:`omf.VolumeElement`
        models : dict(numpy.ndarray)
            Name('s) and array('s). Match number of cells
        """
        if models is None:
            models = {}
        # Make the geometry
        geometry = omf.VolumeGridGeometry()
        # Set tensors
        tensors = mesh.h
        if len(tensors) < 1:
            raise RuntimeError(
                "Your mesh is empty... fill it out before converting to OMF"
            )
        elif len(tensors) == 1:
            # Lower-dimensional meshes are padded with a single zero-length
            # tensor so OMF always receives three axes.
            geometry.tensor_u = tensors[0]
            geometry.tensor_v = np.array(
                [
                    0.0,
                ]
            )
            geometry.tensor_w = np.array(
                [
                    0.0,
                ]
            )
        elif len(tensors) == 2:
            geometry.tensor_u = tensors[0]
            geometry.tensor_v = tensors[1]
            geometry.tensor_w = np.array(
                [
                    0.0,
                ]
            )
        elif len(tensors) == 3:
            geometry.tensor_u = tensors[0]
            geometry.tensor_v = tensors[1]
            geometry.tensor_w = tensors[2]
        else:
            raise RuntimeError("This mesh is too high-dimensional for OMF")
        # Set rotation axes
        geometry.axis_u = mesh.axis_u
        geometry.axis_v = mesh.axis_v
        geometry.axis_w = mesh.axis_w
        # Set the origin
        geometry.origin = mesh.origin
        # Make sure the geometry is built correctly
        geometry.validate()
        # Make the volume elemet (the OMF object)
        omfmesh = omf.VolumeElement(
            geometry=geometry,
        )
        # Add model data arrays onto the cells of the mesh
        omfmesh.data = []
        for name, arr in models.items():
            # Convert discretize (x, y, z) ordering to OMF (z, y, x) ordering.
            data = omf.ScalarData(
                name=name,
                array=ravel_data_array(arr, *mesh.shape_cells),
                location="cells",
            )
            omfmesh.data.append(data)
        # Validate to make sure a proper OMF object is returned to the user
        omfmesh.validate()
        return omfmesh

    def _tree_mesh_to_omf(mesh, models=None):
        """Tree meshes cannot be represented in the current OMF spec."""
        raise NotImplementedError("Not possible until OMF v2 is released.")

    def _curvilinear_mesh_to_omf(mesh, models=None):
        """Curvilinear meshes cannot currently be converted."""
        raise NotImplementedError("Not currently possible.")

    def _cyl_mesh_to_omf(mesh, models=None):
        """Cylindrical meshes cannot currently be converted."""
        raise NotImplementedError("Not currently possible.")

    def to_omf(mesh, models=None):
        """Convert this mesh object to it's proper ``omf`` data object with
        the given model dictionary as the cell data of that dataset.

        Parameters
        ----------
        models : dict of [str, (n_cells) numpy.ndarray], optional
            Name('s) and array('s).

        Returns
        -------
        omf.volume.VolumeElement
        """
        # TODO: mesh.validate()
        # Dispatch on the discretize mesh type string.
        converters = {
            # TODO: 'tree' : InterfaceOMF._tree_mesh_to_omf,
            "tensor": InterfaceOMF._tensor_mesh_to_omf,
            # TODO: 'curv' : InterfaceOMF._curvilinear_mesh_to_omf,
            # TODO: 'CylindricalMesh' : InterfaceOMF._cyl_mesh_to_omf,
        }
        key = mesh._meshType.lower()
        try:
            convert = converters[key]
        except KeyError:
            raise RuntimeError(
                "Mesh type `{}` is not currently supported for OMF conversion.".format(
                    key
                )
            )
        # Convert the data object
        return convert(mesh, models=models)

    @staticmethod
    def _omf_volume_to_tensor(element):
        """Convert an :class:`omf.VolumeElement` to :class:`discretize.TensorMesh`"""
        geometry = element.geometry
        h = [geometry.tensor_u, geometry.tensor_v, geometry.tensor_w]
        mesh = discretize.TensorMesh(h)
        mesh.axis_u = geometry.axis_u
        mesh.axis_v = geometry.axis_v
        mesh.axis_w = geometry.axis_w
        mesh.origin = geometry.origin
        data_dict = {}
        for data in element.data:
            # NOTE: this is agnostic about data location - i.e. nodes vs cells
            data_dict[data.name] = unravel_data_array(
                np.array(data.array), *mesh.shape_cells
            )
        # Return TensorMesh and data dictionary
        return mesh, data_dict

    @staticmethod
    def from_omf(element):
        """Convert an OMF element to it's proper ``discretize`` type.

        Automatically determines the output type. Returns both the mesh and a
        dictionary of model arrays.

        Parameters
        ----------
        element : omf.volume.VolumeElement
            The open mining format volume element object

        Returns
        -------
        mesh : discretize.TensorMesh
            The returned mesh type will be appropriately based on the input `element`.
        models : dict of [str, (n_cells) numpy.ndarray]
            The models contained in `element`

        Notes
        -----
        Currently only :class:discretize.TensorMesh is supported.
        """
        element.validate()
        # Dispatch on the OMF element class name.
        converters = {
            omf.VolumeElement.__name__: InterfaceOMF._omf_volume_to_tensor,
        }
        key = element.__class__.__name__
        try:
            convert = converters[key]
        except KeyError:
            raise RuntimeError(
                "OMF type `{}` is not currently supported for conversion.".format(key)
            )
        # Convert the data object
        return convert(element)
| |
#!/usr/bin/python
# coding=UTF-8
# ----------------------------------------------------------------------------
#
# DRCONTROL.PY
#
# Copyright (C) 2012 Sebastian Sjoholm, sebastian.sjoholm@gmail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Version history can be found at
# http://code.google.com/p/drcontrol/wiki/VersionHistory
#
# $Rev$
# $Date$
#
# ----------------------------------------------------------------------------
from optparse import OptionParser
from pylibftdi import Driver
from pylibftdi import BitBangDevice
from ctypes.util import find_library
import sys
import time
# ----------------------------------------------------------------------------
# VARIABLE CLASSS
# ----------------------------------------------------------------------------
class app_data:
    """Static application metadata: name, version, build info and author."""

    def __init__(self, name="DRControl", version="0.12", date="$Date$",
                 rev="$Rev$", author="Sebastian Sjoholm"):
        self.name = name
        self.version = version
        # SVN keyword-substituted strings identify the build.
        self.build = date
        self.rev = rev
        self.author = author
class cmdarg_data:
    """Holds the parsed command-line options: device, relay, command, verbosity."""

    def __init__(self, device="", relay="", command="", verbose=False):
        self.device = device
        self.relay = relay
        self.command = command
        self.verbose = verbose
class relay_data(dict):
    """Maps a relay identifier ("1".."8" or "all") to its hex bit-mask string.

    BUG FIX: the original ``__getitem__`` returned ``self[key]`` and ``keys``
    returned ``self.keys()`` -- both recursed infinitely on any call.  They
    now delegate to the ``address`` table as intended.
    """
    address = {
        "1":"2",
        "2":"8",
        "3":"20",
        "4":"80",
        "5":"1",
        "6":"4",
        "7":"10",
        "8":"40",
        "all":"FF"
    }

    def __getitem__(self, key):
        # Hex mask string for the relay; raises KeyError for unknown relays.
        return self.address[key]

    def keys(self):
        # The valid relay identifiers.
        return self.address.keys()
# ----------------------------------------------------------------------------
# testBit() returns a nonzero result, 2**offset, if the bit at 'offset' is one.
# http://wiki.python.org/moin/BitManipulation
# ----------------------------------------------------------------------------
def testBit(int_type, offset):
mask = 1 << offset
return(int_type & mask)
def get_relay_state( data, relay ):
    """Return a nonzero value when the given relay's bit is set in `data`.

    `data` is the FTDI bit-bang port byte; `relay` is the relay number as a
    string ("1".."8").  Unknown relay numbers return None, as before.

    Bit positions are derived from relay_data.address: relay 1=0x02 (bit 1),
    2=0x08 (bit 3), 3=0x20 (bit 5), 4=0x80 (bit 7), 5=0x01 (bit 0),
    6=0x04 (bit 2), 7=0x10 (bit 4), 8=0x40 (bit 6).

    BUG FIX: the original tested bits 2/4/6/8 for relays 5-8, which read the
    neighbouring relay's bit -- and bit 8 is outside the port byte entirely,
    so relay 8 always reported OFF.  The table below matches the masks the
    rest of the program writes with.
    """
    bit_offsets = {
        "1": 1, "2": 3, "3": 5, "4": 7,
        "5": 0, "6": 2, "7": 4, "8": 6,
    }
    if relay in bit_offsets:
        return data & (1 << bit_offsets[relay])
# ----------------------------------------------------------------------------
# LIST_DEVICES()
#
# Routine modified from the original pylibftdi example by Ben Bass
# ----------------------------------------------------------------------------
def list_devices():
print "Vendor\t\tProduct\t\t\tSerial"
dev_list = []
for device in Driver().list_devices():
device = map(lambda x: x.decode('latin1'), device)
vendor, product, serial = device
print "%s\t\t%s\t\t%s" % (vendor, product, serial)
# ----------------------------------------------------------------------------
# SET_RELAY()
#
# Set specified relay to chosen state
# ----------------------------------------------------------------------------
def set_relay():
if cmdarg.verbose:
print "Device:\t\t" + cmdarg.device
print "Send command:\tRelay " + cmdarg.relay + " (0x" + relay.address[cmdarg.relay] + ") to " + cmdarg.command.upper()
try:
with BitBangDevice(cmdarg.device) as bb:
# Action towards specific relay
if cmdarg.relay.isdigit():
if int(cmdarg.relay) >= 1 and int(cmdarg.relay) <= 8:
# Turn relay ON
if cmdarg.command == "on":
if cmdarg.verbose:
print "Relay " + str(cmdarg.relay) + " to ON"
bb.port |= int(relay.address[cmdarg.relay], 16)
# Turn relay OFF
elif cmdarg.command == "off":
if cmdarg.verbose:
print "Relay " + str(cmdarg.relay) + " to OFF"
bb.port &= ~int(relay.address[cmdarg.relay], 16)
# Print relay status
elif cmdarg.command == "state":
state = get_relay_state( bb.port, cmdarg.relay )
if state == 0:
if cmdarg.verbose:
print "Relay " + cmdarg.relay + " state:\tOFF (" + str(state) + ")"
else:
print "OFF"
else:
if cmdarg.verbose:
print "Relay " + cmdarg.relay + " state:\tON (" + str(state) + ")"
else:
print "ON"
# Action towards all relays
elif cmdarg.relay == "all":
if cmdarg.command == "on":
if cmdarg.verbose:
print "Relay " + str(cmdarg.relay) + " to ON"
bb.port |= int(relay.address[cmdarg.relay], 16)
elif cmdarg.command == "off":
if cmdarg.verbose:
print "Relay " + str(cmdarg.relay) + " to OFF"
bb.port &= ~int(relay.address[cmdarg.relay], 16)
elif cmdarg.command == "state":
for i in range(1,8):
state = get_relay_state( bb.port, str(i) )
if state == 0:
if cmdarg.verbose:
print "Relay " + str(i) + " state:\tOFF (" + str(state) + ")"
else:
print "OFF"
else:
if cmdarg.verbose:
print "Relay " + str(i) + " state:\tON (" + str(state) + ")"
else:
print "ON"
else:
print "Error: Unknown command"
else:
print "Error: Unknown relay number"
sys.exit(1)
except Exception, err:
print "Error: " + str(err)
sys.exit(1)
def check():
    """Sanity-check the runtime: Python version and libftdi availability.

    Exits with status 1 if either requirement is missing.
    """
    # Check python version
    if sys.hexversion < 0x02060000:
        print "Error: Your Python need to be 2.6 or newer"
        sys.exit(1)
    # Check availability on library, this check is also done in pylibftdi
    ftdi_lib = find_library('ftdi')
    if ftdi_lib is None:
        print "Error: The pylibftdi library not found"
        sys.exit(1)
if __name__ == '__main__':
    # Init objects
    cmdarg = cmdarg_data()
    relay = relay_data()
    app = app_data()
    # Do system check
    check()
    # Command-line interface definition.
    parser = OptionParser()
    parser.add_option("-d", "--device", action="store", type="string", dest="device", help="The device serial, example A6VV5PHY")
    parser.add_option("-l", "--list", action="store_true", dest="list", default=False, help="List all devices")
    parser.add_option("-r", "--relay", action="store", type="string", dest="relay", help="Relay to command by number: 1...8 or all")
    parser.add_option("-c", "--command", action="store", type="string", dest="command", help="State: on, off, state")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Verbose, print all info on screen")
    (options, args) = parser.parse_args()
    if options.verbose:
        cmdarg.verbose = options.verbose
        # Banner only in verbose mode.
        print app.name + " " + app.version
    else:
        cmdarg.verbose = False
    # Listing devices is handled first and exits immediately.
    if options.list:
        list_devices()
        sys.exit(0)
    if options.relay or options.command:
        if not options.device:
            # NOTE(review): execution continues after this message -- the
            # following `if options.device` block is simply skipped, so the
            # script exits quietly with status 0.  Confirm whether a
            # sys.exit(1) was intended here.
            print "Error: Device missing"
        if options.device:
            if not options.relay:
                print "Error: Need to state which relay"
                sys.exit(1)
            if not options.command:
                print "Error: Need to specify which relay state"
                sys.exit(1)
            # Normalize and hand over to the relay driver.
            cmdarg.device = options.device
            cmdarg.relay = options.relay.lower()
            cmdarg.command = options.command.lower()
            set_relay()
            sys.exit(0)
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
''' Module implementing Euler angle rotations and their conversions
See:
* http://en.wikipedia.org/wiki/Rotation_matrix
* http://en.wikipedia.org/wiki/Euler_angles
* http://mathworld.wolfram.com/EulerAngles.html
See also: *Representing Attitude with Euler Angles and Quaternions: A
Reference* (2006) by James Diebel. A cached PDF link last found here:
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.110.5134
Euler's rotation theorem tells us that any rotation in 3D can be
described by 3 angles. Let's call the 3 angles the *Euler angle vector*
and call the angles in the vector :math:`alpha`, :math:`beta` and
:math:`gamma`. The vector is [ :math:`alpha`,
:math:`beta`, :math:`gamma` ] and, in this description, the order of the
parameters specifies the order in which the rotations occur (so the
rotation corresponding to :math:`alpha` is applied first).
In order to specify the meaning of an *Euler angle vector* we need to
specify the axes around which each of the rotations corresponding to
:math:`alpha`, :math:`beta` and :math:`gamma` will occur.
There are therefore three axes for the rotations :math:`alpha`,
:math:`beta` and :math:`gamma`; let's call them :math:`i` :math:`j`,
:math:`k`.
Let us express the rotation :math:`alpha` around axis `i` as a 3 by 3
rotation matrix `A`. Similarly :math:`beta` around `j` becomes 3 x 3
matrix `B` and :math:`gamma` around `k` becomes matrix `G`. Then the
whole rotation expressed by the Euler angle vector [ :math:`alpha`,
:math:`beta`, :math:`gamma` ], `R` is given by::
R = np.dot(G, np.dot(B, A))
See http://mathworld.wolfram.com/EulerAngles.html
The order :math:`G B A` expresses the fact that the rotations are
performed in the order of the vector (:math:`alpha` around axis `i` =
`A` first).
To convert a given Euler angle vector to a meaningful rotation, and a
rotation matrix, we need to define:
* the axes `i`, `j`, `k`
* whether a rotation matrix should be applied on the left of a vector to
be transformed (vectors are column vectors) or on the right (vectors
are row vectors).
* whether the rotations move the axes as they are applied (intrinsic
rotations) - compared to the situation where the axes stay fixed and the
vectors move within the axis frame (extrinsic)
* the handedness of the coordinate system
See: http://en.wikipedia.org/wiki/Rotation_matrix#Ambiguities
We are using the following conventions:
* axes `i`, `j`, `k` are the `z`, `y`, and `x` axes respectively. Thus
an Euler angle vector [ :math:`alpha`, :math:`beta`, :math:`gamma` ]
in our convention implies a :math:`alpha` radian rotation around the
`z` axis, followed by a :math:`beta` rotation around the `y` axis,
followed by a :math:`gamma` rotation around the `x` axis.
* the rotation matrix applies on the left, to column vectors on the
right, so if `R` is the rotation matrix, and `v` is a 3 x N matrix
with N column vectors, the transformed vector set `vdash` is given by
``vdash = np.dot(R, v)``.
* extrinsic rotations - the axes are fixed, and do not move with the
rotations.
* a right-handed coordinate system
The convention of rotation around ``z``, followed by rotation around
``y``, followed by rotation around ``x``, is known (confusingly) as
"xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles.
'''
import math
import numpy as np
# Fallback angular threshold: 4 ulp of float64.  Used by mat2euler when the
# input matrix dtype has no machine epsilon (e.g. integer arrays).
_FLOAT_EPS_4 = np.finfo(float).eps * 4.0
def euler2mat(z=0, y=0, x=0):
    ''' Return matrix for rotations around z, y and x axes

    Uses the z, then y, then x convention above.

    Parameters
    ----------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Returns
    -------
    M : array shape (3,3)
       Rotation matrix giving same rotation as for given angles

    Examples
    --------
    >>> zrot = 1.3 # radians
    >>> yrot = -0.1
    >>> xrot = 0.2
    >>> M = euler2mat(zrot, yrot, xrot)
    >>> M.shape
    (3, 3)

    The output rotation matrix is equal to the composition of the
    individual rotations

    >>> M1 = euler2mat(zrot)
    >>> M2 = euler2mat(0, yrot)
    >>> M3 = euler2mat(0, 0, xrot)
    >>> composed_M = np.dot(M3, np.dot(M2, M1))
    >>> np.allclose(M, composed_M)
    True

    You can specify rotations by named arguments

    >>> np.all(M3 == euler2mat(x=xrot))
    True

    When applying M to a vector, the vector should be a column vector to
    the right of M.  If the right hand side is a 2D array rather than a
    vector, then each column of the 2D array represents a vector.

    >>> vec = np.array([1, 0, 0]).reshape((3,1))
    >>> v2 = np.dot(M, vec)
    >>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array
    >>> vecs2 = np.dot(M, vecs)

    Rotations are counter-clockwise.

    >>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3))
    >>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]])
    True
    >>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3))
    >>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]])
    True
    >>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3))
    >>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]])
    True

    Notes
    -----
    The direction of rotation is given by the right-hand rule (orient
    the thumb of the right hand along the axis around which the rotation
    occurs, with the end of the thumb at the positive end of the axis;
    curl your fingers; the direction your fingers curl is the direction
    of rotation).  Therefore, the rotations are counterclockwise if
    looking along the axis of rotation from positive to negative.
    '''
    Ms = []
    if z:
        cosz = math.cos(z)
        sinz = math.sin(z)
        Ms.append(np.array(
                [[cosz, -sinz, 0],
                 [sinz, cosz, 0],
                 [0, 0, 1]]))
    if y:
        cosy = math.cos(y)
        siny = math.sin(y)
        Ms.append(np.array(
                [[cosy, 0, siny],
                 [0, 1, 0],
                 [-siny, 0, cosy]]))
    if x:
        cosx = math.cos(x)
        sinx = math.sin(x)
        Ms.append(np.array(
                [[1, 0, 0],
                 [0, cosx, -sinx],
                 [0, sinx, cosx]]))
    if not Ms:
        return np.eye(3)
    # Compose in reverse append order (x, then y, then z) so that the z
    # rotation is applied to column vectors first: M = Mx . My . Mz.
    # BUG FIX: the original used the bare ``reduce`` builtin, which was
    # removed in Python 3 (NameError); compose with an explicit loop.
    M = Ms[-1]
    for rot in Ms[-2::-1]:
        M = np.dot(M, rot)
    return M
def mat2euler(M, cy_thresh=None):
    ''' Discover the Euler angle vector (z, y, x) from a 3x3 matrix

    Uses the conventions above: `M` rotates column vectors around z,
    then y, then x.

    Parameters
    ----------
    M : array-like, shape (3,3)
    cy_thresh : None or scalar, optional
       threshold below which to give up on the straightforward arctan
       for estimating the x rotation.  If None (default), estimate from
       the precision of the input.

    Returns
    -------
    z : scalar
    y : scalar
    x : scalar
       Rotations in radians around z, y, x axes, respectively

    Notes
    -----
    Absent numerical error, the angles follow from the symbolic z-y-x
    rotation matrix (see ``eulerangles.py`` in ``derivations``)::

      [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)],
      [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)],
      [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)]

    giving z = atan2(-r12, r11), y = asin(r13), x = atan2(-r23, r33).
    When cos(y) nears zero, both z and x degenerate to atan2(0, 0) and
    become unstable.  The fallback branch below comes from Ken
    Shoemake's EulerAngles.c in *Graphics Gems IV* (Paul Heckbert, ed.,
    Academic Press, 1994, ISBN 0123361559; http://www.graphicsgems.org/
    - "can be used without restrictions"): set x to zero and fold the
    remaining rotation into z.
    '''
    M = np.asarray(M)
    if cy_thresh is None:
        try:
            cy_thresh = np.finfo(M.dtype).eps * 4
        except ValueError:
            # non-float dtypes have no eps; fall back to float64's
            cy_thresh = _FLOAT_EPS_4
    r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
    # |cos(y)| recovered as sqrt((cos(y)*sin(x))**2 + (cos(x)*cos(y))**2)
    cos_y = math.sqrt(r33 * r33 + r23 * r23)
    y = math.atan2(r13, cos_y)                  # atan2(sin(y), cos(y))
    if cos_y > cy_thresh:
        # generic case: both arctans are well conditioned
        return (math.atan2(-r12, r11), y, math.atan2(-r23, r33))
    # near gimbal lock: take x -> 0, so r21 -> sin(z), r22 -> cos(z)
    return (math.atan2(r21, r22), y, 0.0)
def euler2quat(z=0, y=0, x=0):
    ''' Quaternion equivalent to the given z, y, x Euler angles

    Uses the z, then y, then x convention above.

    Parameters
    ----------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Returns
    -------
    quat : array shape (4,)
       Quaternion in w, x, y, z (real, then vector) format

    Notes
    -----
    Formula from Sympy - see ``eulerangles.py`` in ``derivations``
    subdirectory
    '''
    # quaternions work with half angles
    half_z = z / 2.0
    half_y = y / 2.0
    half_x = x / 2.0
    cz, sz = math.cos(half_z), math.sin(half_z)
    cy, sy = math.cos(half_y), math.sin(half_y)
    cx, sx = math.cos(half_x), math.sin(half_x)
    return np.array([
        cx * cy * cz - sx * sy * sz,
        cx * sy * sz + cy * cz * sx,
        cx * cz * sy - sx * cy * sz,
        cx * cy * sz + sx * cz * sy])
def quat2euler(q):
    ''' Euler angles corresponding to quaternion `q`

    Parameters
    ----------
    q : 4 element sequence
       w, x, y, z of quaternion

    Returns
    -------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Notes
    -----
    A little computation could be saved by fusing parts of ``quat2mat``
    and ``mat2euler``, but the gain is small and the code duplication
    large, so we simply convert via the rotation matrix.
    '''
    # Import here, not at module scope, to avoid a circular import.
    from nipype.externals.pynifti import quaternions as nq
    rotation = nq.quat2mat(q)
    return mat2euler(rotation)
def euler2angle_axis(z=0, y=0, x=0):
    ''' Angle/axis pair corresponding to these Euler angles

    Uses the z, then y, then x convention above.

    Parameters
    ----------
    z : scalar
       Rotation angle in radians around z-axis (performed first)
    y : scalar
       Rotation angle in radians around y-axis
    x : scalar
       Rotation angle in radians around x-axis (performed last)

    Returns
    -------
    theta : scalar
       angle of rotation
    vector : array shape (3,)
       axis around which rotation occurs

    Examples
    --------
    >>> theta, vec = euler2angle_axis(0, 1.5, 0)
    >>> theta #doctest: +SKIP
    1.5 #doctest: +SKIP
    >>> np.allclose(vec, [0, 1, 0])
    True
    '''
    # Import here, not at module scope, to avoid a circular import.
    from nipype.externals.pynifti import quaternions as nq
    quat = euler2quat(z, y, x)
    return nq.quat2angle_axis(quat)
def angle_axis2euler(theta, vector, is_normalized=False):
    ''' Convert an angle/axis pair to z, y, x Euler angles

    Parameters
    ----------
    theta : scalar
       angle of rotation
    vector : 3 element sequence
       vector specifying axis for rotation.
    is_normalized : bool, optional
       True if vector is already normalized (has norm of 1).  Default
       False

    Returns
    -------
    z : scalar
    y : scalar
    x : scalar
       Rotations in radians around z, y, x axes, respectively

    Examples
    --------
    >>> z, y, x = angle_axis2euler(0, [1, 0, 0])
    >>> np.allclose((z, y, x), 0)
    True

    Notes
    -----
    Fusing parts of ``angle_axis2mat`` and ``mat2euler`` would save a
    little computation at the price of much code duplication, so we go
    through the rotation matrix instead.
    '''
    # Import here, not at module scope, to avoid a circular import.
    from nipype.externals.pynifti import quaternions as nq
    return mat2euler(nq.angle_axis2mat(theta, vector, is_normalized))
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from mock import MagicMock, patch, ANY
from testtools import TestCase
from testtools.matchers import Equals, Is, Not
from novaclient.v1_1 import Client
from novaclient.v1_1.flavors import FlavorManager, Flavor
from novaclient.v1_1.servers import Server, ServerManager
from oslo.config import cfg
from trove.backup.models import Backup
from trove.common.context import TroveContext
from trove.common import instance as rd_instance
from trove.datastore import models as datastore_models
from trove.db.models import DatabaseModelBase
from trove.instance.models import DBInstance
from trove.instance.models import InstanceServiceStatus
from trove.instance.tasks import InstanceTasks
import trove.extensions.mgmt.instances.models as mgmtmodels
from trove.openstack.common.notifier import api as notifier
from trove.common import remote
from trove.tests.util import test_config
CONF = cfg.CONF
class MockMgmtInstanceTest(TestCase):
    """Shared fixture for mgmt-instance tests.

    Replaces the admin nova client with MagicMocks and pins the config
    values the notification code reads.  Subclasses program
    ``self.server_mgr`` / ``self.flavor_mgr`` to stub nova responses.
    """

    def setUp(self):
        super(MockMgmtInstanceTest, self).setUp()
        self.context = TroveContext()
        self.context.auth_token = 'some_secret_password'
        # Fake nova client with mocked servers/flavors managers.
        self.client = MagicMock(spec=Client)
        self.server_mgr = MagicMock(spec=ServerManager)
        self.client.servers = self.server_mgr
        self.flavor_mgr = MagicMock(spec=FlavorManager)
        self.client.flavors = self.flavor_mgr
        # NOTE(review): module-level monkeypatch is never restored in
        # tearDown, so it leaks into later tests -- confirm intended.
        remote.create_admin_nova_client = MagicMock(return_value=self.client)
        CONF.set_override('host', 'test_host')
        CONF.set_override('exists_notification_ticks', 1)
        CONF.set_override('report_interval', 20)
        CONF.set_override('notification_service_id', {'mysql': '123'})

    def tearDown(self):
        super(MockMgmtInstanceTest, self).tearDown()

    @staticmethod
    def build_db_instance(status, task_status=InstanceTasks.DELETING):
        """Build a DBInstance fixture with fixed ids, the given server
        status, and the given task status."""
        return DBInstance(task_status,
                          created='xyz',
                          name='test_name',
                          id='1',
                          flavor_id='flavor_1',
                          datastore_version_id=
                          test_config.dbaas_datastore_version_id,
                          compute_instance_id='compute_id_1',
                          server_id='server_id_1',
                          tenant_id='tenant_id_1',
                          server_status=status)
class TestNotificationTransformer(MockMgmtInstanceTest):
    """Tests for the generic NotificationTransformer."""

    def test_tranformer(self):
        """The transformer emits one payload per DB instance found."""
        transformer = mgmtmodels.NotificationTransformer(context=self.context)
        status = rd_instance.ServiceStatuses.BUILDING.api_status
        db_instance = self.build_db_instance(status, InstanceTasks.BUILDING)
        with patch.object(DatabaseModelBase, 'find_all',
                          return_value=[db_instance]):
            dsv_info = MagicMock(spec=datastore_models.DBDatastoreVersion)
            for attr, value in [('id', "test_datastore_version"),
                                ('datastore_id', "mysql_test_version"),
                                ('name', "test_datastore_name"),
                                ('image_id', "test_datastore_image_id"),
                                ('packages', "test_datastore_pacakges"),
                                ('active', 1),
                                ('manager', "mysql")]:
                setattr(dsv_info, attr, value)
            datastore_version = datastore_models.DatastoreVersion(dsv_info)

            def fake_find_by(*args, **kwargs):
                # instance_id lookups want a service status; everything
                # else is a datastore-version lookup.
                if 'instance_id' in kwargs:
                    return InstanceServiceStatus(
                        rd_instance.ServiceStatuses.BUILDING)
                return datastore_version

            with patch.object(DatabaseModelBase, 'find_by',
                              side_effect=fake_find_by):
                payloads = transformer()
                self.assertIsNotNone(payloads)
                self.assertThat(len(payloads), Equals(1))
                payload = payloads[0]
                self.assertThat(payload['audit_period_beginning'],
                                Not(Is(None)))
                self.assertThat(payload['audit_period_ending'], Not(Is(None)))
                self.assertThat(payload['state'], Equals(status.lower()))

    def test_get_service_id(self):
        """A known datastore manager maps to its configured service id."""
        transformer = mgmtmodels.NotificationTransformer(context=self.context)
        service_ids = {'mysql': '123', 'percona': 'abc'}
        self.assertThat(transformer._get_service_id('mysql', service_ids),
                        Equals('123'))

    def test_get_service_id_unknown(self):
        """An unknown datastore manager maps to the error sentinel."""
        transformer = mgmtmodels.NotificationTransformer(context=self.context)
        service_ids = {'mysql': '123', 'percona': 'abc'}
        self.assertThat(transformer._get_service_id('m0ng0', service_ids),
                        Equals('unknown-service-id-error'))
class TestNovaNotificationTransformer(MockMgmtInstanceTest):
    """Tests for the nova-aware transformer (flavor lookups, caching,
    and filtering of SHUTDOWN / server-less instances)."""

    def test_transformer_cache(self):
        """Each transformer instance owns a distinct flavor cache."""
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        with patch.object(self.flavor_mgr, 'get', return_value=flavor):
            transformer = mgmtmodels.NovaNotificationTransformer(
                context=self.context)
            transformer2 = mgmtmodels.NovaNotificationTransformer(
                context=self.context)
            self.assertThat(transformer._flavor_cache,
                            Not(Is(transformer2._flavor_cache)))

    def test_lookup_flavor(self):
        """Flavor name is returned when found, 'unknown' when nova has
        no matching flavor."""
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'flav_1'
        with patch.object(self.flavor_mgr, 'get', side_effect=[flavor, None]):
            transformer = mgmtmodels.NovaNotificationTransformer(
                context=self.context)
            self.assertThat(transformer._lookup_flavor('1'),
                            Equals(flavor.name))
            self.assertThat(transformer._lookup_flavor('2'),
                            Equals('unknown'))

    def test_tranformer(self):
        """A BUILDING instance yields a fully populated payload."""
        status = rd_instance.ServiceStatuses.BUILDING.api_status
        db_instance = MockMgmtInstanceTest.build_db_instance(
            status, task_status=InstanceTasks.BUILDING)
        stub_dsv_db_info = MagicMock(spec=datastore_models.DBDatastoreVersion)
        stub_dsv_db_info.id = "test_datastore_version"
        stub_dsv_db_info.datastore_id = "mysql_test_version"
        stub_dsv_db_info.name = "test_datastore_name"
        stub_dsv_db_info.image_id = "test_datastore_image_id"
        stub_dsv_db_info.packages = "test_datastore_pacakges"
        stub_dsv_db_info.active = 1
        stub_dsv_db_info.manager = "mysql"
        stub_datastore_version = datastore_models.DatastoreVersion(
            stub_dsv_db_info)
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        server = MagicMock(spec=Server)
        server.user_id = 'test_user_id'
        mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                      db_instance,
                                                      server,
                                                      None)
        with patch.object(DatabaseModelBase, 'find_by',
                          return_value=stub_datastore_version):
            with patch.object(mgmtmodels, 'load_mgmt_instances',
                              return_value=[mgmt_instance]):
                with patch.object(self.flavor_mgr, 'get', return_value=flavor):
                    # invocation
                    transformer = mgmtmodels.NovaNotificationTransformer(
                        context=self.context)
                    payloads = transformer()
                    # assertions
                    self.assertIsNotNone(payloads)
                    self.assertThat(len(payloads), Equals(1))
                    payload = payloads[0]
                    self.assertThat(payload['audit_period_beginning'],
                                    Not(Is(None)))
                    self.assertThat(payload['audit_period_ending'],
                                    Not(Is(None)))
                    self.assertThat(payload['state'], Equals(status.lower()))
                    self.assertThat(payload['instance_type'],
                                    Equals('db.small'))
                    self.assertThat(payload['instance_type_id'],
                                    Equals('flavor_1'))
                    self.assertThat(payload['user_id'], Equals('test_user_id'))
                    self.assertThat(payload['service_id'], Equals('123'))

    def test_tranformer_invalid_datastore_manager(self):
        """An unrecognized datastore manager falls back to the
        'unknown-service-id-error' sentinel in the payload."""
        status = rd_instance.ServiceStatuses.BUILDING.api_status
        db_instance = MockMgmtInstanceTest.build_db_instance(
            status, task_status=InstanceTasks.BUILDING)
        server = MagicMock(spec=Server)
        server.user_id = 'test_user_id'
        stub_datastore_version = MagicMock()
        stub_datastore_version.id = "stub_datastore_version"
        stub_datastore_version.manager = "m0ng0"
        stub_datastore = MagicMock()
        stub_datastore.default_datastore_version = "stub_datastore_version"
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        with patch.object(datastore_models.DatastoreVersion, 'load',
                          return_value=stub_datastore_version):
            with patch.object(datastore_models.DatastoreVersion,
                              'load_by_uuid',
                              return_value=stub_datastore_version):
                with patch.object(datastore_models.Datastore, 'load',
                                  return_value=stub_datastore):
                    mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                                  db_instance,
                                                                  server,
                                                                  None)
                    with patch.object(mgmtmodels, 'load_mgmt_instances',
                                      return_value=[mgmt_instance]):
                        with patch.object(self.flavor_mgr,
                                          'get', return_value=flavor):
                            # invocation
                            transformer = (
                                mgmtmodels.NovaNotificationTransformer(
                                    context=self.context)
                            )
                            payloads = transformer()
                            # assertions
                            self.assertIsNotNone(payloads)
                            self.assertThat(len(payloads), Equals(1))
                            payload = payloads[0]
                            self.assertThat(payload['audit_period_beginning'],
                                            Not(Is(None)))
                            self.assertThat(payload['audit_period_ending'],
                                            Not(Is(None)))
                            self.assertThat(payload['state'],
                                            Equals(status.lower()))
                            self.assertThat(payload['instance_type'],
                                            Equals('db.small'))
                            self.assertThat(payload['instance_type_id'],
                                            Equals('flavor_1'))
                            self.assertThat(payload['user_id'],
                                            Equals('test_user_id'))
                            self.assertThat(payload['service_id'],
                                            Equals('unknown-service-id-error'))

    def test_tranformer_shutdown_instance(self):
        """SHUTDOWN instances are filtered out of the payload list."""
        status = rd_instance.ServiceStatuses.SHUTDOWN.api_status
        db_instance = self.build_db_instance(status)
        server = MagicMock(spec=Server)
        server.user_id = 'test_user_id'
        mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                      db_instance,
                                                      server,
                                                      None)
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        with patch.object(Backup, 'running', return_value=None):
            self.assertThat(mgmt_instance.status, Equals('SHUTDOWN'))
            with patch.object(mgmtmodels, 'load_mgmt_instances',
                              return_value=[mgmt_instance]):
                with patch.object(self.flavor_mgr, 'get', return_value=flavor):
                    # invocation
                    transformer = mgmtmodels.NovaNotificationTransformer(
                        context=self.context)
                    payloads = transformer()
                    # assertion that SHUTDOWN instances are not reported
                    self.assertIsNotNone(payloads)
                    self.assertThat(len(payloads), Equals(0))

    def test_tranformer_no_nova_instance(self):
        """Instances with no backing nova server are filtered out."""
        status = rd_instance.ServiceStatuses.SHUTDOWN.api_status
        db_instance = MockMgmtInstanceTest.build_db_instance(status)
        mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                      db_instance,
                                                      None,
                                                      None)
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        with patch.object(Backup, 'running', return_value=None):
            self.assertThat(mgmt_instance.status, Equals('SHUTDOWN'))
            with patch.object(mgmtmodels, 'load_mgmt_instances',
                              return_value=[mgmt_instance]):
                with patch.object(self.flavor_mgr, 'get', return_value=flavor):
                    # invocation
                    transformer = mgmtmodels.NovaNotificationTransformer(
                        context=self.context)
                    payloads = transformer()
                    # assertion that SHUTDOWN instances are not reported
                    self.assertIsNotNone(payloads)
                    self.assertThat(len(payloads), Equals(0))

    def test_tranformer_flavor_cache(self):
        """A second transformation reuses the cached flavor lookup."""
        status = rd_instance.ServiceStatuses.BUILDING.api_status
        db_instance = MockMgmtInstanceTest.build_db_instance(
            status, InstanceTasks.BUILDING)
        server = MagicMock(spec=Server)
        server.user_id = 'test_user_id'
        mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                      db_instance,
                                                      server,
                                                      None)
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        with patch.object(mgmtmodels, 'load_mgmt_instances',
                          return_value=[mgmt_instance]):
            with patch.object(self.flavor_mgr, 'get', return_value=flavor):
                transformer = mgmtmodels.NovaNotificationTransformer(
                    context=self.context)
                transformer()
                # call twice ensure client.flavor invoked once
                payloads = transformer()
                self.assertIsNotNone(payloads)
                self.assertThat(len(payloads), Equals(1))
                payload = payloads[0]
                self.assertThat(payload['audit_period_beginning'],
                                Not(Is(None)))
                self.assertThat(payload['audit_period_ending'], Not(Is(None)))
                self.assertThat(payload['state'], Equals(status.lower()))
                self.assertThat(payload['instance_type'], Equals('db.small'))
                self.assertThat(payload['instance_type_id'],
                                Equals('flavor_1'))
                self.assertThat(payload['user_id'], Equals('test_user_id'))
                # ensure cache was used to get flavor second time
                self.flavor_mgr.get.assert_any_call('flavor_1')
class TestMgmtInstanceTasks(MockMgmtInstanceTest):
    """Tests for the periodic exists-event publishing task."""

    def test_public_exists_events(self):
        """publish_exist_events notifies per instance and clears the
        context's auth token afterwards."""
        status = rd_instance.ServiceStatuses.BUILDING.api_status
        db_instance = self.build_db_instance(status,
                                             task_status=InstanceTasks.BUILDING)
        server = MagicMock(spec=Server)
        server.user_id = 'test_user_id'
        instance = mgmtmodels.SimpleMgmtInstance(self.context, db_instance,
                                                 server, None)
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        with patch.object(mgmtmodels, 'load_mgmt_instances',
                          return_value=[instance]):
            with patch.object(self.flavor_mgr, 'get', return_value=flavor):
                self.assertThat(self.context.auth_token,
                                Is('some_secret_password'))
                with patch.object(notifier, 'notify', return_value=None):
                    # invocation
                    transformer = mgmtmodels.NovaNotificationTransformer(
                        context=self.context)
                    mgmtmodels.publish_exist_events(transformer, self.context)
                    # assertion
                    notifier.notify.assert_any_call(self.context,
                                                    'test_host',
                                                    'trove.instance.exists',
                                                    'INFO',
                                                    ANY)
                    self.assertThat(self.context.auth_token, Is(None))
| |
#============================================================================
# Name : k-means.py
# Author : LIAO
# Version : 1.0 2016.11.4
# Copyright : copyright (c)2016, LIAO
# Description : K-Means Cluster Implemented By Python
#============================================================================
import numpy as np
import math
import csv
class KMeansCluster:
    """
    Cluster based on the K-Means algorithm, following the sklearn style:
    call fit() to train the model, then predict() to label new samples.
    """

    def __init__(self, k=2, centroids=None, metric='eculidean',
                 threshold=1e-3, verbose=False):
        """
        Parameters:
        -----------
        k: number of clusters, 'k' in k-means
        centroids: optional initial centroids, array-like of length k;
                   sampled randomly from the training data when None
        metric: distance metric name (only Euclidean is implemented)
        threshold: stop when total centroid movement < threshold
        verbose: print the detail of each round or not
        """
        self.k = k                      # number of clusters
        self.centroids = centroids      # cluster centroids (k x n_features)
        self.metric = metric            # metric of distance
        self.threshold = threshold      # stopping threshold
        self.max_rouds = 10000          # max rounds, ignoring threshold
        self.verbose = verbose
        self.labels = None              # labels of training data

    def fit(self, train_data):
        """Cluster ``train_data``; learns ``self.centroids`` / ``self.labels``.

        train_data: training data, 2D array-like, one sample per row
        """
        train_data = np.asarray(train_data)
        self.data = train_data          # saved for calc_sse()
        self.n_samples = train_data.shape[0]
        # Select k initial centroids randomly unless they were supplied.
        # BUG FIX: the original tested ``self.centroids == None``, which is
        # ambiguous (elementwise) once centroids is an ndarray, e.g. on refit.
        if self.centroids is None:
            # float copy, so integer training data cannot truncate the means
            self.centroids = self.__sampling(train_data, self.k).astype(float)
        if self.verbose:
            print('Initial centroids: ', self.centroids)
        self.labels = np.zeros(self.n_samples, dtype=int)
        for r in range(self.max_rouds):
            # assignment step: label each sample with its nearest centroid
            for i in range(self.n_samples):
                self.labels[i] = np.argmin(
                    self._distances(train_data[i], self.centroids))
            # update step: move each centroid to the mean of its members
            old_centroids = self.centroids.copy()
            for i in range(len(self.centroids)):
                if self.verbose:
                    print('[{0}] centroid: {1} number: {2}'.format(
                        (r + 1), self.centroids[i], np.sum(self.labels == i)))
                members = train_data[self.labels == i, :]
                # BUG FIX: the original tried to catch a RuntimeWarning
                # (never raised by default) and exit()-ed the process;
                # instead keep an empty cluster's centroid where it is.
                if len(members):
                    self.centroids[i] = members.mean(axis=0)
            # terminal condition: centroids have (almost) stopped moving
            if np.sum(self._distances(old_centroids,
                                      self.centroids)) < self.threshold:
                break

    def predict(self, test_data):
        """Return the label of test_data, i.e. the nearest centroid's index.

        BUG FIX: the original called ``np.array(test_data, self.centroids)``,
        which passes the centroids as a *dtype* argument and never computed
        any distance at all.
        """
        return int(np.argmin(self._distances(np.asarray(test_data),
                                             self.centroids)))

    def calc_sse(self):
        """Calculate the sum of the squared errors over the training data."""
        sse = 0.0
        for i in range(len(self.centroids)):
            sse += np.sum(
                (self.data[self.labels == i, :] - self.centroids[i]) ** 2)
        return sse

    @staticmethod
    def _distances(m1, m2):
        """Row-wise Euclidean distance between broadcast-compatible arrays."""
        return np.sqrt(np.sum((m1 - m2) ** 2, axis=1))

    def __sampling(self, data, n):
        """Random sampling of n NON-REPEATING rows from the original data.

        BUG FIX: the original *replaced* the sample set on every retry
        while also shrinking the draw size, so a single duplicate draw
        could loop forever; accumulate into the set instead.
        """
        chosen = set()
        n_samples = data.shape[0]
        while len(chosen) < n:
            chosen.update(
                np.random.randint(n_samples, size=n - len(chosen)).tolist())
        return data[list(chosen), :]

    def print_detail(self):
        print('clusters:', self.centroids)
class BisectingKMeansCluster:
    """ Bisecting K-Means Cluster: repeatedly split the cluster with the
    largest SSE into two via plain K-Means until k clusters exist. """
    def __init__(self, k=2, centroids=None, metric='eculidean',
                 n_estimators=10, threshold = 1e-3, verbose=False):
        """
        Parameters:
        -----------
        k: number of clusters, 'k' in k-means
        centroids: centroids of clusters, which length is k
        metric: distance metric for clustering, default is euclidean
        n_estimators: number of candidate 2-means runs per split; the run
                      with the lowest SSE wins
        threshold: if moving of centroid position < threshold, then stop
        verbose: print the detail or not
        """
        self.k = k
        self.centroids = centroids
        self.metric = metric
        self.threshold = threshold
        self.n_estimators = n_estimators
        self.verbose = verbose
        self.sses = None              # SSE for each cluster
    def fit(self, train_data):
        """ Fit this model with train data and generate the cluster centroids
        After calling this method, attribute self.centroids will be k-dim
        array stands for k cluster centroids
        train_data: training data, 2D array-like
        """
        cur_k = 1                     # current number of clusters
        train_data = np.array(train_data)
        n_samples = train_data.shape[0]
        # Start from one all-encompassing cluster; the 0 entries below are
        # placeholders that the first split overwrites.
        self.sses = [0]
        self.data_map = dict()        # members of each cluster, by index
        self.data_map[0] = train_data
        self.centroids = [0]
        while cur_k < self.k:
            max_id = np.argmax(self.sses)   # select maximum SSE cluster
            # run several 2-means estimators and keep the best split
            models = list()
            model_sses = list()
            for i in range(self.n_estimators):
                model = KMeansCluster(2, metric=self.metric,
                                      threshold=self.threshold)
                model.fit(self.data_map[max_id])
                models.append(model)
                model_sses.append(model.calc_sse())
            model_id = np.argmin(model_sses)
            model = models[model_id]        # least SSE model
            # replace the split cluster with one half, append the other
            self.centroids[max_id] = model.centroids[0]
            self.centroids.append(model.centroids[1])
            data_0 = model.data[model.labels==0]
            data_1 = model.data[model.labels==1]
            self.data_map[max_id] = data_0
            self.data_map[cur_k] = data_1
            self.sses[max_id] = self.__calc_sse(data_0, model.centroids[0])
            self.sses.append(self.__calc_sse(data_1, model.centroids[1]))
            cur_k += 1
            if self.verbose: print (cur_k, np.array(self.centroids))
    def calc_sse(self):
        """ Total SSE: sum of per-cluster SSEs over all k clusters. """
        sse = 0.0
        for i in range(self.k):
            sse += self.__calc_sse(self.data_map[i], np.array(self.centroids[i]))
        return sse
    def __calc_sse(self, data, centroid):
        """
        Calculate SSE for a cluster
        Parameters:
        -----------
        data: 2D array-like, one example each line
        centroid: 1D array-like, stand for the centroid of this cluster
        """
        return np.sum((data - centroid) ** 2)
def eculidean_dis(m1, m2):
    """ Row-wise Euclidean distance between arrays m1 and m2 """
    diff = m1 - m2
    return np.sqrt((diff ** 2).sum(axis=1))
import datetime

if __name__ == '__main__':
    # Benchmark driver: cluster a UCI-style CSV data set and report SSE.
    filename = 'Relation_Network_(Directed).data'
    #filename = '3D_spatial_network.txt'
    #filename = 'eb.arff'
    # BUG FIX: csv.reader requires a text-mode file object in Python 3
    # ('rb' raised TypeError); newline='' is the csv-recommended mode.
    with open(filename, 'r', newline='') as f:
        reader = csv.reader(f)
        data = [row[7:] for row in reader]      # skip the leading id columns
    data = np.array(data).astype(np.float32)
    starttime = datetime.datetime.now()
    model = BisectingKMeansCluster(verbose=1, k=4, threshold=1e-3)
    #model = KMeansCluster(k=4,threshold=1e-3)
    model.fit(data)
    print(model.calc_sse())
    endtime = datetime.datetime.now()
    print('Escape time:')
    print(endtime - starttime)
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from sqlalchemy import exc as sa_exc
import testtools
from sahara.conductor import manager
from sahara import context
from sahara import exceptions as ex
import sahara.tests.unit.conductor.base as test_base
# Canonical cluster-create request used as the fixture for every test below.
# Node group "ng_1" has no security groups while "ng_2" has two -- several
# tests rely on that asymmetry.
SAMPLE_CLUSTER = {
    "plugin_name": "test_plugin",
    "hadoop_version": "test_version",
    "tenant_id": "tenant_1",
    "name": "test_cluster",
    "user_keypair_id": "my_keypair",
    "node_groups": [
        {
            "name": "ng_1",
            "flavor_id": "42",
            "node_processes": ["p1", "p2"],
            "count": 1,
            "security_groups": None
        },
        {
            "name": "ng_2",
            "flavor_id": "42",
            "node_processes": ["p3", "p4"],
            "count": 3,
            "security_groups": ["group1", "group2"]
        }
    ],
    "cluster_configs": {
        "service_1": {
            "config_2": "value_2"
        },
        "service_2": {
            "config_1": "value_1"
        }
    },
}
class ClusterTest(test_base.ConductorManagerTestCase):
    def __init__(self, *args, **kwargs):
        # The ``checks`` callables expose the shared fixtures to the base
        # class, presumably so it can verify no test mutated them --
        # confirm against ConductorManagerTestCase.
        super(ClusterTest, self).__init__(
            checks=[
                lambda: SAMPLE_CLUSTER,
                lambda: manager.CLUSTER_DEFAULTS,
                lambda: manager.NODE_GROUP_DEFAULTS,
                lambda: manager.INSTANCE_DEFAULTS,
            ], *args, **kwargs)
    def test_cluster_create_list_delete(self):
        """Create a cluster, see it listed, destroy it, and verify a
        second destroy raises NotFoundException."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        self.assertIsInstance(cluster_db_obj, dict)
        lst = self.api.cluster_get_all(ctx)
        self.assertEqual(1, len(lst))
        cl_id = lst[0]["id"]
        self.api.cluster_destroy(ctx, cl_id)
        lst = self.api.cluster_get_all(ctx)
        self.assertEqual(0, len(lst))
        with testtools.ExpectedException(ex.NotFoundException):
            self.api.cluster_destroy(ctx, cl_id)
    def test_duplicate_cluster_create(self):
        """Creating the same cluster twice raises DBDuplicateEntry."""
        ctx = context.ctx()
        self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        with testtools.ExpectedException(ex.DBDuplicateEntry):
            self.api.cluster_create(ctx, SAMPLE_CLUSTER)
    def test_cluster_fields(self):
        """Every field of the create request comes back on the created
        cluster; node groups match after DB-generated fields are popped."""
        ctx = context.ctx()
        cl_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        self.assertIsInstance(cl_db_obj, dict)
        for key, val in SAMPLE_CLUSTER.items():
            if key == 'node_groups':
                # this will be checked separately
                continue
            self.assertEqual(val, cl_db_obj.get(key),
                             "Key not found %s" % key)
        # Strip the server-generated / defaulted node-group fields so the
        # remainder can be compared to the request verbatim.
        for ng in cl_db_obj["node_groups"]:
            ng.pop("created_at")
            ng.pop("updated_at")
            ng.pop("id")
            self.assertEqual(cl_db_obj["id"], ng.pop("cluster_id"))
            ng.pop("image_id")
            self.assertEqual([], ng.pop("instances"))
            ng.pop("node_configs")
            ng.pop("node_group_template_id")
            ng.pop("volume_mount_prefix")
            ng.pop("volumes_size")
            ng.pop("volumes_per_node")
            ng.pop("volumes_availability_zone")
            ng.pop("volume_type")
            ng.pop("floating_ip_pool")
            ng.pop("image_username")
            ng.pop("open_ports")
            ng.pop("auto_security_group")
            ng.pop("is_proxy_gateway")
            ng.pop("tenant_id")
            ng.pop("availability_zone")
            ng.pop('volume_local_to_instance')
        self.assertEqual(SAMPLE_CLUSTER["node_groups"],
                         cl_db_obj["node_groups"])
    def test_cluster_no_ng(self):
        """A cluster created without node groups gets an empty list."""
        ctx = context.ctx()
        cluster_schema = copy.deepcopy(SAMPLE_CLUSTER)
        cluster_schema.pop('node_groups')
        cl_db_obj = self.api.cluster_create(ctx, cluster_schema)
        self.assertIsInstance(cl_db_obj, dict)
        for key, val in cluster_schema.items():
            self.assertEqual(val, cl_db_obj.get(key),
                             "Key not found %s" % key)
        self.assertEqual([], cl_db_obj["node_groups"])
    def test_cluster_update_status(self):
        """cluster_update persists the new status and raises
        NotFoundException for an unknown cluster id."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        updated_cl = self.api.cluster_update(ctx, _id, {"status": "Active"})
        self.assertIsInstance(updated_cl, dict)
        self.assertEqual("Active", updated_cl["status"])
        get_cl_obj = self.api.cluster_get(ctx, _id)
        self.assertEqual(updated_cl, get_cl_obj)
        with testtools.ExpectedException(ex.NotFoundException):
            self.api.cluster_update(ctx, "bad_id", {"status": "Active"})
def _ng_in_cluster(self, cluster_db_obj, ng_id):
for ng in cluster_db_obj["node_groups"]:
if ng["id"] == ng_id:
return ng
return None
    def test_add_node_group(self):
        """node_group_add attaches a new group to an existing cluster."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        node_group = {
            "name": "ng_3",
            "flavor_id": "42",
            "node_processes": ["p3", "p4"],
            "count": 5
        }
        ng_id = self.api.node_group_add(ctx, _id, node_group)
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
        self.assertTrue(found_ng, "New Node Group not found")
    def test_update_node_group(self):
        """node_group_update changes a field on an existing node group."""
        ctx = context.ctx()
        cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        _id = cluster_db_obj["id"]
        self.assertEqual(2, len(cluster_db_obj["node_groups"]))
        ng_id = cluster_db_obj["node_groups"][-1]["id"]
        self.api.node_group_update(ctx, ng_id, {"image_id": "test_image"})
        cluster_db_obj = self.api.cluster_get(ctx, _id)
        found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
        self.assertTrue(found_ng, "Updated Node Group not found")
        for ng in cluster_db_obj["node_groups"]:
            if ng["id"] != ng_id:
                continue
            self.assertEqual("test_image", ng["image_id"])
def test_delete_node_group(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
ng_id = cluster_db_obj["node_groups"][-1]["id"]
self.api.node_group_remove(ctx, ng_id)
cluster_db_obj = self.api.cluster_get(ctx, _id)
found_ng = self._ng_in_cluster(cluster_db_obj, ng_id)
self.assertFalse(found_ng, "Node Group is still in a CLuster")
with testtools.ExpectedException(ex.NotFoundException):
self.api.node_group_remove(ctx, ng_id)
def _add_instance(self, ctx, ng_id):
instance = {
"instance_name": "additional_vm"
}
return self.api.instance_add(ctx, ng_id, instance)
def test_add_instance(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
ng_id = cluster_db_obj["node_groups"][-1]["id"]
count = cluster_db_obj["node_groups"][-1]["count"]
self._add_instance(ctx, ng_id)
cluster_db_obj = self.api.cluster_get(ctx, _id)
for ng in cluster_db_obj["node_groups"]:
if ng["id"] != ng_id:
continue
ng.pop('tenant_id')
self.assertEqual(count + 1, ng["count"])
self.assertEqual("additional_vm",
ng["instances"][0]["instance_name"])
def test_update_instance(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
ng_id = cluster_db_obj["node_groups"][-1]["id"]
instance_id = self._add_instance(ctx, ng_id)
self.api.instance_update(ctx, instance_id,
{"management_ip": "1.1.1.1"})
cluster_db_obj = self.api.cluster_get(ctx, _id)
for ng in cluster_db_obj["node_groups"]:
if ng["id"] != ng_id:
continue
self.assertEqual("1.1.1.1", ng["instances"][0]["management_ip"])
def test_remove_instance(self):
ctx = context.ctx()
cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
_id = cluster_db_obj["id"]
ng_id = cluster_db_obj["node_groups"][-1]["id"]
count = cluster_db_obj["node_groups"][-1]["count"]
instance_id = self._add_instance(ctx, ng_id)
cluster_db_obj = self.api.cluster_get(ctx, _id)
for ng in cluster_db_obj["node_groups"]:
if ng["id"] != ng_id:
continue
self.assertEqual(count + 1, ng["count"])
self.api.instance_remove(ctx, instance_id)
cluster_db_obj = self.api.cluster_get(ctx, _id)
for ng in cluster_db_obj["node_groups"]:
if ng["id"] != ng_id:
continue
self.assertEqual(count, ng["count"])
with testtools.ExpectedException(ex.NotFoundException):
self.api.instance_remove(ctx, instance_id)
def test_cluster_search(self):
ctx = context.ctx()
self.api.cluster_create(ctx, SAMPLE_CLUSTER)
lst = self.api.cluster_get_all(ctx)
self.assertEqual(1, len(lst))
kwargs = {'name': SAMPLE_CLUSTER['name'],
'plugin_name': SAMPLE_CLUSTER['plugin_name']}
lst = self.api.cluster_get_all(ctx, **kwargs)
self.assertEqual(1, len(lst))
# Valid field but no matching value
kwargs = {'name': SAMPLE_CLUSTER['name']+'foo'}
lst = self.api.cluster_get_all(ctx, **kwargs)
self.assertEqual(0, len(lst))
# Invalid field
self.assertRaises(sa_exc.InvalidRequestError,
self.api.cluster_get_all,
ctx, **{'badfield': 'somevalue'})
| |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: _Policy.py
import dsz
import dsz.lp
import dsz.user
import dsz.script
import sys
import xml.dom.minidom
from policy._RegistryPolicy import AdminTemplates, Ipsec, Srp, SecurityOptions
from policy._RegistryPolicy import SECURITY_XML_PATH
from policy._Reader import Reader
from policy._SecurityPolicy import SystemServices, FileSecurity, RegistrySecurity, RestrictedGroups, Privileges, EventLog, Kerberos, EventAudit, SystemAccess
from policy._Shared import SYSTEM_ACCESS, EVENT_AUDIT, KERBEROS_POLICY, REGISTRY_VALUES, PRIVILEGE_RIGHTS, APPLICATION_LOG, SECURITY_LOG, SYSTEM_LOG, FILE_SECURITY, REGISTRY_KEYS, SERVICES, RESTRICTED
# Directory under the LP resources root holding the policy XML resources.
RESOURCE_DIR = 'Dsz'
# Indentation unit and current nesting depth for echoed output.
TAB = '\t'
TABCNT = 0
def TaskingMain(namespace):
    """Tasking entry point: query computer and/or user policy per parameters.

    Reads the task parameters, runs the requested policy queries inside a
    'Policy' script-data section, stores the collected data and marks the
    task successful. Always returns True.
    """
    import mcl.target
    import mcl.tasking
    # Suppress command echo for the duration of the task.
    dsz.control.echo.Off()
    params = mcl.tasking.GetParameters()
    dsz.script.data.Start('Policy')
    if params['doComputer']:
        queryComputer(params)
    if params['doUser']:
        queryUser(params)
    dsz.script.data.End()
    dsz.script.data.Store()
    mcl.tasking.TaskSetStatus(mcl.target.CALL_SUCCEEDED)
    return True
def queryComputer(params):
    """Echo and store HKLM (computer) policy data selected by *params*.

    Bumps the global indentation depth for nested output and restores it
    on exit.
    """
    global TABCNT
    TABCNT += 1
    dsz.script.data.Start('Computer')
    dsz.ui.Echo(TAB * TABCNT + 'Computer Policy')
    dsz.ui.Echo('----------------------------------------------------------------------------------------')
    hklmReader = Reader('hklm', params)
    if params['software']:
        software = Srp('hklm')
        software.process(hklmReader)
        software.store(TABCNT)
    if params['ipsec']:
        ip = Ipsec('hklm')
        ip.process(hklmReader)
        ip.store(TABCNT)
    if params['templates']:
        templates = AdminTemplates('hklm', RESOURCE_DIR)
        # Presumably the extra paths are those already covered by the
        # dedicated SRP/IPsec/SecurityOptions parsers -- TODO confirm.
        templates.process(hklmReader, Srp.getActualPaths() + Ipsec.getActualPaths() + SecurityOptions.getActualPaths(RESOURCE_DIR))
        templates.store(TABCNT)
    # Any of these flags requires the full security-policy pass.
    if params['account'] or params['privileges'] or params['restricted'] or params['permissions'] or params['audit'] or params['options'] or params['log']:
        securityPolicy(params, hklmReader)
    hklmReader.cleanup()
    dsz.script.data.End()
    TABCNT -= 1
def processSecurityXml():
    """Parse the security-policy XML resource into a settings dictionary.

    Returns a dict keyed by policy area (SYSTEM_ACCESS, EVENT_AUDIT, ...);
    each area maps a registry key to [full path, display name, registry key].
    Settings that cannot be categorized are reported via dsz.ui.Echo.

    Fixes vs. original: the computed ``path`` is actually used for the
    parse (the original rebuilt the identical string a second time), the
    duplicated error-echo block is factored into a helper, and
    ``name.__contains__(x)`` is replaced by the idiomatic ``x in name``.
    """
    path = '%s/%s/%s' % (dsz.lp.GetResourcesDirectory(), RESOURCE_DIR, SECURITY_XML_PATH)
    xmldoc = xml.dom.minidom.parse(path)
    settingList = xmldoc.getElementsByTagName('Setting')
    settingDict = {SYSTEM_ACCESS: {},EVENT_AUDIT: {},KERBEROS_POLICY: {},REGISTRY_VALUES: {},PRIVILEGE_RIGHTS: {},APPLICATION_LOG: {},SECURITY_LOG: {},SYSTEM_LOG: {},FILE_SECURITY: {},REGISTRY_KEYS: {},SERVICES: {},RESTRICTED: {}}

    def _echoParseError(fullpath, name, key):
        # Shared error report for settings that cannot be categorized.
        dsz.ui.Echo('ERROR: Unable to properly parse security XML file', dsz.ERROR)
        dsz.ui.Echo('Path: ' + fullpath, dsz.ERROR)
        dsz.ui.Echo('Name: ' + name, dsz.ERROR)
        dsz.ui.Echo('Key: ' + key, dsz.ERROR)

    for setting in settingList:
        fullpath = setting.getElementsByTagName('Path')[0].firstChild.data
        name = setting.getElementsByTagName('Name')[0].firstChild.data
        key = setting.getElementsByTagName('RegistryKey')[0].firstChild.data
        # The last path component decides which policy area this belongs to.
        selector = fullpath.split('\\')[-1]
        if selector == 'Account Lockout Policy' or selector == 'Password Policy':
            settingDict[SYSTEM_ACCESS][key] = [fullpath, name, key]
        elif selector == 'Security Options' and (name == 'Accounts: Administrator account status' or name == 'Accounts: Guest account status' or name == 'Accounts: Rename administrator account' or name == 'Accounts: Rename guest account' or name == 'Network access: Allow anonymous SID/Name translation' or name == 'Network security: Force logoff when logon hours expire'):
            # These six options are reported under both areas.
            settingDict[SYSTEM_ACCESS][key] = [fullpath, name, key]
            settingDict[REGISTRY_VALUES][key] = [fullpath, name, key]
        elif selector == 'Security Options':
            settingDict[REGISTRY_VALUES][key] = [fullpath, name, key]
        elif selector == 'Audit Policy':
            settingDict[EVENT_AUDIT][key] = [fullpath, name, key]
        elif selector == 'Kerberos Policy':
            settingDict[KERBEROS_POLICY][key] = [fullpath, name, key]
        elif selector == 'User Rights Assignment':
            settingDict[PRIVILEGE_RIGHTS][key] = [fullpath, name, key]
        elif selector == 'Event Log':
            # Event-log settings are split per log by the setting name.
            if 'application' in name:
                settingDict[APPLICATION_LOG][key] = [fullpath, name, key]
            elif 'security' in name:
                settingDict[SECURITY_LOG][key] = [fullpath, name, key]
            elif 'system' in name:
                settingDict[SYSTEM_LOG][key] = [fullpath, name, key]
            else:
                _echoParseError(fullpath, name, key)
        elif selector == 'Restricted Groups':
            settingDict[RESTRICTED][key] = [fullpath, name, key]
        elif selector == 'System Services':
            settingDict[SERVICES][key] = [fullpath, name, key]
        elif selector == 'Registry':
            settingDict[REGISTRY_KEYS][key] = [fullpath, name, key]
        elif selector == 'File System':
            settingDict[FILE_SECURITY][key] = [fullpath, name, key]
        else:
            _echoParseError(fullpath, name, key)
    return settingDict
def securityPolicy(params, hklmReader):
    """Process and store the HKLM security-policy areas selected in *params*.

    When forced and not running as SYSTEM, temporarily grants the current
    user access to HKLM\\Security and reverts that DACL change at the end
    if it was made.

    Fix vs. original: the local previously named ``file`` shadowed the
    Python 2 builtin; renamed to ``fileSec``.
    """
    settingDict = processSecurityXml()
    modifiedDacl = False
    if params['force'] and hklmReader.grabLocal and not dsz.user.windows.IsSystem():
        username = dsz.user.GetCurrent()
        dsz.ui.Echo('Giving ' + username + ' access to HKLM\\Security')
        modifiedDacl = dsz.cmd.Run('permissions -key L security -modify grant -sid ' + username)
    if params['account']:
        dsz.script.data.Start('Account')
        dsz.ui.Echo(TAB * TABCNT + 'Account Policy(Password, Account Lockout, Kerberos)')
        dsz.ui.Echo(TAB * TABCNT + '----------------------------------------------------------------------------------------')
        kerb = Kerberos('hklm')
        kerb.process(hklmReader, settingDict[KERBEROS_POLICY])
        kerb.store(TABCNT)
        accnt = SystemAccess('hklm')
        accnt.process(hklmReader, settingDict[SYSTEM_ACCESS])
        accnt.store(TABCNT)
        dsz.script.data.End()
    if params['audit']:
        event = EventAudit('hklm')
        event.process(hklmReader, settingDict[EVENT_AUDIT])
        event.store(TABCNT)
    if params['privileges']:
        ura = Privileges('hklm')
        ura.process(hklmReader, settingDict[PRIVILEGE_RIGHTS])
        ura.store(TABCNT)
    if params['options']:
        opt = SecurityOptions('hklm')
        opt.process(hklmReader, settingDict[REGISTRY_VALUES])
        opt.store(TABCNT)
    if params['log']:
        event = EventLog('hklm')
        # EventLog gets the whole dict: it reads the per-log sub-areas.
        event.process(hklmReader, settingDict)
        event.store(TABCNT)
    if params['restricted']:
        restricted = RestrictedGroups('hklm')
        restricted.process(hklmReader)
        restricted.store(TABCNT)
    if params['permissions']:
        dsz.script.data.Start('permissions')
        services = SystemServices('hklm')
        services.process(hklmReader)
        services.store(TABCNT)
        fileSec = FileSecurity('hklm')
        fileSec.process(hklmReader)
        fileSec.store(TABCNT)
        reg = RegistrySecurity('hklm')
        reg.process(hklmReader)
        reg.store(TABCNT)
        dsz.script.data.End()
    if modifiedDacl:
        # Revert the temporary DACL grant made above.
        dsz.cmd.Run('stop permissions')
def queryUser(params):
    """Echo and store HKCU (user) policy data selected by *params*."""
    global TABCNT
    TABCNT += 1
    dsz.script.data.Start('User')
    dsz.ui.Echo(TAB * TABCNT + 'User Policy')
    dsz.ui.Echo('----------------------------------------------------------------------------------------')
    reader = Reader('hkcu', params)
    if params['software']:
        srp = Srp('hkcu')
        srp.process(reader)
        srp.store(TABCNT)
    if params['templates']:
        admin = AdminTemplates('hkcu', RESOURCE_DIR)
        admin.process(reader, Srp.getActualPaths())
        admin.store(TABCNT)
    reader.cleanup()
    dsz.script.data.End()
    TABCNT -= 1
if __name__ == '__main__':
    # Exit non-zero when tasking fails. Fixes vs. original: dropped the
    # redundant re-import of sys (already imported at module top) and the
    # non-idiomatic "!= True" comparison.
    if not TaskingMain(sys.argv[1]):
        sys.exit(-1)
| |
from data_utils import KB, Text, TextKb
import numpy as np
from tqdm import tqdm
class Batcher(object):
    """Iterates padded mini-batches of questions plus their memory slots.

    Depending on ``use_kb_mem`` / ``use_text_mem`` a batch carries KB
    triple memories, textual key/value memories, or both.

    NOTE(review): this code targets Python 2 (it uses ``xrange``).
    """

    def __init__(self, input_file, kb_file, text_kb_file, batch_size, vocab_dir, return_one_epoch=False, shuffle=True,
                 min_num_mem_slots=100,
                 max_num_mem_slots=500,
                 min_num_text_mem_slots=0,
                 max_num_text_mem_slots=1000,
                 use_kb_mem=True,
                 use_text_mem=False):
        """Read the question/KB files and build the padded numpy arrays."""
        self.batch_size = batch_size
        self.input_file = input_file
        self.kb_file = kb_file
        self.text_kb_file = text_kb_file
        self.shuffle = shuffle
        self.max_num_mem_slots = max_num_mem_slots
        self.min_num_mem_slots = min_num_mem_slots
        self.max_num_text_mem_slots = max_num_text_mem_slots
        self.min_num_text_mem_slots = min_num_text_mem_slots
        self.vocab_dir = vocab_dir
        self.return_one_epoch = return_one_epoch
        self.use_kb_mem = use_kb_mem
        self.use_text_mem = use_text_mem
        self.questions, self.q_lengths, self.answers, \
        self.kb_memory_slots, self.kb_num_memories, \
        self.text_key_mem, self.text_key_len, \
        self.text_val_mem, self.num_text_mems = self.read_files()
        # NOTE(review): read_files() already assigns self.max_key_len;
        # this overwrites it with None -- confirm that is intended.
        self.max_key_len = None
        # Sanity-check that read_files returned exactly the memory kinds
        # this configuration asked for.
        if self.use_text_mem and self.use_kb_mem:
            assert self.text_key_mem is not None and self.kb_memory_slots is not None
        elif self.use_kb_mem:
            assert self.text_key_mem is None and self.kb_memory_slots is not None
        else:
            assert self.text_key_mem is not None and self.kb_memory_slots is None
        self.num_questions = len(self.questions)
        print('Num questions {}'.format(self.num_questions))
        self.start_index = 0
        if self.shuffle:
            self.shuffle_data()

    def get_next_batch(self):
        """
        Generator that yields the next batch as a tuple of parallel arrays;
        the tuple layout depends on which memory kinds are enabled.
        Reshuffles (or stops, if return_one_epoch) at the end of each epoch.
        TODO(rajarshd): move the if-check outside the loop, so that conditioned is not checked every damn time. the conditions are suppose to be immutable.
        """
        while True:
            if self.start_index >= self.num_questions:
                if self.return_one_epoch:
                    return  # stop after returning one epoch
                self.start_index = 0
                if self.shuffle:
                    self.shuffle_data()
            else:
                num_data_returned = min(self.batch_size, self.num_questions - self.start_index)
                assert num_data_returned > 0
                end_index = self.start_index + num_data_returned
                if self.use_kb_mem and self.use_text_mem:
                    yield self.questions[self.start_index:end_index], self.q_lengths[self.start_index:end_index], \
                          self.answers[self.start_index:end_index], self.kb_memory_slots[self.start_index:end_index], \
                          self.kb_num_memories[self.start_index:end_index], self.text_key_mem[self.start_index:end_index], \
                          self.text_key_len[self.start_index:end_index], self.text_val_mem[self.start_index:end_index], \
                          self.num_text_mems[self.start_index:end_index]
                elif self.use_kb_mem:
                    yield self.questions[self.start_index:end_index], self.q_lengths[self.start_index:end_index], \
                          self.answers[self.start_index:end_index], self.kb_memory_slots[self.start_index:end_index], \
                          self.kb_num_memories[self.start_index:end_index]
                else:
                    yield self.questions[self.start_index:end_index], self.q_lengths[self.start_index:end_index], \
                          self.answers[self.start_index:end_index], self.text_key_mem[self.start_index:end_index], \
                          self.text_key_len[self.start_index:end_index], self.text_val_mem[self.start_index:end_index], \
                          self.num_text_mems[self.start_index:end_index]
                self.start_index = end_index

    def shuffle_data(self):
        """
        Apply one random permutation to every parallel array so that rows
        stay aligned across questions, answers and memories.
        """
        perm = np.random.permutation(self.num_questions)  # perm of index in range(0, num_questions)
        assert len(perm) == self.num_questions
        if self.use_kb_mem and self.use_text_mem:
            self.questions, self.q_lengths, self.answers, self.kb_memory_slots, self.kb_num_memories, self.text_key_mem,\
            self.text_key_len, self.text_val_mem, self.num_text_mems = \
                self.questions[perm], self.q_lengths[perm], self.answers[perm], self.kb_memory_slots[perm], \
                self.kb_num_memories[perm], self.text_key_mem[perm], self.text_key_len[perm], self.text_val_mem[perm], self.num_text_mems[perm]
        elif self.use_kb_mem:
            self.questions, self.q_lengths, self.answers, self.kb_memory_slots, self.kb_num_memories = \
                self.questions[perm], self.q_lengths[perm], self.answers[perm], self.kb_memory_slots[perm], \
                self.kb_num_memories[perm]
        else:
            self.questions, self.q_lengths, self.answers, self.text_key_mem, self.text_key_len, self.text_val_mem,\
            self.num_text_mems = self.questions[perm], self.q_lengths[perm], self.answers[perm], self.text_key_mem[perm],\
                                 self.text_key_len[perm], self.text_val_mem[perm], self.num_text_mems[perm]

    def reset(self):
        """Rewind the iterator to the start of the data (no reshuffle)."""
        self.start_index = 0

    def read_files(self):
        """reads the kb and text files and creates the numpy arrays after padding"""
        # read the KB file
        kb = KB(self.kb_file, vocab_dir=self.vocab_dir) if self.use_kb_mem else None
        # read text kb file
        text_kb = TextKb(self.text_kb_file, vocab_dir=self.vocab_dir) if self.use_text_mem else None
        self.max_key_len = text_kb.max_key_length if self.use_text_mem else None
        # Question file
        questions = Text(self.input_file,
                         max_num_facts=self.max_num_mem_slots,
                         min_num_facts=self.min_num_mem_slots,
                         min_num_text_facts=self.min_num_text_mem_slots,
                         max_num_text_facts=self.max_num_text_mem_slots)
        max_q_length, max_num_kb_facts, max_num_text_kb_facts, question_list = questions.max_q_length, \
                                                                               questions.max_num_kb_facts, \
                                                                               questions.max_num_text_kb_facts, \
                                                                               questions.question_list
        entity_vocab = kb.entity_vocab if self.use_kb_mem else text_kb.entity_vocab
        relation_vocab = kb.relation_vocab if self.use_kb_mem else text_kb.relation_vocab
        num_questions = len(question_list)
        # Lengths default to -1; question tokens are padded with PAD and
        # answers default to UNK until filled in below.
        question_lengths = np.ones([num_questions]) * -1
        questions = np.ones([num_questions, max_q_length]) * entity_vocab['PAD']
        answers = np.ones_like(question_lengths) * entity_vocab['UNK']
        all_kb_memories = None
        num_kb_memories = None
        text_key_memories = None
        text_key_lengths = None
        text_val_memories = None
        num_text_memories = None
        if self.use_kb_mem:
            print('Make data tensors for kb')
            # KB memories are (e1, r, e2) triples, padded with DUMMY_MEM.
            all_kb_memories = np.ones([num_questions, max_num_kb_facts, 3])
            all_kb_memories[:, :, 0].fill(entity_vocab['DUMMY_MEM'])
            all_kb_memories[:, :, 2].fill(entity_vocab['DUMMY_MEM'])
            all_kb_memories[:, :, 1].fill(relation_vocab['DUMMY_MEM'])
            num_kb_memories = np.ones_like(question_lengths) * -1
            for q_counter, q in enumerate(tqdm(question_list)):
                question_str = q.parsed_question['question']
                question_entities = q.parsed_question['entities']
                question_indices = q.parsed_question['indices']
                q_answers = q.parsed_question['answers']
                # num_kb_memories.append(q.parsed_question['num_facts'])
                num_kb_memories[q_counter] = q.parsed_question['num_facts']
                q_start_indices = np.asarray(q.parsed_question['start_indices'])
                q_fact_lengths = np.asarray(
                    q.parsed_question['fact_lengths'])  # for each entity in question retrieve the fact
                # Process shorter fact lists first.
                sorted_index = np.argsort(q_fact_lengths)
                q_fact_lengths = q_fact_lengths[sorted_index]
                q_start_indices = q_start_indices[sorted_index]
                question_words_list = question_str.split(' ')
                for counter, index in enumerate(question_indices):  # replace the entities with their ids
                    question_words_list[index] = question_entities[counter]
                question_int = [entity_vocab[w_q] if w_q.strip() in entity_vocab else entity_vocab['UNK'] for w_q in
                                question_words_list]
                question_len = len(question_int)
                questions[q_counter, 0:question_len] = question_int
                question_lengths[q_counter] = question_len
                # Only the first answer is kept per question.
                answer_int = [entity_vocab[a] if a in entity_vocab else entity_vocab['UNK'] for a in q_answers]
                answers[q_counter] = answer_int[0]
                # memories
                kb_facts = kb.facts
                mem_counter = 0
                for counter, start_index in enumerate(q_start_indices):
                    num_facts = q_fact_lengths[counter]
                    if mem_counter < self.max_num_mem_slots:
                        for mem_index in xrange(start_index, start_index + num_facts):
                            mem = kb_facts[mem_index]
                            e1_int = entity_vocab[mem['e1']] if mem['e1'] in entity_vocab else entity_vocab['UNK']
                            e2_int = entity_vocab[mem['e2']] if mem['e2'] in entity_vocab else entity_vocab['UNK']
                            r_int = relation_vocab[mem['r']] if mem['r'] in relation_vocab else relation_vocab['UNK']
                            all_kb_memories[q_counter][mem_counter][0] = e1_int
                            all_kb_memories[q_counter][mem_counter][1] = r_int
                            all_kb_memories[q_counter][mem_counter][2] = e2_int
                            mem_counter += 1
                            if mem_counter == self.max_num_mem_slots:  # will use the first max_num_mem_slots slots
                                break
        if self.use_text_mem:
            print('Make data tensors for text kb')
            max_key_len = text_kb.max_key_length
            text_key_memories = np.ones([num_questions, max_num_text_kb_facts, max_key_len]) * entity_vocab['DUMMY_MEM']
            text_key_lengths = np.zeros([num_questions, max_num_text_kb_facts])
            text_val_memories = np.ones([num_questions, max_num_text_kb_facts]) * entity_vocab['DUMMY_MEM']
            num_text_memories = np.ones_like(question_lengths) * -1
            for q_counter, q in enumerate(tqdm(question_list)):
                # TODO (rajarshd): Move the repeated piece of code in a method.
                question_str = q.parsed_question['question']
                question_entities = q.parsed_question['entities']
                question_indices = q.parsed_question['indices']
                q_answers = q.parsed_question['answers']
                question_words_list = question_str.split(' ')
                for counter, index in enumerate(question_indices):  # replace the entities with their ids
                    question_words_list[index] = question_entities[counter]
                question_int = [entity_vocab[w_q] if w_q.strip() in entity_vocab else entity_vocab['UNK'] for w_q in
                                question_words_list]
                question_len = len(question_int)
                questions[q_counter, 0:question_len] = question_int
                question_lengths[q_counter] = question_len
                answer_int = [entity_vocab[a] if a in entity_vocab else entity_vocab['UNK'] for a in q_answers]
                answers[q_counter] = answer_int[0]
                # memories
                num_q_text_memories = q.parsed_question['text_kb_num_facts']
                # in the training set, account for the discarded memories
                if 'black_lists' in q.parsed_question:
                    num_discarded = 0
                    for black_list in q.parsed_question['black_lists']:
                        num_discarded += len(black_list)
                    num_q_text_memories -= num_discarded
                num_text_memories[q_counter] = num_q_text_memories
                q_start_indices = np.asarray(q.parsed_question['text_kb_start_indices'])
                q_fact_lengths = np.asarray(
                    q.parsed_question['text_kb_lengths'])  # for each entity in question retrieve the fact
                q_black_lists = np.asarray(
                    q.parsed_question['black_lists']) if 'black_lists' in q.parsed_question else None
                sorted_index = np.argsort(q_fact_lengths)
                q_fact_lengths = q_fact_lengths[sorted_index]
                q_start_indices = q_start_indices[sorted_index]
                q_black_lists = q_black_lists[sorted_index] if q_black_lists is not None else None
                text_kb_facts = text_kb.facts_list
                mem_counter = 0
                for counter, start_index in enumerate(q_start_indices):
                    num_facts = q_fact_lengths[counter]
                    black_list_entity = set(q_black_lists[counter]) if q_black_lists is not None else None
                    if mem_counter < self.max_num_text_mem_slots:
                        for mem_entity_counter, mem_index in enumerate(xrange(start_index, start_index + num_facts)):
                            # Skip facts that were black-listed for this entity.
                            if black_list_entity is not None and mem_entity_counter in black_list_entity:
                                continue
                            mem = text_kb_facts[mem_index]
                            key = mem['key']
                            key_int = [entity_vocab[k] if k in entity_vocab else entity_vocab['UNK'] for k in key]
                            val = mem['value']
                            val_int = entity_vocab[val] if val in entity_vocab else entity_vocab['UNK']
                            key_len = int(mem['key_length'])
                            text_key_memories[q_counter][mem_counter][0:key_len] = key_int
                            text_val_memories[q_counter][mem_counter] = val_int
                            text_key_lengths[q_counter][mem_counter] = key_len
                            mem_counter += 1
                            if mem_counter == self.max_num_text_mem_slots:  # will use the first max_num_mem_slots slots
                                break
        return questions, question_lengths, answers, all_kb_memories, num_kb_memories, \
               text_key_memories, text_key_lengths, text_val_memories, num_text_memories
| |
from __future__ import absolute_import
import csv
import json
from django.http import HttpResponse
from fae2.settings import SITE_URL
from reports.models import WebsiteReport
from pageResults.models import PageRuleCategoryResult
from pageResults.models import PageGuidelineResult
from pageResults.models import PageRuleScopeResult
from ruleCategories.models import RuleCategory
from wcag20.models import Guideline
from rules.models import RuleScope
def get_implementation_status(impl_status):
    """Map an internal implementation-status code to its CSV label.

    Known codes ending in 'MC' (manual check) are reported with an 'R'
    suffix in place of the 'MC' part; other known codes pass through
    unchanged; anything unrecognized becomes 'na'.
    """
    if impl_status in ['C', 'AC', 'AC-MC', 'PI', 'PI-MC', 'NI', 'NI-MC', 'MC']:
        if 'MC' in impl_status:
            # Replace the trailing 'MC' with 'R'. The original used
            # str.strip('MC'), which strips a *set of characters* from both
            # ends and only worked by accident for this value set.
            return impl_status[:-len('MC')] + 'R'
        else:
            return impl_status
    else:
        return 'na'
def get_result(result_value):
    """Translate a numeric page/rule result code into its display string."""
    labels = {
        5: 'Violation',
        4: 'Warning',
        3: 'Manual Check',
        2: 'Passed',
        1: 'Not Applicable',
    }
    # Unknown codes yield None, matching the original fall-through.
    return labels.get(result_value)
def get_element_result(result_value):
    """Translate a numeric element result code into its display string."""
    labels = {
        5: 'Violation',
        4: 'Warning',
        3: 'Manual Check',
        2: 'Hidden',
        1: 'Pass',
    }
    # Unknown codes yield None, matching the original fall-through.
    return labels.get(result_value)
def addReportInformation(report_obj, writer, path):
    """Write the report-level metadata rows (title, URL, ruleset, ...)."""
    rows = [
        ['Meta Label', 'Meta Value'],
        ['Title', report_obj.title],
        ['URL', report_obj.url],
        ['Ruleset', report_obj.ruleset.title],
        ['Depth', report_obj.depth],
        ['Pages', report_obj.page_count],
        ['Report URL', SITE_URL + path + '/'],
        [],
    ]
    for row in rows:
        writer.writerow(row)
def addRuleInformation(rule, writer):
    """Write the identifying rows for a single rule, plus a blank spacer."""
    for row in (['Rule', rule.nls_rule_id],
                ['Summary', rule.summary_text],
                ['WCAG', rule.wcag_primary],
                []):
        writer.writerow(row)
def addGroupInformation(group, writer):
    """Write the group title plus one numbered row per rule in the group.

    Uses enumerate(start=1) instead of the original hand-maintained
    counter variable.
    """
    writer.writerow(['Rule Group', group.title])
    for count, r in enumerate(group.get_rules(), 1):
        # e.g. ['Rule 1', 'R12: summary text (1.1.1)']
        writer.writerow(['Rule ' + str(count), r.nls_rule_id + ': ' + r.summary_text + ' (' + str(r.wcag_primary) + ')'])
    writer.writerow([])
def addPageInformation(page, writer):
    """Write the identifying rows for a single page, plus a blank spacer."""
    for row in (['Page Number', page.page_number],
                ['Page Title', page.title],
                ['Page URL', page.url],
                []):
        writer.writerow(row)
def cleanMessage(message):
    """Replace <code>...</code> markup with plain single quotes."""
    for tag in ('<code>', '</code>'):
        message = message.replace(tag, "'")
    return message
def ReportRulesViewCSV(request, report, view):
    """CSV summary of rule-group results for a website report.

    ``view`` selects the grouping: 'gl' = WCAG guidelines, 'rs' = rule
    scopes, anything else = rule categories. Single-page reports use the
    page-level result sets; multi-page reports use the website-level ones.
    """
    # Create the HttpResponse object with the appropriate CSV header.
    response = HttpResponse(content_type='text/csv')
    # Derive the download filename from the request path.
    response['Content-Disposition'] = 'attachment; filename="' + \
                                      request.path.replace('/csv/', '').replace('/', '-').strip('-') + '.csv"'
    writer = csv.writer(response)
    report_obj = WebsiteReport.objects.get(slug=report)
    addReportInformation(report_obj, writer, request.path.replace('/csv/', ''))
    writer.writerow(['Rule Group', 'Violations', 'Warnings', 'Manual Check', 'Passed', 'N/A', 'Score', 'Status'])
    page = False
    if report_obj.page_count == 1:
        # Single-page report: use that page's own grouped results.
        page = report_obj.get_first_page()
        if view == 'gl':
            groups = page.page_gl_results.all()
        elif view == 'rs':
            groups = page.page_rs_results.all()
        else:
            groups = page.page_rc_results.all()
    else:
        if view == 'gl':
            groups = report_obj.ws_gl_results.all()
        elif view == 'rs':
            groups = report_obj.ws_rs_results.all()
        else:
            groups = report_obj.ws_rc_results.all()
    for g in groups:
        writer.writerow(
            [g.get_title(), g.rules_violation, g.rules_warning, g.rules_manual_check, g.rules_passed, g.rules_na,
             g.implementation_score, get_implementation_status(g.implementation_status)])
    # Trailing summary row aggregating across all groups in the report.
    writer.writerow(
        ['All Report Groups', report_obj.rules_violation, report_obj.rules_warning, report_obj.rules_manual_check,
         report_obj.rules_passed, report_obj.rules_na, report_obj.implementation_score,
         get_implementation_status(report_obj.implementation_status)])
    return response
def ReportRulesGroupViewCSV(request, report, view, group):
    """CSV of per-rule results for one rule group of a website report."""
    response = HttpResponse(content_type='text/csv')
    filename = request.path.replace('/csv/', '').replace('/', '-').strip('-')
    response['Content-Disposition'] = 'attachment; filename="' + filename + '.csv"'
    writer = csv.writer(response)
    report_obj = WebsiteReport.objects.get(slug=report)
    addReportInformation(report_obj, writer, request.path.replace('/csv/', ''))
    writer.writerow(
        ['ID', 'Rule Summary', 'Result', 'Violations', 'Warnings', 'Manual Check', 'Passed', 'N/A', 'Score', 'Status'])
    # Resolve the group result set matching the requested view.
    if view == 'gl':
        group = report_obj.ws_gl_results.get(slug=group)
    elif view == 'rs':
        group = report_obj.ws_rs_results.get(slug=group)
    else:
        group = report_obj.ws_rc_results.get(slug=group)
    for result in group.ws_rule_results.all():
        writer.writerow(
            [result.rule.nls_rule_id, result.get_title(), get_result(result.result_value),
             result.pages_violation, result.pages_warning, result.pages_manual_check,
             result.pages_passed, result.pages_na, result.implementation_score,
             get_implementation_status(result.implementation_status)])
    return response
def ReportRulesGroupRuleViewCSV(request, report, view, group, rule):
    """CSV of per-page results for one rule within one rule group."""
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="' + \
                                      request.path.replace('/csv/', '').replace('/', '-').strip('-') + '.csv"'
    writer = csv.writer(response)
    report_obj = WebsiteReport.objects.get(slug=report)
    addReportInformation(report_obj, writer, request.path.replace('/csv/', ''))
    # Resolve the grouping model for the requested view ('gl'/'rs'/other).
    if view == 'gl':
        group = report_obj.ws_gl_results.get(slug=group)
    elif view == 'rs':
        group = report_obj.ws_rs_results.get(slug=group)
    else:
        group = report_obj.ws_rc_results.get(slug=group)
    ws_rule_result = group.ws_rule_results.get(slug=rule)
    addRuleInformation(ws_rule_result.rule, writer)
    writer.writerow(
        ['Page', 'Page Title', 'Result', 'Elements Violation', 'Elements Warning', 'Elements Manual Check', 'Elements Passed', 'Score', 'Status'])
    for wsr in ws_rule_result.page_rule_results.all():
        writer.writerow(
            [wsr.page_result.page_number, wsr.page_result.title, get_result(wsr.result_value), wsr.elements_violation,
             wsr.elements_warning, wsr.elements_mc_identified, wsr.elements_passed, wsr.implementation_score,
             get_implementation_status(wsr.implementation_status)])
    # Final row: totals across all pages for this rule.
    writer.writerow([None, 'All Pages', get_result(ws_rule_result.result_value), ws_rule_result.elements_violation,
                     ws_rule_result.elements_warning, ws_rule_result.elements_mc_identified,
                     ws_rule_result.elements_passed, ws_rule_result.implementation_score,
                     get_implementation_status(ws_rule_result.implementation_status)])
    return response
def ReportRulesGroupRulePageViewCSV(request, report, view, group, rule, page):
    """CSV of element-level results for one rule on one page."""
    response = HttpResponse(content_type='text/csv')
    filename = request.path.replace('/csv/', '').replace('/', '-').strip('-')
    response['Content-Disposition'] = 'attachment; filename="' + filename + '.csv"'
    writer = csv.writer(response)
    report_obj = WebsiteReport.objects.get(slug=report)
    addReportInformation(report_obj, writer, request.path.replace('/csv/', ''))
    # Resolve the grouping model for the requested view.
    if view == 'gl':
        group = report_obj.ws_gl_results.get(slug=group)
    elif view == 'rs':
        group = report_obj.ws_rs_results.get(slug=group)
    else:
        group = report_obj.ws_rc_results.get(slug=group)
    ws_rule_result = group.ws_rule_results.get(slug=rule)
    page_rule_result = ws_rule_result.page_rule_results.get(page_result__page_number=page)
    addRuleInformation(page_rule_result.rule, writer)
    addPageInformation(page_rule_result.page_result, writer)
    writer.writerow(['Element Identifier', 'Result', 'Element Position', 'Message'])
    # Element results are stored as a JSON blob on the page rule result.
    for element in json.loads(page_rule_result.element_results_json):
        writer.writerow([element['element_identifier'],
                         get_element_result(int(element['result_value'])),
                         element['ordinal_position'],
                         cleanMessage(element['message'])])
    return response
def ReportPagesViewCSV(request, report, view):
    """CSV of per-page summary results across the whole website report."""
    response = HttpResponse(content_type='text/csv')
    filename = request.path.replace('/csv/', '').replace('/', '-').strip('-')
    response['Content-Disposition'] = 'attachment; filename="' + filename + '.csv"'
    writer = csv.writer(response)
    report = WebsiteReport.objects.get(slug=report)
    addReportInformation(report, writer, request.path.replace('/csv/', ''))
    writer.writerow(
        ['Page', 'Page Title', 'URL', 'Result', 'Violations', 'Warnings', 'Manual Check', 'Passed', 'Not Applicable', 'Score', 'Status'])
    for page_result in report.page_all_results.all():
        writer.writerow(
            [page_result.page_number, page_result.get_title(), page_result.url,
             get_result(page_result.result_value), page_result.rules_violation,
             page_result.rules_warning, page_result.rules_manual_check,
             page_result.rules_passed, page_result.rules_na,
             page_result.implementation_score,
             get_implementation_status(page_result.implementation_status)])
    return response
def ReportPagesGroupViewCSV(request, report, view, group):
    """CSV of per-page results restricted to one rule group.

    Removed two dead assignments from the original: ``page = False`` was
    never read, and ``view = 'rc'`` was never used after assignment.
    """
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="' + request.path.replace('/csv/', '').replace('/', '-').strip('-') + '.csv"'
    writer = csv.writer(response)
    report = WebsiteReport.objects.get(slug=report)
    addReportInformation(report, writer, request.path.replace('/csv/', ''))
    # Resolve both the per-page result rows and the group object itself.
    if view == 'gl':
        page_results = PageGuidelineResult.objects.filter(page_result__ws_report=report,
                                                          guideline__slug=group)
        group = Guideline.objects.get(slug=group)
    elif view == 'rs':
        page_results = PageRuleScopeResult.objects.filter(page_result__ws_report=report,
                                                          rule_scope__slug=group)
        group = RuleScope.objects.get(slug=group)
    else:
        page_results = PageRuleCategoryResult.objects.filter(page_result__ws_report=report,
                                                             rule_category__slug=group)
        group = RuleCategory.objects.get(slug=group)
    addGroupInformation(group, writer)
    writer.writerow(
        ['Page', 'Page Title', 'URL', 'Result', 'Violations', 'Warnings', 'Manual Check', 'Passed', 'Not Applicable', 'Score', 'Status'])
    for pr in page_results:
        writer.writerow(
            [pr.page_result.page_number, pr.page_result.get_title(), pr.page_result.url, get_result(pr.result_value), pr.rules_violation,
             pr.rules_warning, pr.rules_manual_check, pr.rules_passed, pr.rules_na, pr.implementation_score,
             get_implementation_status(pr.implementation_status)])
    return response
def ReportPageViewCSV(request, report, view, page):
    """Export the per-rule-group summary of a single report page as CSV.

    Parameters
    ----------
    request : HttpRequest
    report : str
        Slug of the WebsiteReport to export.
    view : str
        Grouping selector: 'gl' (guideline), 'rs' (rule scope); any other
        value falls back to rule category.
    page : int/str
        Page number within the report.

    Fixed: removed the dead `view_opt = 'rc'` assignment (never read).
    """
    response = HttpResponse(content_type='text/csv')
    # Derive the download filename from the URL path.
    response['Content-Disposition'] = 'attachment; filename="' + request.path.replace('/csv/', '').replace('/', '-').strip('-') + '.csv"'
    writer = csv.writer(response)
    report = WebsiteReport.objects.get(slug=report)
    page = report.page_all_results.get(page_number=page)
    addReportInformation(report, writer, request.path.replace('/csv/', ''))
    addPageInformation(page, writer)
    writer.writerow(['Rule Group', 'Violations', 'Warnings', 'Manual Check', 'Passed', 'N/A', 'Score', 'Status'])
    if view == 'gl':
        groups = page.page_gl_results.all()
    elif view == 'rs':
        groups = page.page_rs_results.all()
    else:
        groups = page.page_rc_results.all()
    for g in groups:
        writer.writerow(
            [g.get_title(), g.rules_violation, g.rules_warning, g.rules_manual_check, g.rules_passed, g.rules_na,
             g.implementation_score, get_implementation_status(g.implementation_status)])
    return response
def ReportPageGroupViewCSV(request, report, view, group, page):
    """Export rule-level results for one rule group on a single page of a
    website report as a CSV attachment.

    `view` selects the group type: 'gl' (guideline), 'rs' (rule scope);
    any other value falls back to rule category.
    """
    response = HttpResponse(content_type='text/csv')
    # Derive the download filename from the URL path.
    response['Content-Disposition'] = 'attachment; filename="' + request.path.replace('/csv/', '').replace('/', '-').strip('-') + '.csv"'
    writer = csv.writer(response)
    report = WebsiteReport.objects.get(slug=report)
    page = report.page_all_results.get(page_number=page)
    addReportInformation(report, writer, request.path.replace('/csv/', ''))
    addPageInformation(page, writer)
    writer.writerow(['Rule ID', 'Rule Summary', 'Element Violations', 'Element Warnings', 'Element Manual Check', 'Element Passed', 'Element Hidden', 'Score', 'Status'])
    if view == 'gl':
        group_results = page.page_gl_results.get(slug=group)
        # NOTE(review): `group_info` is never read below; presumably the
        # .get() is kept so an invalid slug raises DoesNotExist — confirm
        # before removing these three lookups.
        group_info = Guideline.objects.get(slug=group)
    elif view == 'rs':
        group_results = page.page_rs_results.get(slug=group)
        group_info = RuleScope.objects.get(slug=group)
    else:
        group_results = page.page_rc_results.get(slug=group)
        group_info = RuleCategory.objects.get(slug=group)
    for prr in group_results.page_rule_results.all():
        writer.writerow(
            [prr.rule.nls_rule_id, prr.rule.summary_text, prr.elements_violation, prr.elements_warning, prr.elements_mc_identified, prr.elements_passed, prr.elements_hidden,
             prr.implementation_score, get_implementation_status(prr.implementation_status)])
    return response
def ReportPageGroupRuleViewCSV(request, report, view, group, page, rule):
    """CSV export for a single rule on one page; the page/rule argument
    order differs from the URLconf, so delegate with arguments swapped."""
    return ReportRulesGroupRulePageViewCSV(request, report, view, group, rule, page)
| |
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: http://matthewdhoffman.com/code/onlineldavb.tar
"""
# Author: Chyi-Kwei Yau
# Author: Matthew D. Hoffman (original onlineldavb implementation)
import numpy as np
import scipy.sparse as sp
from scipy.special import gammaln
import warnings
from ..base import BaseEstimator, TransformerMixin
from ..utils import (check_random_state, check_array,
gen_batches, gen_even_slices, _get_n_jobs)
from ..utils.fixes import logsumexp
from ..utils.validation import check_non_negative
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
from ..exceptions import NotFittedError
from ._online_lda import (mean_change, _dirichlet_expectation_1d,
_dirichlet_expectation_2d)
EPS = np.finfo(np.float).eps
def _update_doc_distribution(X, exp_topic_word_distr, doc_topic_prior,
                             max_iters,
                             mean_change_tol, cal_sstats, random_state):
    """E-step: update document-topic distribution.
    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Document word matrix.
    exp_topic_word_distr : dense matrix, shape=(n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
        In the literature, this is `exp(E[log(beta)])`.
    doc_topic_prior : float
        Prior of document topic distribution `theta`.
    max_iters : int
        Max number of iterations for updating document topic distribution in
        the E-step.
    mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-step.
    cal_sstats : boolean
        Parameter that indicate to calculate sufficient statistics or not.
        Set `cal_sstats` to `True` when we need to run M-step.
    random_state : RandomState instance or None
        Parameter that indicate how to initialize document topic distribution.
        Set `random_state` to None will initialize document topic distribution
        to a constant number.
    Returns
    -------
    (doc_topic_distr, suff_stats) :
        `doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. we can calculate `E[log(theta)]`
        from it.
        `suff_stats` is expected sufficient statistics for the M-step.
        When `cal_sstats == False`, this will be None.
    """
    is_sparse_x = sp.issparse(X)
    n_samples, n_features = X.shape
    n_topics = exp_topic_word_distr.shape[0]
    # Random gamma(100, 0.01) init during training; all-ones when
    # random_state is None so repeated inference runs are deterministic.
    if random_state:
        doc_topic_distr = random_state.gamma(100., 0.01, (n_samples, n_topics))
    else:
        doc_topic_distr = np.ones((n_samples, n_topics))
    # In the literature, this is `exp(E[log(theta)])`
    exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))
    # diff on `component_` (only calculate it when `cal_diff` is True)
    suff_stats = np.zeros(exp_topic_word_distr.shape) if cal_sstats else None
    if is_sparse_x:
        X_data = X.data
        X_indices = X.indices
        X_indptr = X.indptr
    for idx_d in xrange(n_samples):
        # Gather the word ids and counts of document `idx_d`.
        if is_sparse_x:
            ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
        else:
            ids = np.nonzero(X[idx_d, :])[0]
            cnts = X[idx_d, ids]
        doc_topic_d = doc_topic_distr[idx_d, :]
        # The next one is a copy, since the inner loop overwrites it.
        exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
        exp_topic_word_d = exp_topic_word_distr[:, ids]
        # Iterate between `doc_topic_d` and `norm_phi` until convergence
        for _ in xrange(0, max_iters):
            last_d = doc_topic_d
            # The optimal phi_{dwk} is proportional to
            # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
            doc_topic_d = (exp_doc_topic_d *
                           np.dot(cnts / norm_phi, exp_topic_word_d.T))
            # Note: adds doc_topic_prior to doc_topic_d, in-place.
            _dirichlet_expectation_1d(doc_topic_d, doc_topic_prior,
                                      exp_doc_topic_d)
            if mean_change(last_d, doc_topic_d) < mean_change_tol:
                break
        doc_topic_distr[idx_d, :] = doc_topic_d
        # Contribution of document d to the expected sufficient
        # statistics for the M step.
        if cal_sstats:
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + EPS
            suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)
    return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(BaseEstimator, TransformerMixin):
"""Latent Dirichlet Allocation with online variational Bayes algorithm
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_components : int, optional (default=10)
Number of topics.
doc_topic_prior : float, optional (default=None)
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_components`.
In the literature, this is called `alpha`.
topic_word_prior : float, optional (default=None)
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_components`.
In the literature, this is called `eta`.
learning_method : 'batch' | 'online', default='online'
Method used to update `_component`. Only used in `fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
The default learning method is going to be changed to 'batch' in the
0.20 release.
Valid options::
'batch': Batch variational Bayes method. Use all training data in
each EM update.
Old `components_` will be overwritten in each iteration.
'online': Online variational Bayes method. In each EM update, use
mini-batch of training data to update the ``components_``
variable incrementally. The learning rate is controlled by the
``learning_decay`` and the ``learning_offset`` parameters.
learning_decay : float, optional (default=0.7)
It is a parameter that control learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
``n_samples``, the update method is same as batch learning. In the
literature, this is called kappa.
learning_offset : float, optional (default=10.)
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : integer, optional (default=10)
The maximum number of iterations.
batch_size : int, optional (default=128)
Number of documents to use in each EM iteration. Only used in online
learning.
evaluate_every : int optional (default=0)
How often to evaluate perplexity. Only used in `fit` method.
        set it to 0 or negative number to not evaluate perplexity in
training at all. Evaluating perplexity can help you check convergence
in training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
total_samples : int, optional (default=1e6)
Total number of documents. Only used in the `partial_fit` method.
perp_tol : float, optional (default=1e-1)
Perplexity tolerance in batch learning. Only used when
``evaluate_every`` is greater than 0.
mean_change_tol : float, optional (default=1e-3)
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int (default=100)
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, optional (default=1)
The number of jobs to use in the E-step. If -1, all CPUs are used. For
``n_jobs`` below -1, (n_cpus + 1 + n_jobs) are used.
verbose : int, optional (default=0)
Verbosity level.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_topics : int, optional (default=None)
This parameter has been renamed to n_components and will
be removed in version 0.21.
.. deprecated:: 0.19
Attributes
----------
components_ : array, [n_components, n_features]
Variational parameters for topic word distribution. Since the complete
conditional for topic word distribution is a Dirichlet,
``components_[i, j]`` can be viewed as pseudocount that represents the
number of times word `j` was assigned to topic `i`.
It can also be viewed as distribution over the words for each topic
after normalization:
``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``.
n_batch_iter_ : int
Number of iterations of the EM step.
n_iter_ : int
Number of passes over the dataset.
References
----------
[1] "Online Learning for Latent Dirichlet Allocation", Matthew D. Hoffman,
David M. Blei, Francis Bach, 2010
[2] "Stochastic Variational Inference", Matthew D. Hoffman, David M. Blei,
Chong Wang, John Paisley, 2013
[3] Matthew D. Hoffman's onlineldavb code. Link:
http://matthewdhoffman.com//code/onlineldavb.tar
"""
    def __init__(self, n_components=10, doc_topic_prior=None,
                 topic_word_prior=None, learning_method=None,
                 learning_decay=.7, learning_offset=10., max_iter=10,
                 batch_size=128, evaluate_every=-1, total_samples=1e6,
                 perp_tol=1e-1, mean_change_tol=1e-3, max_doc_update_iter=100,
                 n_jobs=1, verbose=0, random_state=None, n_topics=None):
        # Store hyper-parameters verbatim; validation happens later in
        # _check_params() (called from fit/partial_fit), keeping __init__
        # side-effect free.  See the class docstring for parameter meanings.
        self.n_components = n_components
        self.doc_topic_prior = doc_topic_prior
        self.topic_word_prior = topic_word_prior
        self.learning_method = learning_method
        self.learning_decay = learning_decay
        self.learning_offset = learning_offset
        self.max_iter = max_iter
        self.batch_size = batch_size
        self.evaluate_every = evaluate_every
        self.total_samples = total_samples
        self.perp_tol = perp_tol
        self.mean_change_tol = mean_change_tol
        self.max_doc_update_iter = max_doc_update_iter
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.random_state = random_state
        self.n_topics = n_topics
def _check_params(self):
"""Check model parameters."""
if self.n_topics is not None:
self._n_components = self.n_topics
warnings.warn("n_topics has been renamed to n_components in "
"version 0.19 and will be removed in 0.21",
DeprecationWarning)
else:
self._n_components = self.n_components
if self._n_components <= 0:
raise ValueError("Invalid 'n_components' parameter: %r"
% self._n_components)
if self.total_samples <= 0:
raise ValueError("Invalid 'total_samples' parameter: %r"
% self.total_samples)
if self.learning_offset < 0:
raise ValueError("Invalid 'learning_offset' parameter: %r"
% self.learning_offset)
if self.learning_method not in ("batch", "online", None):
raise ValueError("Invalid 'learning_method' parameter: %r"
% self.learning_method)
def _init_latent_vars(self, n_features):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1. / self._n_components
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1. / self._n_components
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.
init_var = 1. / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self._n_components, n_features))
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_))
    def _e_step(self, X, cal_sstats, random_init, parallel=None):
        """E-step in EM update.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        cal_sstats : boolean
            Parameter that indicate whether to calculate sufficient statistics
            or not. Set ``cal_sstats`` to True when we need to run M-step.
        random_init : boolean
            Parameter that indicate whether to initialize document topic
            distribution randomly in the E-step. Set it to True in training
            steps.
        parallel : joblib.Parallel (optional)
            Pre-initialized instance of joblib.Parallel.
        Returns
        -------
        (doc_topic_distr, suff_stats) :
            `doc_topic_distr` is unnormalized topic distribution for each
            document. In the literature, this is called `gamma`.
            `suff_stats` is expected sufficient statistics for the M-step.
            When `cal_sstats == False`, it will be None.
        """
        # Run e-step in parallel
        random_state = self.random_state_ if random_init else None
        # TODO: make Parallel._effective_n_jobs public instead?
        n_jobs = _get_n_jobs(self.n_jobs)
        if parallel is None:
            parallel = Parallel(n_jobs=n_jobs, verbose=max(0,
                                self.verbose - 1))
        # Split the documents into n_jobs even row slices; each worker
        # updates its slice's doc-topic distribution independently.
        results = parallel(
            delayed(_update_doc_distribution)(X[idx_slice, :],
                                              self.exp_dirichlet_component_,
                                              self.doc_topic_prior_,
                                              self.max_doc_update_iter,
                                              self.mean_change_tol, cal_sstats,
                                              random_state)
            for idx_slice in gen_even_slices(X.shape[0], n_jobs))
        # merge result
        doc_topics, sstats_list = zip(*results)
        doc_topic_distr = np.vstack(doc_topics)
        if cal_sstats:
            # This step finishes computing the sufficient statistics for the
            # M-step.
            suff_stats = np.zeros(self.components_.shape)
            for sstats in sstats_list:
                suff_stats += sstats
            suff_stats *= self.exp_dirichlet_component_
        else:
            suff_stats = None
        return (doc_topic_distr, suff_stats)
    def _em_step(self, X, total_samples, batch_update, parallel=None):
        """EM update for 1 iteration.
        update `_component` by batch VB or online VB.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        total_samples : integer
            Total number of documents. It is only used when
            batch_update is `False`.
        batch_update : boolean
            Parameter that controls updating method.
            `True` for batch learning, `False` for online learning.
        parallel : joblib.Parallel
            Pre-initialized instance of joblib.Parallel
        Returns
        -------
        None
            ``components_``, ``exp_dirichlet_component_`` and
            ``n_batch_iter_`` are updated in place on ``self``; nothing is
            returned.  (The previous docstring incorrectly claimed a
            doc_topic_distr return value.)
        """
        # E-step
        _, suff_stats = self._e_step(X, cal_sstats=True, random_init=True,
                                     parallel=parallel)
        # M-step
        if batch_update:
            self.components_ = self.topic_word_prior_ + suff_stats
        else:
            # online update
            # In the literature, the weight is `rho`
            weight = np.power(self.learning_offset + self.n_batch_iter_,
                              -self.learning_decay)
            # Scale the mini-batch statistics up to the full corpus size.
            doc_ratio = float(total_samples) / X.shape[0]
            self.components_ *= (1 - weight)
            self.components_ += (weight * (self.topic_word_prior_
                                           + doc_ratio * suff_stats))
        # update `component_` related variables
        self.exp_dirichlet_component_ = np.exp(
            _dirichlet_expectation_2d(self.components_))
        self.n_batch_iter_ += 1
        return
def _check_non_neg_array(self, X, whom):
"""check X format
check X format and make sure no negative value in X.
Parameters
----------
X : array-like or sparse matrix
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, whom)
return X
def partial_fit(self, X, y=None):
"""Online VB with Mini-Batch update.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
self
"""
self._check_params()
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.partial_fit")
n_samples, n_features = X.shape
batch_size = self.batch_size
# initialize parameters or check
if not hasattr(self, 'components_'):
self._init_latent_vars(n_features)
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
n_jobs = _get_n_jobs(self.n_jobs)
with Parallel(n_jobs=n_jobs, verbose=max(0,
self.verbose - 1)) as parallel:
for idx_slice in gen_batches(n_samples, batch_size):
self._em_step(X[idx_slice, :],
total_samples=self.total_samples,
batch_update=False,
parallel=parallel)
return self
    def fit(self, X, y=None):
        """Learn model for the data X with variational Bayes method.
        When `learning_method` is 'online', use mini-batch update.
        Otherwise, use batch update.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        Returns
        -------
        self
        """
        self._check_params()
        X = self._check_non_neg_array(X, "LatentDirichletAllocation.fit")
        n_samples, n_features = X.shape
        max_iter = self.max_iter
        evaluate_every = self.evaluate_every
        learning_method = self.learning_method
        # Deprecation path: an unset learning_method still means 'online'.
        if learning_method is None:
            warnings.warn("The default value for 'learning_method' will be "
                          "changed from 'online' to 'batch' in the release "
                          "0.20. This warning was introduced in 0.18.",
                          DeprecationWarning)
            learning_method = 'online'
        batch_size = self.batch_size
        # initialize parameters
        self._init_latent_vars(n_features)
        # change to perplexity later
        last_bound = None
        n_jobs = _get_n_jobs(self.n_jobs)
        with Parallel(n_jobs=n_jobs, verbose=max(0,
                      self.verbose - 1)) as parallel:
            for i in xrange(max_iter):
                if learning_method == 'online':
                    # One online EM update per mini-batch.
                    for idx_slice in gen_batches(n_samples, batch_size):
                        self._em_step(X[idx_slice, :], total_samples=n_samples,
                                      batch_update=False, parallel=parallel)
                else:
                    # batch update
                    self._em_step(X, total_samples=n_samples,
                                  batch_update=True, parallel=parallel)
                # check perplexity
                if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
                    doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
                                                       random_init=False,
                                                       parallel=parallel)
                    bound = self._perplexity_precomp_distr(X, doc_topics_distr,
                                                           sub_sampling=False)
                    if self.verbose:
                        print('iteration: %d of max_iter: %d, perplexity: %.4f'
                              % (i + 1, max_iter, bound))
                    # Early stop once perplexity improvement drops below
                    # perp_tol.
                    if last_bound and abs(last_bound - bound) < self.perp_tol:
                        break
                    last_bound = bound
                elif self.verbose:
                    print('iteration: %d of max_iter: %d' % (i + 1, max_iter))
                self.n_iter_ += 1
            # calculate final perplexity value on train set
            doc_topics_distr, _ = self._e_step(X, cal_sstats=False,
                                               random_init=False,
                                               parallel=parallel)
            self.bound_ = self._perplexity_precomp_distr(X, doc_topics_distr,
                                                         sub_sampling=False)
        return self
def _unnormalized_transform(self, X):
"""Transform data X according to fitted model.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_components)
Document topic distribution for X.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
# make sure feature size is the same in fitted model and in X
X = self._check_non_neg_array(X, "LatentDirichletAllocation.transform")
n_samples, n_features = X.shape
if n_features != self.components_.shape[1]:
raise ValueError(
"The provided data has %d dimensions while "
"the model was trained with feature size %d." %
(n_features, self.components_.shape[1]))
doc_topic_distr, _ = self._e_step(X, cal_sstats=False,
random_init=False)
return doc_topic_distr
def transform(self, X):
"""Transform data X according to the fitted model.
.. versionchanged:: 0.18
*doc_topic_distr* is now normalized
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : shape=(n_samples, n_components)
Document topic distribution for X.
"""
doc_topic_distr = self._unnormalized_transform(X)
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
    def _approx_bound(self, X, doc_topic_distr, sub_sampling):
        """Estimate the variational bound.
        Estimate the variational bound over "all documents" using only the
        documents passed in as X. Since log-likelihood of each word cannot
        be computed directly, we use this bound to estimate it.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Document word matrix.
        doc_topic_distr : array, shape=(n_samples, n_components)
            Document topic distribution. In the literature, this is called
            gamma.
        sub_sampling : boolean, optional, (default=False)
            Compensate for subsampling of documents.
            It is used in calculate bound in online learning.
        Returns
        -------
        score : float
        """
        def _loglikelihood(prior, distr, dirichlet_distr, size):
            # calculate log-likelihood of one Dirichlet-distributed block;
            # shared by the theta (per-document) and beta (per-topic) terms.
            score = np.sum((prior - distr) * dirichlet_distr)
            score += np.sum(gammaln(distr) - gammaln(prior))
            score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
            return score
        is_sparse_x = sp.issparse(X)
        n_samples, n_components = doc_topic_distr.shape
        n_features = self.components_.shape[1]
        score = 0
        dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
        dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
        doc_topic_prior = self.doc_topic_prior_
        topic_word_prior = self.topic_word_prior_
        if is_sparse_x:
            X_data = X.data
            X_indices = X.indices
            X_indptr = X.indptr
        # E[log p(docs | theta, beta)]
        for idx_d in xrange(0, n_samples):
            if is_sparse_x:
                ids = X_indices[X_indptr[idx_d]:X_indptr[idx_d + 1]]
                cnts = X_data[X_indptr[idx_d]:X_indptr[idx_d + 1]]
            else:
                ids = np.nonzero(X[idx_d, :])[0]
                cnts = X[idx_d, ids]
            # logsumexp keeps the per-word normalization numerically stable.
            temp = (dirichlet_doc_topic[idx_d, :, np.newaxis]
                    + dirichlet_component_[:, ids])
            norm_phi = logsumexp(temp, axis=0)
            score += np.dot(cnts, norm_phi)
        # compute E[log p(theta | alpha) - log q(theta | gamma)]
        score += _loglikelihood(doc_topic_prior, doc_topic_distr,
                                dirichlet_doc_topic, self._n_components)
        # Compensate for the subsampling of the population of documents
        if sub_sampling:
            doc_ratio = float(self.total_samples) / n_samples
            score *= doc_ratio
        # E[log p(beta | eta) - log q (beta | lambda)]
        score += _loglikelihood(topic_word_prior, self.components_,
                                dirichlet_component_, n_features)
        return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Document word matrix.
Returns
-------
score : float
Use approximate bound as score.
"""
X = self._check_non_neg_array(X, "LatentDirichletAllocation.score")
doc_topic_distr = self._unnormalized_transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def _perplexity_precomp_distr(self, X, doc_topic_distr=None,
sub_sampling=False):
"""Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_components)
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if not hasattr(self, 'components_'):
raise NotFittedError("no 'components_' attribute in model."
" Please fit model first.")
X = self._check_non_neg_array(X,
"LatentDirichletAllocation.perplexity")
if doc_topic_distr is None:
doc_topic_distr = self._unnormalized_transform(X)
else:
n_samples, n_components = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError("Number of samples in X and doc_topic_distr"
" do not match.")
if n_components != self._n_components:
raise ValueError("Number of topics does not match.")
current_samples = X.shape[0]
bound = self._approx_bound(X, doc_topic_distr, sub_sampling)
if sub_sampling:
word_cnt = X.sum() * (float(self.total_samples) / current_samples)
else:
word_cnt = X.sum()
perword_bound = bound / word_cnt
return np.exp(-1.0 * perword_bound)
def perplexity(self, X, doc_topic_distr='deprecated', sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
.. versionchanged:: 0.19
*doc_topic_distr* argument has been deprecated and is ignored
because user no longer has access to unnormalized distribution
Parameters
----------
X : array-like or sparse matrix, [n_samples, n_features]
Document word matrix.
doc_topic_distr : None or array, shape=(n_samples, n_components)
Document topic distribution.
This argument is deprecated and is currently being ignored.
.. deprecated:: 0.19
sub_sampling : bool
Do sub-sampling or not.
Returns
-------
score : float
Perplexity score.
"""
if doc_topic_distr != 'deprecated':
warnings.warn("Argument 'doc_topic_distr' is deprecated and is "
"being ignored as of 0.19. Support for this "
"argument will be removed in 0.21.",
DeprecationWarning)
return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling)
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-bitcoinrpc to module search path:
import os
import sys
import time # BU added
import random # BU added
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
assert_equal,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
wait_bitcoinds,
enable_coverage,
check_json_precision,
initialize_chain_clean,
)
from .authproxy import AuthServiceProxy, JSONRPCException
class BitcoinTestFramework(object):
# These may be over-ridden by subclasses:
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*50)
    def add_options(self, parser):
        """Hook for subclasses to register extra command-line options on the
        OptionParser; called by main() before parsing. Default adds none."""
        pass
    def setup_chain(self,bitcoinConfDict=None, wallets=None):
        """
        Sets up the blockchain for the bitcoin nodes.  It also sets up the daemon configuration.
        bitcoinConfDict: Pass a dictionary of values you want written to bitcoin.conf. If you have a key with multiple values, pass a list of the values as the value, for example:
        { "debug":["net","blk","thin","lck","mempool","req","bench","evict"] }
        This framework provides values for the necessary fields (like regtest=1). But you can override these
        defaults by setting them in this dictionary.
        wallets: Pass a list of wallet filenames. Each wallet file will be copied into the node's directory
        before starting the node.
        """
        print("Initializing test directory ", self.options.tmpdir, "Bitcoin conf: ", str(bitcoinConfDict), "walletfiles: ", wallets)
        initialize_chain(self.options.tmpdir,bitcoinConfDict, wallets)
    def setup_nodes(self):
        """Start the daemons; default is four nodes in the shared tmpdir.
        Subclasses may override to change node count or arguments."""
        return start_nodes(4, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        assert not self.is_network_split
        # Restarting the daemons drops all connections; setup_network(True)
        # then reconnects only 0-1 and 2-3, leaving the halves isolated.
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.setup_network(True)
def sync_all(self):
"""Synchronizes blocks and mempools"""
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def sync_blocks(self):
"""Synchronizes blocks"""
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
else:
sync_blocks(self.nodes)
    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        assert self.is_network_split
        # Restart everything and reconnect with the full chain topology,
        # including the 1<->2 link that the split removed.
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.setup_network(False)
    def main(self,argsOverride=None,bitcoinConfDict=None,wallets=None):
        """
        Test entry point: parse options, set up the chain and network, run
        the test, then shut down and clean up.  Exits the process with 0 on
        success, 1 on failure.
        argsOverride: pass your own values for sys.argv in this field (or pass None) to use sys.argv
        bitcoinConfDict: Pass a dictionary of values you want written to bitcoin.conf. If you have a key with multiple values, pass a list of the values as the value, for example:
        { "debug":["net","blk","thin","lck","mempool","req","bench","evict"] }
        This framework provides values for the necessary fields (like regtest=1). But you can override these
        defaults by setting them in this dictionary.
        wallets: Pass a list of wallet filenames. Each wallet file will be copied into the node's directory
        before starting the node.
        """
        import optparse
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave bitcoinds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop bitcoinds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default="../../src",
                          help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
        parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                          help="Root directory for datadirs")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        # BU: added for tests using randomness (e.g. excessive.py)
        parser.add_option("--randomseed", dest="randomseed",
                          help="Set RNG seed for tests that use randomness (ignored otherwise)")
        # Let subclasses register their own options before parsing.
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args(argsOverride)
        # BU: initialize RNG seed based on time if no seed specified
        if self.options.randomseed:
            self.randomseed = int(self.options.randomseed)
        else:
            self.randomseed = int(time.time())
        random.seed(self.randomseed)
        # Always print the seed so a failing run can be reproduced.
        print("Random seed: %s" % self.randomseed)
        if self.options.trace_rpc:
            import logging
            logging.basicConfig(level=logging.DEBUG)
        if self.options.coveragedir:
            enable_coverage(self.options.coveragedir)
        # Prefer the locally built binaries over anything on the PATH.
        os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
        check_json_precision()
        success = False
        try:
            if not os.path.isdir(self.options.tmpdir):
                os.makedirs(self.options.tmpdir)
            # Not pretty but, I changed the function signature
            # of setup_chain to allow customization of the setup.
            # However derived object may still use the old format
            if self.setup_chain.__defaults__ is None:
                self.setup_chain()
            else:
                self.setup_chain(bitcoinConfDict, wallets)
            self.setup_network()
            self.run_test()
            success = True
        except JSONRPCException as e:
            print("JSONRPC error: "+e.error['message'])
            traceback.print_tb(sys.exc_info()[2])
        except AssertionError as e:
            print("Assertion failed: " + str(e))
            traceback.print_tb(sys.exc_info()[2])
        except Exception as e:
            # Catch-all so shutdown/cleanup below still runs on any failure.
            print("Unexpected exception caught during testing: " + repr(e))
            traceback.print_tb(sys.exc_info()[2])
        if not self.options.noshutdown:
            print("Stopping nodes")
            # NOTE(review): if setup failed before self.nodes was assigned,
            # this raises AttributeError -- confirm whether that is intended.
            stop_nodes(self.nodes)
            wait_bitcoinds()
        else:
            print("Note: bitcoinds were not stopped and may still be running")
        if not self.options.nocleanup and not self.options.noshutdown:
            print("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        if success:
            print("Tests successful")
            sys.exit(0)
        else:
            print("Failed")
            sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
    """P2P comparison test framework.

    Node 0 runs the binary under test (``--testbinary``); every other node
    runs the reference binary (``--refbinary``).  Subclasses may override
    ``num_nodes`` to change how many nodes are launched.
    """

    def __init__(self):
        # Cooperate with the base class initializer (fix: the original
        # skipped it, which breaks if BitcoinTestFramework defines __init__).
        super(ComparisonTestFramework, self).__init__()
        # Can override the num_nodes variable to indicate how many nodes to run.
        self.num_nodes = 2

    def add_options(self, parser):
        """Register the test/reference binary options on the option parser."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "bitcoind"),
                          help="bitcoind binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "bitcoind"),
                          help="bitcoind binary to use for reference nodes (if any)")

    def setup_chain(self, bitcoinConfDict=None, wallets=None):  # BU add config params
        """Create clean per-node datadirs (optionally seeded with config/wallets)."""
        print("Initializing test directory ", self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, self.num_nodes, bitcoinConfDict, wallets)

    def setup_network(self):
        """Start the nodes: node 0 with the test binary, the rest with the reference binary."""
        self.nodes = start_nodes(
            self.num_nodes, self.options.tmpdir,
            extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
            binary=[self.options.testbinary] +
                   [self.options.refbinary] * (self.num_nodes - 1))
| |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import time
import asyncio
import discord.abc
from .permissions import Permissions
from .enums import ChannelType, try_enum
from .mixins import Hashable
from . import utils
from .asset import Asset
from .errors import ClientException, NoMoreItems
from .webhook import Webhook
__all__ = (
'TextChannel',
'VoiceChannel',
'DMChannel',
'CategoryChannel',
'StoreChannel',
'GroupChannel',
'_channel_factory',
)
async def _single_delete_strategy(messages):
    """Delete the given messages one HTTP call at a time.

    Used as the fallback when bulk deletion is not applicable (e.g. user
    accounts, or messages past the bulk-delete age cutoff in ``purge``).
    """
    for message in messages:
        await message.delete()
class TextChannel(discord.abc.Messageable, discord.abc.GuildChannel, Hashable):
    """Represents a Discord guild text channel.

    .. container:: operations

        .. describe:: x == y

            Checks if two channels are equal.

        .. describe:: x != y

            Checks if two channels are not equal.

        .. describe:: hash(x)

            Returns the channel's hash.

        .. describe:: str(x)

            Returns the channel's name.

    Attributes
    -----------
    name: :class:`str`
        The channel name.
    guild: :class:`Guild`
        The guild the channel belongs to.
    id: :class:`int`
        The channel ID.
    category_id: :class:`int`
        The category channel ID this channel belongs to.
    topic: Optional[:class:`str`]
        The channel's topic. None if it doesn't exist.
    position: :class:`int`
        The position in the channel list. This is a number that starts at 0. e.g. the
        top channel is position 0.
    last_message_id: Optional[:class:`int`]
        The last message ID of the message sent to this channel. It may
        *not* point to an existing or valid message.
    slowmode_delay: :class:`int`
        The number of seconds a member must wait between sending messages
        in this channel. A value of `0` denotes that it is disabled.
        Bots and users with :attr:`~Permissions.manage_channels` or
        :attr:`~Permissions.manage_messages` bypass slowmode.
    """

    __slots__ = ('name', 'id', 'guild', 'topic', '_state', 'nsfw',
                 'category_id', 'position', 'slowmode_delay', '_overwrites',
                 '_type', 'last_message_id')

    def __init__(self, *, state, guild, data):
        self._state = state
        self.id = int(data['id'])
        # Raw channel type; kept because a "text" channel may actually be a
        # news channel (see is_news()).
        self._type = data['type']
        self._update(guild, data)

    def __repr__(self):
        return '<TextChannel id={0.id} name={0.name!r} position={0.position}>'.format(self)

    def _update(self, guild, data):
        # Refresh all mutable attributes from a raw channel payload.
        self.guild = guild
        self.name = data['name']
        self.category_id = utils._get_as_snowflake(data, 'parent_id')
        self.topic = data.get('topic')
        self.position = data['position']
        self.nsfw = data.get('nsfw', False)
        # Does this need coercion into `int`? No idea yet.
        self.slowmode_delay = data.get('rate_limit_per_user', 0)
        self._type = data.get('type', self._type)
        self.last_message_id = utils._get_as_snowflake(data, 'last_message_id')
        self._fill_overwrites(data)

    async def _get_channel(self):
        # Messageable interface: this object is its own destination channel.
        return self

    @property
    def _sorting_bucket(self):
        return ChannelType.text.value

    def permissions_for(self, member):
        base = super().permissions_for(member)

        # text channels do not have voice related permissions
        denied = Permissions.voice()
        base.value &= ~denied.value
        return base

    permissions_for.__doc__ = discord.abc.GuildChannel.permissions_for.__doc__

    @property
    def members(self):
        """Returns a :class:`list` of :class:`Member` that can see this channel."""
        return [m for m in self.guild.members if self.permissions_for(m).read_messages]

    def is_nsfw(self):
        """Checks if the channel is NSFW."""
        return self.nsfw

    def is_news(self):
        """Checks if the channel is a news channel."""
        return self._type == ChannelType.news.value

    @property
    def last_message(self):
        """Fetches the last message from this channel in cache.

        The message might not be valid or point to an existing message.

        .. admonition:: Reliable Fetching
            :class: helpful

            For a slightly more reliable method of fetching the
            last message, consider using either :meth:`history`
            or :meth:`fetch_message` with the :attr:`last_message_id`
            attribute.

        Returns
        ---------
        Optional[:class:`Message`]
            The last message in this channel or ``None`` if not found.
        """
        return self._state._get_message(self.last_message_id) if self.last_message_id else None

    async def edit(self, *, reason=None, **options):
        """|coro|

        Edits the channel.

        You must have the :attr:`~Permissions.manage_channels` permission to
        use this.

        Parameters
        ----------
        name: :class:`str`
            The new channel name.
        topic: :class:`str`
            The new channel's topic.
        position: :class:`int`
            The new channel's position.
        nsfw: :class:`bool`
            To mark the channel as NSFW or not.
        sync_permissions: :class:`bool`
            Whether to sync permissions with the channel's new or pre-existing
            category. Defaults to ``False``.
        category: Optional[:class:`CategoryChannel`]
            The new category for this channel. Can be ``None`` to remove the
            category.
        slowmode_delay: :class:`int`
            Specifies the slowmode rate limit for user in this channel, in seconds.
            A value of `0` disables slowmode. The maximum value possible is `21600`.
        reason: Optional[:class:`str`]
            The reason for editing this channel. Shows up on the audit log.

        Raises
        ------
        InvalidArgument
            If position is less than 0 or greater than the number of channels.
        Forbidden
            You do not have permissions to edit the channel.
        HTTPException
            Editing the channel failed.
        """
        await self._edit(options, reason=reason)

    async def clone(self, *, name=None, reason=None):
        return await self._clone_impl({
            'topic': self.topic,
            'nsfw': self.nsfw,
            'rate_limit_per_user': self.slowmode_delay
        }, name=name, reason=reason)

    clone.__doc__ = discord.abc.GuildChannel.clone.__doc__

    async def delete_messages(self, messages):
        """|coro|

        Deletes a list of messages. This is similar to :meth:`Message.delete`
        except it bulk deletes multiple messages.

        As a special case, if the number of messages is 0, then nothing
        is done. If the number of messages is 1 then single message
        delete is done. If it's more than two, then bulk delete is used.

        You cannot bulk delete more than 100 messages or messages that
        are older than 14 days old.

        You must have the :attr:`~Permissions.manage_messages` permission to
        use this.

        Usable only by bot accounts.

        Parameters
        -----------
        messages: Iterable[:class:`abc.Snowflake`]
            An iterable of messages denoting which ones to bulk delete.

        Raises
        ------
        ClientException
            The number of messages to delete was more than 100.
        Forbidden
            You do not have proper permissions to delete the messages or
            you're not using a bot account.
        HTTPException
            Deleting the messages failed.
        """
        # Materialize so len() and indexing work on arbitrary iterables.
        if not isinstance(messages, (list, tuple)):
            messages = list(messages)

        if len(messages) == 0:
            return  # do nothing

        if len(messages) == 1:
            # Single deletion avoids the bulk-delete endpoint entirely.
            message_id = messages[0].id
            await self._state.http.delete_message(self.id, message_id)
            return

        if len(messages) > 100:
            raise ClientException('Can only bulk delete messages up to 100 messages')

        message_ids = [m.id for m in messages]
        await self._state.http.delete_messages(self.id, message_ids)

    async def purge(self, *, limit=100, check=None, before=None, after=None, around=None, oldest_first=False, bulk=True):
        """|coro|

        Purges a list of messages that meet the criteria given by the predicate
        ``check``. If a ``check`` is not provided then all messages are deleted
        without discrimination.

        You must have the :attr:`~Permissions.manage_messages` permission to
        delete messages even if they are your own (unless you are a user
        account). The :attr:`~Permissions.read_message_history` permission is
        also needed to retrieve message history.

        Internally, this employs a different number of strategies depending
        on the conditions met such as if a bulk delete is possible or if
        the account is a user bot or not.

        Examples
        ---------

        Deleting bot's messages ::

            def is_me(m):
                return m.author == client.user

            deleted = await channel.purge(limit=100, check=is_me)
            await channel.send('Deleted {} message(s)'.format(len(deleted)))

        Parameters
        -----------
        limit: Optional[:class:`int`]
            The number of messages to search through. This is not the number
            of messages that will be deleted, though it can be.
        check: predicate
            The function used to check if a message should be deleted.
            It must take a :class:`Message` as its sole parameter.
        before
            Same as ``before`` in :meth:`history`.
        after
            Same as ``after`` in :meth:`history`.
        around
            Same as ``around`` in :meth:`history`.
        oldest_first
            Same as ``oldest_first`` in :meth:`history`.
        bulk: :class:`bool`
            If True, use bulk delete. bulk=False is useful for mass-deleting
            a bot's own messages without manage_messages. When True, will fall
            back to single delete if current account is a user bot, or if
            messages are older than two weeks.

        Raises
        -------
        Forbidden
            You do not have proper permissions to do the actions required.
        HTTPException
            Purging the messages failed.

        Returns
        --------
        List[:class:`.Message`]
            The list of messages that were deleted.
        """
        if check is None:
            check = lambda m: True

        iterator = self.history(limit=limit, before=before, after=after, oldest_first=oldest_first, around=around)
        ret = []
        count = 0

        # Snowflake cutoff ~14 days ago: (unix ms - Discord epoch ms) << 22.
        # Messages with IDs below this are routed to single deletion.
        minimum_time = int((time.time() - 14 * 24 * 60 * 60) * 1000.0 - 1420070400000) << 22
        strategy = self.delete_messages if self._state.is_bot and bulk else _single_delete_strategy

        while True:
            try:
                msg = await iterator.next()
            except NoMoreItems:
                # no more messages to poll
                if count >= 2:
                    # more than 2 messages -> bulk delete
                    to_delete = ret[-count:]
                    await strategy(to_delete)
                elif count == 1:
                    # delete a single message
                    await ret[-1].delete()

                return ret
            else:
                if count == 100:
                    # we've reached a full 'queue'
                    to_delete = ret[-100:]
                    await strategy(to_delete)
                    count = 0
                    await asyncio.sleep(1)

                if check(msg):
                    if msg.id < minimum_time:
                        # older than 14 days old: flush what we have queued,
                        # then permanently switch to one-by-one deletion.
                        if count == 1:
                            await ret[-1].delete()
                        elif count >= 2:
                            to_delete = ret[-count:]
                            await strategy(to_delete)

                        count = 0
                        strategy = _single_delete_strategy

                    count += 1
                    ret.append(msg)

    async def webhooks(self):
        """|coro|

        Gets the list of webhooks from this channel.

        Requires :attr:`~.Permissions.manage_webhooks` permissions.

        Raises
        -------
        Forbidden
            You don't have permissions to get the webhooks.

        Returns
        --------
        List[:class:`Webhook`]
            The webhooks for this channel.
        """
        data = await self._state.http.channel_webhooks(self.id)
        return [Webhook.from_state(d, state=self._state) for d in data]

    async def create_webhook(self, *, name, avatar=None, reason=None):
        """|coro|

        Creates a webhook for this channel.

        Requires :attr:`~.Permissions.manage_webhooks` permissions.

        .. versionchanged:: 1.1.0
            Added the ``reason`` keyword-only parameter.

        Parameters
        -------------
        name: :class:`str`
            The webhook's name.
        avatar: Optional[:class:`bytes`]
            A :term:`py:bytes-like object` representing the webhook's default avatar.
            This operates similarly to :meth:`~ClientUser.edit`.
        reason: Optional[:class:`str`]
            The reason for creating this webhook. Shows up in the audit logs.

        Raises
        -------
        HTTPException
            Creating the webhook failed.
        Forbidden
            You do not have permissions to create a webhook.

        Returns
        --------
        :class:`Webhook`
            The created webhook.
        """
        if avatar is not None:
            # The HTTP API expects a base64 data URI, not raw bytes.
            avatar = utils._bytes_to_base64_data(avatar)

        data = await self._state.http.create_webhook(self.id, name=str(name), avatar=avatar, reason=reason)
        return Webhook.from_state(data, state=self._state)
class VoiceChannel(discord.abc.Connectable, discord.abc.GuildChannel, Hashable):
    """Represents a Discord guild voice channel.

    .. container:: operations

        .. describe:: x == y

            Checks if two channels are equal.

        .. describe:: x != y

            Checks if two channels are not equal.

        .. describe:: hash(x)

            Returns the channel's hash.

        .. describe:: str(x)

            Returns the channel's name.

    Attributes
    -----------
    name: :class:`str`
        The channel name.
    guild: :class:`Guild`
        The guild the channel belongs to.
    id: :class:`int`
        The channel ID.
    category_id: :class:`int`
        The category channel ID this channel belongs to.
    position: :class:`int`
        The position in the channel list. This is a number that starts at 0. e.g. the
        top channel is position 0.
    bitrate: :class:`int`
        The channel's preferred audio bitrate in bits per second.
    user_limit: :class:`int`
        The channel's limit for number of members that can be in a voice channel.
    """

    __slots__ = ('name', 'id', 'guild', 'bitrate', 'user_limit',
                 '_state', 'position', '_overwrites', 'category_id')

    def __init__(self, *, state, guild, data):
        self._state = state
        self.id = int(data['id'])
        self._update(guild, data)

    def __repr__(self):
        return '<VoiceChannel id={0.id} name={0.name!r} position={0.position}>'.format(self)

    def _get_voice_client_key(self):
        # Connectable interface: voice clients are keyed by guild.
        return self.guild.id, 'guild_id'

    def _get_voice_state_pair(self):
        return self.guild.id, self.id

    @property
    def _type(self):
        return ChannelType.voice.value

    def _update(self, guild, data):
        # Refresh all mutable attributes from a raw channel payload.
        self.guild = guild
        self.name = data['name']
        self.category_id = utils._get_as_snowflake(data, 'parent_id')
        self.position = data['position']
        self.bitrate = data.get('bitrate')
        self.user_limit = data.get('user_limit')
        self._fill_overwrites(data)

    @property
    def _sorting_bucket(self):
        return ChannelType.voice.value

    @property
    def members(self):
        """Returns a list of :class:`Member` that are currently inside this voice channel."""
        ret = []
        for user_id, state in self.guild._voice_states.items():
            # Fix: a cached voice state may transiently have no channel
            # (e.g. while a member is disconnecting); guard before
            # dereferencing to avoid an AttributeError on None.
            if state.channel is not None and state.channel.id == self.id:
                member = self.guild.get_member(user_id)
                if member is not None:
                    ret.append(member)
        return ret

    def permissions_for(self, member):
        base = super().permissions_for(member)

        # voice channels cannot be edited by people who can't connect to them
        # It also implicitly denies all other voice perms
        if not base.connect:
            denied = Permissions.voice()
            denied.update(manage_channels=True, manage_roles=True)
            base.value &= ~denied.value
        return base

    permissions_for.__doc__ = discord.abc.GuildChannel.permissions_for.__doc__

    async def clone(self, *, name=None, reason=None):
        return await self._clone_impl({
            'bitrate': self.bitrate,
            'user_limit': self.user_limit
        }, name=name, reason=reason)

    clone.__doc__ = discord.abc.GuildChannel.clone.__doc__

    async def edit(self, *, reason=None, **options):
        """|coro|

        Edits the channel.

        You must have the :attr:`~Permissions.manage_channels` permission to
        use this.

        Parameters
        ----------
        name: :class:`str`
            The new channel's name.
        bitrate: :class:`int`
            The new channel's bitrate.
        user_limit: :class:`int`
            The new channel's user limit.
        position: :class:`int`
            The new channel's position.
        sync_permissions: :class:`bool`
            Whether to sync permissions with the channel's new or pre-existing
            category. Defaults to ``False``.
        category: Optional[:class:`CategoryChannel`]
            The new category for this channel. Can be ``None`` to remove the
            category.
        reason: Optional[:class:`str`]
            The reason for editing this channel. Shows up on the audit log.

        Raises
        ------
        Forbidden
            You do not have permissions to edit the channel.
        HTTPException
            Editing the channel failed.
        """
        await self._edit(options, reason=reason)
class CategoryChannel(discord.abc.GuildChannel, Hashable):
    """Represents a Discord channel category.

    These are useful to group channels to logical compartments.

    .. container:: operations

        .. describe:: x == y

            Checks if two channels are equal.

        .. describe:: x != y

            Checks if two channels are not equal.

        .. describe:: hash(x)

            Returns the category's hash.

        .. describe:: str(x)

            Returns the category's name.

    Attributes
    -----------
    name: :class:`str`
        The category name.
    guild: :class:`Guild`
        The guild the category belongs to.
    id: :class:`int`
        The category channel ID.
    position: :class:`int`
        The position in the category list. This is a number that starts at 0. e.g. the
        top category is position 0.
    """

    __slots__ = ('name', 'id', 'guild', 'nsfw', '_state', 'position', '_overwrites', 'category_id')

    def __init__(self, *, state, guild, data):
        self._state = state
        self.id = int(data['id'])
        self._update(guild, data)

    def __repr__(self):
        return '<CategoryChannel id={0.id} name={0.name!r} position={0.position}>'.format(self)

    def _update(self, guild, data):
        # Refresh all mutable attributes from a raw channel payload.
        self.guild = guild
        self.name = data['name']
        self.category_id = utils._get_as_snowflake(data, 'parent_id')
        self.nsfw = data.get('nsfw', False)
        self.position = data['position']
        self._fill_overwrites(data)

    @property
    def _sorting_bucket(self):
        return ChannelType.category.value

    @property
    def _type(self):
        return ChannelType.category.value

    def is_nsfw(self):
        """Checks if the category is NSFW."""
        return self.nsfw

    async def clone(self, *, name=None, reason=None):
        return await self._clone_impl({
            'nsfw': self.nsfw
        }, name=name, reason=reason)

    clone.__doc__ = discord.abc.GuildChannel.clone.__doc__

    async def edit(self, *, reason=None, **options):
        """|coro|

        Edits the channel.

        You must have the :attr:`~Permissions.manage_channels` permission to
        use this.

        Parameters
        ----------
        name: :class:`str`
            The new category's name.
        position: :class:`int`
            The new category's position.
        nsfw: :class:`bool`
            To mark the category as NSFW or not.
        reason: Optional[:class:`str`]
            The reason for editing this category. Shows up on the audit log.

        Raises
        ------
        InvalidArgument
            If position is less than 0 or greater than the number of categories.
        Forbidden
            You do not have permissions to edit the category.
        HTTPException
            Editing the category failed.
        """
        try:
            position = options.pop('position')
        except KeyError:
            pass
        else:
            # Moving is a separate bulk-position request; perform it first,
            # then mirror the new position locally.
            await self._move(position, reason=reason)
            self.position = position

        if options:
            data = await self._state.http.edit_channel(self.id, reason=reason, **options)
            self._update(self.guild, data)

    @property
    def channels(self):
        """List[:class:`abc.GuildChannel`]: Returns the channels that are under this category.

        These are sorted by the official Discord UI, which places voice channels below the text channels.
        """
        def comparator(channel):
            # Text channels sort above the rest; ties are broken by position
            # and then ID — consistent with the text_channels and
            # voice_channels properties below (the original omitted the ID
            # tiebreaker here, yielding inconsistent orderings).
            return (not isinstance(channel, TextChannel), channel.position, channel.id)

        ret = [c for c in self.guild.channels if c.category_id == self.id]
        ret.sort(key=comparator)
        return ret

    @property
    def text_channels(self):
        """List[:class:`TextChannel`]: Returns the text channels that are under this category."""
        ret = [c for c in self.guild.channels
               if c.category_id == self.id
               and isinstance(c, TextChannel)]
        ret.sort(key=lambda c: (c.position, c.id))
        return ret

    @property
    def voice_channels(self):
        """List[:class:`VoiceChannel`]: Returns the voice channels that are under this category."""
        ret = [c for c in self.guild.channels
               if c.category_id == self.id
               and isinstance(c, VoiceChannel)]
        ret.sort(key=lambda c: (c.position, c.id))
        return ret

    async def create_text_channel(self, name, *, overwrites=None, reason=None, **options):
        """|coro|

        A shortcut method to :meth:`Guild.create_text_channel` to create a :class:`TextChannel` in the category.
        """
        return await self.guild.create_text_channel(name, overwrites=overwrites, category=self, reason=reason, **options)

    async def create_voice_channel(self, name, *, overwrites=None, reason=None, **options):
        """|coro|

        A shortcut method to :meth:`Guild.create_voice_channel` to create a :class:`VoiceChannel` in the category.
        """
        return await self.guild.create_voice_channel(name, overwrites=overwrites, category=self, reason=reason, **options)
class StoreChannel(discord.abc.GuildChannel, Hashable):
    """Represents a Discord guild store channel.

    Supports the usual channel operations: ``==``/``!=`` comparison,
    ``hash(x)``, and ``str(x)`` (the channel name).

    Attributes
    -----------
    name: :class:`str`
        The channel name.
    guild: :class:`Guild`
        The guild the channel belongs to.
    id: :class:`int`
        The channel ID.
    category_id: :class:`int`
        The category channel ID this channel belongs to.
    position: :class:`int`
        The position in the channel list, starting at 0 for the top channel.
    """

    __slots__ = ('name', 'id', 'guild', '_state', 'nsfw',
                 'category_id', 'position', '_overwrites',)

    def __init__(self, *, state, guild, data):
        self._state = state
        self.id = int(data['id'])
        self._update(guild, data)

    def __repr__(self):
        return '<StoreChannel id={0.id} name={0.name!r} position={0.position}>'.format(self)

    def _update(self, guild, data):
        # Pull every mutable attribute out of the raw channel payload.
        self.guild = guild
        self.name = data['name']
        self.nsfw = data.get('nsfw', False)
        self.position = data['position']
        self.category_id = utils._get_as_snowflake(data, 'parent_id')
        self._fill_overwrites(data)

    @property
    def _sorting_bucket(self):
        return ChannelType.text.value

    @property
    def _type(self):
        return ChannelType.store.value

    def permissions_for(self, member):
        resolved = super().permissions_for(member)
        # Voice permissions are meaningless in a store channel; mask them out.
        resolved.value &= ~Permissions.voice().value
        return resolved

    permissions_for.__doc__ = discord.abc.GuildChannel.permissions_for.__doc__

    def is_nsfw(self):
        """Checks if the channel is NSFW."""
        return self.nsfw

    async def clone(self, *, name=None, reason=None):
        return await self._clone_impl({'nsfw': self.nsfw}, name=name, reason=reason)

    clone.__doc__ = discord.abc.GuildChannel.clone.__doc__

    async def edit(self, *, reason=None, **options):
        """|coro|

        Edits the channel. Requires the :attr:`~Permissions.manage_channels`
        permission.

        Parameters
        ----------
        name: :class:`str`
            The new channel name.
        position: :class:`int`
            The new channel's position.
        nsfw: :class:`bool`
            To mark the channel as NSFW or not.
        sync_permissions: :class:`bool`
            Whether to sync permissions with the channel's new or pre-existing
            category. Defaults to ``False``.
        category: Optional[:class:`CategoryChannel`]
            The new category for this channel. Can be ``None`` to remove the
            category.
        reason: Optional[:class:`str`]
            The reason for editing this channel. Shows up on the audit log.

        Raises
        ------
        InvalidArgument
            If position is less than 0 or greater than the number of channels.
        Forbidden
            You do not have permissions to edit the channel.
        HTTPException
            Editing the channel failed.
        """
        await self._edit(options, reason=reason)
class DMChannel(discord.abc.Messageable, Hashable):
    """Represents a Discord direct message channel.

    Supports ``==``/``!=`` comparison, ``hash(x)``, and ``str(x)``
    (a human-readable description of the conversation).

    Attributes
    ----------
    recipient: :class:`User`
        The user you are participating with in the direct message channel.
    me: :class:`ClientUser`
        The user presenting yourself.
    id: :class:`int`
        The direct message channel ID.
    """

    __slots__ = ('id', 'recipient', 'me', '_state')

    def __init__(self, *, me, state, data):
        self._state = state
        self.id = int(data['id'])
        self.me = me
        # The other participant is the first (and only expected) recipient.
        self.recipient = state.store_user(data['recipients'][0])

    async def _get_channel(self):
        # Messageable interface: this object is its own destination channel.
        return self

    def __str__(self):
        return 'Direct Message with %s' % self.recipient

    def __repr__(self):
        return '<DMChannel id={0.id} recipient={0.recipient!r}>'.format(self)

    @property
    def _type(self):
        return ChannelType.private.value

    @property
    def created_at(self):
        """Returns the direct message channel's creation time in UTC."""
        return utils.snowflake_time(self.id)

    def permissions_for(self, user=None):
        """Handles permission resolution for a :class:`User`.

        Kept for compatibility with the other channel types; actual direct
        messages have no real concept of permissions. The result is the full
        set of text permissions, minus:

        - send_tts_messages: You cannot send TTS messages in a DM.
        - manage_messages: You cannot delete others messages in a DM.

        Parameters
        -----------
        user: :class:`User`
            The user to check permissions for. This parameter is ignored
            but kept for compatibility.

        Returns
        --------
        :class:`Permissions`
            The resolved permissions.
        """
        resolved = Permissions.text()
        resolved.send_tts_messages = False
        resolved.manage_messages = False
        return resolved
class GroupChannel(discord.abc.Messageable, Hashable):
    """Represents a Discord group channel.

    .. container:: operations

        .. describe:: x == y

            Checks if two channels are equal.

        .. describe:: x != y

            Checks if two channels are not equal.

        .. describe:: hash(x)

            Returns the channel's hash.

        .. describe:: str(x)

            Returns a string representation of the channel

    Attributes
    ----------
    recipients: :class:`list` of :class:`User`
        The users you are participating with in the group channel.
    me: :class:`ClientUser`
        The user presenting yourself.
    id: :class:`int`
        The group channel ID.
    owner: :class:`User`
        The user that owns the group channel.
    icon: Optional[:class:`str`]
        The group channel's icon hash if provided.
    name: Optional[:class:`str`]
        The group channel's name if provided.
    """

    __slots__ = ('id', 'recipients', 'owner', 'icon', 'name', 'me', '_state')

    def __init__(self, *, me, state, data):
        self._state = state
        self.id = int(data['id'])
        self.me = me
        self._update_group(data)

    def _update_group(self, data):
        # Refresh group metadata from a raw channel payload.
        owner_id = utils._get_as_snowflake(data, 'owner_id')
        self.icon = data.get('icon')
        self.name = data.get('name')

        try:
            self.recipients = [self._state.store_user(u) for u in data['recipients']]
        except KeyError:
            # Some payloads omit 'recipients'; keep the previously cached
            # list (if any) rather than clobbering it.
            pass

        # Resolve the owner: either ourselves or one of the recipients.
        if owner_id == self.me.id:
            self.owner = self.me
        else:
            self.owner = utils.find(lambda u: u.id == owner_id, self.recipients)

    async def _get_channel(self):
        # Messageable interface: this object is its own destination channel.
        return self

    def __str__(self):
        if self.name:
            return self.name

        if len(self.recipients) == 0:
            return 'Unnamed'

        return ', '.join(map(lambda x: x.name, self.recipients))

    def __repr__(self):
        return '<GroupChannel id={0.id} name={0.name!r}>'.format(self)

    @property
    def _type(self):
        return ChannelType.group.value

    @property
    def icon_url(self):
        """:class:`Asset`: Returns the channel's icon asset if available."""
        return Asset._from_icon(self._state, self, 'channel')

    @property
    def created_at(self):
        """Returns the channel's creation time in UTC."""
        return utils.snowflake_time(self.id)

    def permissions_for(self, user):
        """Handles permission resolution for a :class:`User`.

        This function is there for compatibility with other channel types.

        Actual direct messages do not really have the concept of permissions.

        This returns all the Text related permissions set to true except:

        - send_tts_messages: You cannot send TTS messages in a DM.
        - manage_messages: You cannot delete others messages in a DM.

        This also checks the kick_members permission if the user is the owner.

        Parameters
        -----------
        user: :class:`User`
            The user to check permissions for.

        Returns
        --------
        :class:`Permissions`
            The resolved permissions for the user.
        """
        base = Permissions.text()
        base.send_tts_messages = False
        base.manage_messages = False
        base.mention_everyone = True

        # Only the group owner may remove members.
        if user.id == self.owner.id:
            base.kick_members = True

        return base

    async def add_recipients(self, *recipients):
        r"""|coro|

        Adds recipients to this group.

        A group can only have a maximum of 10 members.
        Attempting to add more ends up in an exception. To
        add a recipient to the group, you must have a relationship
        with the user of type :attr:`RelationshipType.friend`.

        Parameters
        -----------
        \*recipients: :class:`User`
            An argument list of users to add to this group.

        Raises
        -------
        HTTPException
            Adding a recipient to this group failed.
        """
        # TODO: wait for the corresponding WS event
        req = self._state.http.add_group_recipient
        for recipient in recipients:
            await req(self.id, recipient.id)

    async def remove_recipients(self, *recipients):
        r"""|coro|

        Removes recipients from this group.

        Parameters
        -----------
        \*recipients: :class:`User`
            An argument list of users to remove from this group.

        Raises
        -------
        HTTPException
            Removing a recipient from this group failed.
        """
        # TODO: wait for the corresponding WS event
        req = self._state.http.remove_group_recipient
        for recipient in recipients:
            await req(self.id, recipient.id)

    async def edit(self, **fields):
        """|coro|

        Edits the group.

        Parameters
        -----------
        name: Optional[:class:`str`]
            The new name to change the group to.
            Could be ``None`` to remove the name.
        icon: Optional[:class:`bytes`]
            A :term:`py:bytes-like object` representing the new icon.
            Could be ``None`` to remove the icon.

        Raises
        -------
        HTTPException
            Editing the group failed.
        """
        try:
            icon_bytes = fields['icon']
        except KeyError:
            pass
        else:
            if icon_bytes is not None:
                # The HTTP API expects a base64 data URI, not raw bytes.
                fields['icon'] = utils._bytes_to_base64_data(icon_bytes)

        data = await self._state.http.edit_group(self.id, **fields)
        self._update_group(data)

    async def leave(self):
        """|coro|

        Leave the group.

        If you are the only one in the group, this deletes it as well.

        Raises
        -------
        HTTPException
            Leaving the group failed.
        """
        await self._state.http.leave_group(self.id)
def _channel_factory(channel_type):
    """Resolve a raw channel type into ``(channel class, ChannelType)``.

    Returns ``(None, value)`` for unrecognized types. News channels are
    represented by :class:`TextChannel`.
    """
    value = try_enum(ChannelType, channel_type)
    dispatch = {
        ChannelType.text: TextChannel,
        ChannelType.news: TextChannel,
        ChannelType.voice: VoiceChannel,
        ChannelType.private: DMChannel,
        ChannelType.category: CategoryChannel,
        ChannelType.group: GroupChannel,
        ChannelType.store: StoreChannel,
    }
    return dispatch.get(value), value
| |
from conans.client.local_file_getter import get_path
from conans.client.output import ScopedOutput
from conans.util.files import rmdir, mkdir
from conans.model.ref import PackageReference
from conans.errors import (ConanException, ConanConnectionError, ConanOutdatedClient,
NotFoundException)
from conans.client.remote_registry import RemoteRegistry
from conans.util.log import logger
import os
from conans.paths import EXPORT_SOURCES_TGZ_NAME
from conans.client.remover import DiskRemover
from conans.util.tracer import log_package_got_from_local_cache,\
log_recipe_got_from_local_cache
from conans.client.loader_parse import load_conanfile_class
class ConanProxy(object):
    """ Class to access the conan storage, to perform typical tasks as to get packages,
    getting conanfiles, uploading, removing from remote, etc.
    It uses the RemoteRegistry to control where the packages come from.
    """

    def __init__(self, client_cache, user_io, remote_manager, remote_name,
                 update=False, check_updates=False, manifest_manager=False):
        # remote_name: remote forced by the user (e.g. with -r); when None the
        # registry reference for the package, or the default remote, is used.
        self._client_cache = client_cache
        self._out = user_io.out
        self._remote_manager = remote_manager
        self._registry = RemoteRegistry(self._client_cache.registry, self._out)
        self._remote_name = remote_name
        self._update = update
        self._check_updates = check_updates or update  # Update forces check (and of course the update)
        self._manifest_manager = manifest_manager

    @property
    def registry(self):
        # Expose the registry so callers can query/alter remote associations.
        return self._registry

    def package_available(self, package_ref, short_paths, check_outdated):
        """
        Returns True if there is a local or remote package available (and up to date if check_outdated).
        It wont download the package, just check its hash
        """
        output = ScopedOutput(str(package_ref.conan), self._out)
        package_folder = self._client_cache.package(package_ref, short_paths=short_paths)
        remote_info = None
        # No package in local cache
        if not os.path.exists(package_folder):
            try:
                remote_info = self.get_package_info(package_ref)
            except ConanException:
                return False  # Not local nor remote
        # Maybe we have the package (locally or in remote) but it's outdated
        if check_outdated:
            # Compare the recipe hash recorded with the package against the
            # hash of the recipe currently in the local cache.
            if remote_info:
                package_hash = remote_info.recipe_hash
            else:
                package_hash = self._client_cache.read_package_recipe_hash(package_folder)
            local_recipe_hash = self._client_cache.load_manifest(package_ref.conan).summary_hash
            up_to_date = local_recipe_hash == package_hash
            if not up_to_date:
                output.info("Outdated package!")
            else:
                output.info("Package is up to date")
            return up_to_date
        return True

    def get_package(self, package_ref, short_paths):
        """ obtain a package, either from disk or retrieve from remotes if necessary
        and not necessary to build
        """
        output = ScopedOutput(str(package_ref.conan), self._out)
        package_folder = self._client_cache.package(package_ref, short_paths=short_paths)

        # Check current package status
        if os.path.exists(package_folder):
            if self._check_updates:
                read_manifest = self._client_cache.load_package_manifest(package_ref)
                try:  # get_conan_digest can fail, not in server
                    upstream_manifest = self.get_package_digest(package_ref)
                    if upstream_manifest != read_manifest:
                        if upstream_manifest.time > read_manifest.time:
                            output.warn("Current package is older than remote upstream one")
                            if self._update:
                                # Deleting the local folder forces the remote
                                # retrieval (or a rebuild) below.
                                output.warn("Removing it to retrieve or build an updated one")
                                rmdir(package_folder)
                        else:
                            output.warn("Current package is newer than remote upstream one")
                except ConanException:
                    pass

        installed = False
        local_package = os.path.exists(package_folder)
        if local_package:
            output.success('Already installed!')
            installed = True
            log_package_got_from_local_cache(package_ref)
        else:
            installed = self._retrieve_remote_package(package_ref, package_folder,
                                                      output)
        self.handle_package_manifest(package_ref, installed)
        return installed

    def handle_package_manifest(self, package_ref, installed):
        # Verify the package against the manifest manager only when it was
        # actually installed and manifest checking is enabled.
        if installed and self._manifest_manager:
            remote = self._registry.get_ref(package_ref.conan)
            self._manifest_manager.check_package(package_ref, remote)

    def get_recipe_sources(self, conan_reference, short_paths=False):
        # Ensure the exported sources of a recipe are present locally,
        # fetching them from the recipe's registered remote if missing.
        export_path = self._client_cache.export(conan_reference)
        sources_folder = self._client_cache.export_sources(conan_reference, short_paths)
        if os.path.exists(sources_folder):
            return

        current_remote = self._registry.get_ref(conan_reference)
        if not current_remote:
            raise ConanException("Error while trying to get recipe sources for %s. "
                                 "No remote defined" % str(conan_reference))
        else:
            self._remote_manager.get_recipe_sources(conan_reference, export_path, sources_folder,
                                                    current_remote)

    def get_recipe(self, conan_reference):
        # Serialize recipe access; concurrent installs must not race on the
        # same conanfile.
        with self._client_cache.conanfile_write_lock(conan_reference):
            result = self._get_recipe(conan_reference)
        return result

    def _get_recipe(self, conan_reference):
        """Return the local path of the conanfile, retrieving/updating it
        from remotes as dictated by the --update/check-updates flags."""
        output = ScopedOutput(str(conan_reference), self._out)
        # check if it is in disk
        conanfile_path = self._client_cache.conanfile(conan_reference)

        if os.path.exists(conanfile_path):
            log_recipe_got_from_local_cache(conan_reference)
            if self._check_updates:
                ret = self.update_available(conan_reference)
                if ret != 0:  # Found and not equal
                    remote, ref_remote = self._get_remote(conan_reference)

                    if ret == 1:
                        # Remote recipe is newer than the local one.
                        if not self._update:
                            if remote != ref_remote:  # Forced new remote
                                output.warn("There is a new conanfile in '%s' remote. "
                                            "Execute 'install -u -r %s' to update it."
                                            % (remote.name, remote.name))
                            else:
                                output.warn("There is a new conanfile in '%s' remote. "
                                            "Execute 'install -u' to update it."
                                            % remote.name)
                            output.warn("Refused to install!")
                        else:
                            export_path = self._client_cache.export(conan_reference)
                            DiskRemover(self._client_cache).remove(conan_reference)
                            output.info("Retrieving from remote '%s'..." % remote.name)
                            self._remote_manager.get_recipe(conan_reference, export_path, remote)
                            output.info("Updated!")
                    elif ret == -1:
                        # Local recipe is newer than the remote one; never
                        # silently overwrite it.
                        if not self._update:
                            output.info("Current conanfile is newer "
                                        "than %s's one" % remote.name)
                        else:
                            output.error("Current conanfile is newer than %s's one. "
                                         "Run 'conan remove %s' and run install again "
                                         "to replace it." % (remote.name, conan_reference))
        else:
            self._retrieve_recipe(conan_reference, output)

        if self._manifest_manager:
            # Just make sure that the recipe sources are there to check
            conanfile = load_conanfile_class(conanfile_path)
            self.get_recipe_sources(conan_reference, conanfile.short_paths)
            remote = self._registry.get_ref(conan_reference)
            self._manifest_manager.check_recipe(conan_reference, remote)

        return conanfile_path

    def update_available(self, conan_reference):
        """Returns 0 if the conanfiles are equal, 1 if there is an update and -1 if
        the local is newer than the remote"""
        if not conan_reference:
            return 0
        read_manifest, _ = self._client_cache.conan_manifests(conan_reference)
        if read_manifest:
            try:  # get_conan_digest can fail, not in server
                upstream_manifest = self.get_conan_digest(conan_reference)
                if upstream_manifest != read_manifest:
                    # Manifest timestamps decide which side is newer.
                    return 1 if upstream_manifest.time > read_manifest.time else -1
            except ConanException:
                pass

        return 0

    def _retrieve_recipe(self, conan_reference, output):
        """ returns the requested conanfile object, retrieving it from
        remotes if necessary. Can raise NotFoundException
        """
        def _retrieve_from_remote(remote):
            output.info("Trying with '%s'..." % remote.name)
            export_path = self._client_cache.export(conan_reference)
            result = self._remote_manager.get_recipe(conan_reference, export_path, remote)
            # Remember where the recipe came from for later operations.
            self._registry.set_ref(conan_reference, remote)
            return result

        if self._remote_name:
            # User forced a specific remote with -r.
            output.info("Not found, retrieving from server '%s' " % self._remote_name)
            remote = self._registry.remote(self._remote_name)
            return _retrieve_from_remote(remote)
        else:
            ref_remote = self._registry.get_ref(conan_reference)
            if ref_remote:
                output.info("Retrieving from predefined remote '%s'" % ref_remote.name)
                return _retrieve_from_remote(ref_remote)
            else:
                # Try every configured remote in order until one has the recipe.
                output.info("Not found in local cache, looking in remotes...")
                remotes = self._registry.remotes
                for remote in remotes:
                    logger.debug("Trying with remote %s" % remote.name)
                    try:
                        return _retrieve_from_remote(remote)
                    # If exception continue with the next
                    except (ConanOutdatedClient, ConanConnectionError) as exc:
                        output.warn(str(exc))
                        if remote == remotes[-1]:  # Last element not found
                            raise ConanConnectionError("All remotes failed")
                    except NotFoundException as exc:
                        if remote == remotes[-1]:  # Last element not found
                            logger.debug("Not found in any remote, raising...%s" % exc)
                            raise NotFoundException("Unable to find '%s' in remotes"
                                                    % str(conan_reference))
                raise ConanException("No remote defined")

    def complete_recipe_sources(self, conanfile, conan_reference, force_complete=True, short_paths=False):
        # Make sure the exported sources exist locally before an upload.
        # Returns the TGZ name to skip deleting on the server when the upload
        # target already holds the sources, else None.
        sources_folder = self._client_cache.export_sources(conan_reference, short_paths)
        if not hasattr(conanfile, "exports_sources"):
            mkdir(sources_folder)
            return None

        ignore_deleted_file = None
        if not os.path.exists(sources_folder):
            # If not path to sources exists, we have a problem, at least an empty folder
            # should be there
            upload_remote, current_remote = self._get_remote(conan_reference)
            if not current_remote:
                raise ConanException("Trying to upload a package recipe without sources, "
                                     "and the remote for the sources no longer exists")
            if force_complete or current_remote != upload_remote:
                # If uploading to a different remote than the one from which the recipe
                # was retrieved, we definitely need to get the sources, so the recipe is complete
                self.get_recipe_sources(conan_reference, short_paths=short_paths)
            else:
                # But if same remote, no need to upload again the TGZ, it is already in the server
                # But the upload API needs to know it to not remove the server file.
                ignore_deleted_file = EXPORT_SOURCES_TGZ_NAME
        return ignore_deleted_file

    def upload_recipe(self, conan_reference, retry, retry_wait, skip_upload):
        """ upload to defined remote in (-r=remote), to current remote
        or to default remote, in that order.
        If the remote is not set, set it
        """
        conan_file_path = self._client_cache.conanfile(conan_reference)
        conanfile = load_conanfile_class(conan_file_path)
        ignore_deleted_file = self.complete_recipe_sources(conanfile, conan_reference,
                                                           force_complete=False,
                                                           short_paths=conanfile.short_paths)
        remote, ref_remote = self._get_remote(conan_reference)

        result = self._remote_manager.upload_recipe(conan_reference, remote, retry, retry_wait,
                                                    ignore_deleted_file=ignore_deleted_file,
                                                    skip_upload=skip_upload)
        if not ref_remote and not skip_upload:
            # First successful upload associates the recipe with the remote.
            self._registry.set_ref(conan_reference, remote)
        return result

    def _get_remote(self, conan_ref=None):
        # Prioritize -r , then reference registry and then the default remote
        # Returns (remote_to_use, remote_registered_for_the_reference).
        ref_remote = self._registry.get_ref(conan_ref) if conan_ref else None
        if self._remote_name:
            remote = self._registry.remote(self._remote_name)
        else:
            if ref_remote:
                remote = ref_remote
            else:
                remote = self._registry.default_remote
        return remote, ref_remote

    def upload_package(self, package_ref, retry, retry_wait, skip_upload, integrity_check):
        # Upload a binary package; warn when the recipe had no remote yet.
        remote, current_remote = self._get_remote(package_ref.conan)

        if not current_remote:
            self._out.warn("Remote for '%s' not defined, uploading to %s"
                           % (str(package_ref.conan), remote.name))
        result = self._remote_manager.upload_package(package_ref, remote, retry, retry_wait,
                                                     skip_upload, integrity_check)
        if not current_remote and not skip_upload:
            self._registry.set_ref(package_ref.conan, remote)
        return result

    def get_conan_digest(self, conan_ref):
        """ used by update to check the date of packages, require force if older
        """
        remote, current_remote = self._get_remote(conan_ref)
        result = self._remote_manager.get_conan_digest(conan_ref, remote)
        if not current_remote:
            self._registry.set_ref(conan_ref, remote)
        return result

    def get_package_digest(self, package_ref):
        """ used by update to check the date of packages, require force if older
        """
        remote, ref_remote = self._get_remote(package_ref.conan)
        result = self._remote_manager.get_package_digest(package_ref, remote)
        if not ref_remote:
            self._registry.set_ref(package_ref.conan, remote)
        return result

    def get_package_info(self, package_ref):
        """ Gets the package info to check if outdated
        """
        remote, ref_remote = self._get_remote(package_ref.conan)
        result = self._remote_manager.get_package_info(package_ref, remote)
        if not ref_remote:
            self._registry.set_ref(package_ref.conan, remote)
        return result

    def search(self, pattern=None, ignorecase=True):
        # Search recipes in the selected (-r or default) remote only.
        remote, _ = self._get_remote()
        return self._remote_manager.search(remote, pattern, ignorecase)

    def search_remotes(self, pattern=None, ignorecase=True):
        # With -r, search only that remote; otherwise return the results of
        # the first remote that yields any match.
        if self._remote_name:
            remote = self._registry.remote(self._remote_name)
            search_result = self._remote_manager.search(remote, pattern, ignorecase)
            return search_result

        for remote in self._registry.remotes:
            search_result = self._remote_manager.search(remote, pattern, ignorecase)
            if search_result:
                return search_result

    def search_packages(self, reference, query):
        remote, _ = self._get_remote()
        return self._remote_manager.search_packages(remote, reference, query)

    def remove(self, conan_ref):
        # Remote removal requires an explicit -r remote.
        if not self._remote_name:
            raise ConanException("Cannot remove, remote not defined")
        remote = self._registry.remote(self._remote_name)
        result = self._remote_manager.remove(conan_ref, remote)
        current_remote = self._registry.get_ref(conan_ref)
        if current_remote == remote:
            # Drop the stale registry association for the deleted recipe.
            self._registry.remove_ref(conan_ref)
        return result

    def remove_packages(self, conan_ref, remove_ids):
        if not self._remote_name:
            raise ConanException("Cannot remove, remote not defined")
        remote = self._registry.remote(self._remote_name)
        return self._remote_manager.remove_packages(conan_ref, remove_ids, remote)

    def get_path(self, conan_ref, package_id, path):
        # Resolve a file path either from the local cache or from the forced
        # remote when -r was given.
        if not self._remote_name:
            return get_path(self._client_cache, conan_ref, package_id, path)
        else:
            remote = self._registry.remote(self._remote_name)
            return self._remote_manager.get_path(conan_ref, package_id, path, remote)

    def download_packages(self, reference, package_ids):
        """Download the recipe and the given binary packages for a reference."""
        assert(isinstance(package_ids, list))
        remote, _ = self._get_remote(reference)
        export_path = self._client_cache.export(reference)
        self._remote_manager.get_recipe(reference, export_path, remote)
        conanfile_path = self._client_cache.conanfile(reference)
        conanfile = load_conanfile_class(conanfile_path)
        short_paths = conanfile.short_paths
        self._registry.set_ref(reference, remote)
        output = ScopedOutput(str(reference), self._out)
        for package_id in package_ids:
            package_ref = PackageReference(reference, package_id)
            package_folder = self._client_cache.package(package_ref, short_paths=short_paths)
            self._out.info("Downloading %s" % str(package_ref))
            self._retrieve_remote_package(package_ref, package_folder, output, remote)

    def _retrieve_remote_package(self, package_ref, package_folder, output, remote=None):
        # Returns True when the binary was found and installed, False when it
        # is missing in the remote (the caller may then build from sources).
        if remote is None:
            remote = self._registry.get_ref(package_ref.conan)
        if not remote:
            output.warn("Package doesn't have a remote defined. "
                        "Probably created locally and not uploaded")
            return False
        package_id = str(package_ref.package_id)
        try:
            output.info("Looking for package %s in remote '%s' " % (package_id, remote.name))
            # Will raise if not found NotFoundException
            self._remote_manager.get_package(package_ref, package_folder, remote)
            output.success('Package installed %s' % package_id)
            return True
        except ConanConnectionError:
            raise  # This shouldn't be skipped
        except ConanException as e:
            output.warn('Binary for %s not in remote: %s' % (package_id, str(e)))
            return False

    def authenticate(self, name, password):
        # With no name, refresh/list users of every remote instead of logging in.
        if not name:  # List all users, from all remotes
            remotes = self._registry.remotes
            if not remotes:
                self._out.error("No remotes defined")
            for remote in remotes:
                self._remote_manager.authenticate(remote, None, None)
            return
        remote, _ = self._get_remote()
        return self._remote_manager.authenticate(remote, name, password)
| |
"""Options manager for :class:`~diofant.polys.polytools.Poly` and public API functions."""
from __future__ import annotations
import re
import typing
from ..core import Basic, I
from ..core.sympify import sympify
from ..utilities import has_dups, numbered_symbols, topological_sort
from .polyerrors import FlagError, GeneratorsError, OptionError
__all__ = 'Options', 'Order'
class Option:
    """Base class for all kinds of options."""

    # Key under which the option is stored in an Options dict; set by subclasses.
    option: str

    is_Flag = False

    # Dependency metadata consumed by Options._init_dependencies_order and
    # the mutual-exclusion check in Options.__init__.
    requires: list[str] = []
    excludes: list[str] = []

    after: list[str] = []
    before: list[str] = []

    @classmethod
    def default(cls):
        # No default value unless a subclass overrides this.
        return

    @classmethod
    def preprocess(cls, option):
        # Validate/normalize a raw user-supplied value; overridden by subclasses.
        return  # pragma: no cover

    @classmethod
    def postprocess(cls, options):
        # Hook to adjust the whole Options dict after all options are set.
        return
class Flag(Option):
    """Base class for all kinds of flags."""

    # Flags are only accepted where explicitly whitelisted (see allowed_flags).
    is_Flag = True
class BooleanOption(Option):
    """An option that must have a boolean value or equivalent assigned."""

    @classmethod
    def preprocess(cls, option):
        """Coerce ``option`` to ``bool``, rejecting non-boolean values."""
        # Membership test (not isinstance) so that 0/1 are accepted as well.
        if option not in [True, False]:
            raise OptionError(f"'{cls.option}' must have a boolean value "
                              f'assigned, got {option}')
        return bool(option)
class OptionType(type):
    """Base type for all options that does registers options."""

    def __init__(cls, *args, **kwargs):
        super().__init__(cls)

        @property
        def getter(a):
            # ``a`` is an Options instance: fall back to the option's default
            # when it was not explicitly set.
            try:
                return a[cls.option]
            except KeyError:
                return cls.default()

        # Register the option class globally and expose attribute-style
        # access on Options (e.g. ``opt.domain``).
        setattr(Options, cls.option, getter)
        Options.__options__[cls.option] = cls
class Options(dict):
    """
    Options manager for polynomial manipulation module.

    Examples
    ========

    >>> Options((x, y, z), {'domain': 'ZZ'})
    {'auto': False, 'domain': ZZ, 'gens': (x, y, z)}

    >>> build_options((x, y, z), {'domain': 'ZZ'})
    {'auto': False, 'domain': ZZ, 'gens': (x, y, z)}

    **Options**

    * Expand --- boolean option
    * Gens --- option
    * Wrt --- option
    * Sort --- option
    * Order --- option
    * Field --- boolean option
    * Greedy --- boolean option
    * Domain --- option
    * Split --- boolean option
    * Gaussian --- boolean option
    * Extension --- option
    * Modulus --- option
    * Symmetric --- boolean option
    * Strict --- boolean option

    **Flags**

    * Auto --- boolean flag
    * Frac --- boolean flag
    * Formal --- boolean flag
    * Polys --- boolean flag
    * Include --- boolean flag
    * All --- boolean flag
    * Gen --- flag
    """

    # Topologically-sorted processing order (filled by _init_dependencies_order)
    # and the registry of all Option subclasses (filled by OptionType).
    __order__: typing.Optional[list[str]] = None
    __options__: dict[str, type[Option]] = {}

    def __init__(self, gens, args, flags=None, strict=False):
        """Build the options dict from generators and raw keyword arguments.

        ``flags`` whitelists which flags may appear when ``strict`` is True.
        Raises OptionError for unknown options, disallowed flags, or
        mutually exclusive combinations.
        """
        dict.__init__(self)

        if gens and args.get('gens', ()):
            raise OptionError("both '*gens' and keyword "
                              "argument 'gens' supplied")
        if gens:
            args = dict(args)
            args['gens'] = gens

        defaults = args.pop('defaults', {})

        def preprocess_options(args):
            # Validate each raw value through its Option class; unknown names
            # and disallowed flags are rejected here.
            for option, value in args.items():
                try:
                    cls = self.__options__[option]
                except KeyError as exc:
                    raise OptionError(f"'{option}' is not a "
                                      'valid option') from exc

                if issubclass(cls, Flag):
                    if strict and (flags is None or option not in flags):
                        raise OptionError(f"'{option}' flag is not "
                                          'allowed in this context')

                if value is not None:
                    self[option] = cls.preprocess(value)

        preprocess_options(args)

        # Drop defaults that are overridden explicitly or excluded by an
        # already-set option.
        for key in dict(defaults):
            if key in self:
                del defaults[key]
            else:
                for option in self:
                    cls = self.__options__[option]

                    if key in cls.excludes:
                        del defaults[key]
                        break

        preprocess_options(defaults)

        # Enforce mutual exclusion between the options that were set.
        for option in self:
            cls = self.__options__[option]

            for exclude_option in cls.excludes:
                if self.get(exclude_option) is not None:
                    raise OptionError(f"'{option}' option is not allowed together with '{exclude_option}'")

        # Run postprocess hooks in dependency order.
        for option in self.__order__:  # pylint: disable=not-an-iterable
            self.__options__[option].postprocess(self)

    @classmethod
    def _init_dependencies_order(cls):
        """Resolve the order of options' processing."""
        if cls.__order__ is None:
            vertices, edges = [], set()

            for name, option in cls.__options__.items():
                vertices.append(name)

                for _name in option.after:
                    edges.add((_name, name))

                for _name in option.before:
                    edges.add((name, _name))

            try:
                cls.__order__ = topological_sort((vertices, list(edges)))
            except ValueError as exc:
                raise RuntimeError('cycle detected in diofant.polys'
                                   ' options framework') from exc

    def clone(self, updates=None):
        """Clone ``self`` and update specified options.

        ``updates`` defaults to no updates. (A mutable ``{}`` default was
        replaced by the ``None`` sentinel; behavior is unchanged.)
        """
        obj = dict.__new__(self.__class__)

        for option, value in self.items():
            obj[option] = value

        for option, value in (updates or {}).items():
            obj[option] = value

        return obj

    def __setattr__(self, attr, value):
        # Registered options are stored as dict entries, not attributes.
        if attr in self.__options__:
            self[attr] = value
        else:
            super().__setattr__(attr, value)

    @property
    def args(self):
        # Non-flag options (minus gens) suitable for re-constructing Options.
        args = {}

        for option, value in self.items():
            if value is not None and option != 'gens':
                cls = self.__options__[option]

                if not issubclass(cls, Flag):
                    args[option] = value

        return args

    @property
    def options(self):
        # All non-flag option values, including defaults.
        options = {}

        for option, cls in self.__options__.items():
            if not issubclass(cls, Flag):
                options[option] = getattr(self, option)

        return options

    @property
    def flags(self):
        # All flag values, including defaults.
        flags = {}

        for option, cls in self.__options__.items():
            if issubclass(cls, Flag):
                flags[option] = getattr(self, option)

        return flags
class Expand(BooleanOption, metaclass=OptionType):
    """``expand`` option to polynomial manipulation functions."""

    option = 'expand'

    @classmethod
    def default(cls):
        # Expressions are expanded by default before conversion.
        return True
class Gens(Option, metaclass=OptionType):
    """``gens`` option to polynomial manipulation functions."""

    option = 'gens'

    @classmethod
    def default(cls):
        return ()

    @classmethod
    def preprocess(cls, option):
        """Validate the generators and normalize them to a tuple."""
        if isinstance(option, Basic):
            option = option,

        # ``(None,)`` is the conventional spelling of "no generators".
        if option == (None,):
            return ()
        if has_dups(option):
            raise GeneratorsError(f'duplicated generators: {option}')
        if any(gen.is_commutative is False for gen in option):
            raise GeneratorsError(f'non-commutative generators: {option}')
        return tuple(option)
class Wrt(Option, metaclass=OptionType):
    """``wrt`` option to polynomial manipulation functions."""

    option = 'wrt'

    # Comma- and/or whitespace-separated names.
    _re_split = re.compile(r'\s*,\s*|\s+')

    @classmethod
    def preprocess(cls, option):
        """Normalize the differentiation variables to a list of names."""
        if isinstance(option, Basic):
            return [str(option)]
        if isinstance(option, str):
            stripped = option.strip()
            if stripped.endswith(','):
                raise OptionError('Bad input: missing parameter.')
            if not stripped:
                return []
            return list(cls._re_split.split(stripped))
        if hasattr(option, '__getitem__'):
            return [str(item) for item in option]
        raise OptionError("invalid argument for 'wrt' option")
class Sort(Option, metaclass=OptionType):
    """``sort`` option to polynomial manipulation functions."""

    option = 'sort'

    @classmethod
    def default(cls):
        return []

    @classmethod
    def preprocess(cls, option):
        """Parse a ``'a > b > c'`` string or a sequence into a name list."""
        if isinstance(option, str):
            return [gen.strip() for gen in option.split('>')]
        if hasattr(option, '__getitem__'):
            return [str(item) for item in option]
        raise OptionError("invalid argument for 'sort' option")
class Order(Option, metaclass=OptionType):
    """``order`` option to polynomial manipulation functions."""

    option = 'order'

    @classmethod
    def default(cls):
        # Lexicographic monomial order is the default.
        from .orderings import lex
        return lex

    @classmethod
    def preprocess(cls, option):
        # monomial_key accepts a name or a callable and validates it.
        from .orderings import monomial_key
        return monomial_key(option)
class Field(BooleanOption, metaclass=OptionType):
    """``field`` option to polynomial manipulation functions."""

    option = 'field'

    # Incompatible with an explicit or implied domain choice.
    excludes = ['domain', 'split', 'gaussian']
class Greedy(BooleanOption, metaclass=OptionType):
    """``greedy`` option to polynomial manipulation functions."""

    option = 'greedy'

    # Incompatible with any explicit domain specification.
    excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus']
class Composite(BooleanOption, metaclass=OptionType):
    """``composite`` option to polynomial manipulation functions."""

    option = 'composite'

    @classmethod
    def default(cls):
        # No default: unset means "decide automatically".
        return

    excludes = ['domain', 'split', 'gaussian', 'modulus']
class Domain(Option, metaclass=OptionType):
    """``domain`` option to polynomial manipulation functions."""

    option = 'domain'

    excludes = ['field', 'greedy', 'split', 'gaussian', 'extension']

    after = ['gens']

    # Textual domain specifications, e.g. 'RR_53', 'FF(7)', 'QQ[x,y]',
    # 'ZZ(x)' and 'QQ<sqrt(2)>'.
    _re_realfield = re.compile(r'^(R|RR)(_(\d+))?$')
    _re_complexfield = re.compile(r'^(C|CC)(_(\d+))?$')
    _re_finitefield = re.compile(r'^(FF|GF)\((\d+)\)$')
    _re_polynomial = re.compile(r'^(Z|ZZ|Q|QQ)\[(.+)\]$')
    _re_fraction = re.compile(r'^(Z|ZZ|Q|QQ)\((.+)\)$')
    _re_algebraic = re.compile(r'^(Q|QQ)\<(.+)\>$')

    @classmethod
    def preprocess(cls, option):
        # Accepts a ready Domain instance or a string spec; anything else
        # (or an unparsable string) raises OptionError.
        from .. import domains
        if isinstance(option, domains.Domain):
            return option
        elif isinstance(option, str):
            if option in ['Z', 'ZZ']:
                return domains.ZZ

            if option in ['Q', 'QQ']:
                return domains.QQ

            if option == 'EX':
                return domains.EX

            r = cls._re_realfield.match(option)

            if r is not None:
                _, _, prec = r.groups()

                if prec is None:
                    return domains.RR
                else:
                    return domains.RealField(int(prec))

            r = cls._re_complexfield.match(option)

            if r is not None:
                _, _, prec = r.groups()

                if prec is None:
                    return domains.CC
                else:
                    return domains.ComplexField(int(prec))

            r = cls._re_finitefield.match(option)

            if r is not None:
                return domains.FF(int(r.groups()[1]))

            r = cls._re_polynomial.match(option)

            if r is not None:
                ground, gens = r.groups()

                gens = list(map(sympify, gens.split(',')))

                if ground in ['Z', 'ZZ']:
                    return domains.ZZ.inject(*gens)
                else:
                    return domains.QQ.inject(*gens)

            r = cls._re_fraction.match(option)

            if r is not None:
                ground, gens = r.groups()

                gens = list(map(sympify, gens.split(',')))

                if ground in ['Z', 'ZZ']:
                    return domains.ZZ.inject(*gens).field
                else:
                    return domains.QQ.inject(*gens).field

            r = cls._re_algebraic.match(option)

            if r is not None:
                gens = list(map(sympify, r.groups()[1].split(',')))
                return domains.QQ.algebraic_field(*gens)

        raise OptionError('expected a valid domain specification, '
                          f'got {option}')

    @classmethod
    def postprocess(cls, options):
        from .. import domains
        from ..domains.compositedomain import CompositeDomain
        # Generators must not collide with the symbols of a composite ground
        # domain (e.g. domain=ZZ[x] together with gens=(x,)).
        if 'gens' in options and 'domain' in options and isinstance(options['domain'], CompositeDomain) and \
                (set(options['domain'].symbols) & set(options['gens'])):
            raise GeneratorsError('ground domain and generators '
                                  'interfere together')
        # The EX domain gives no way to infer generators automatically.
        if ('gens' not in options or not options['gens']) and \
                'domain' in options and options['domain'] == domains.EX:
            raise GeneratorsError('you have to provide generators because'
                                  ' EX domain was requested')
class Split(BooleanOption, metaclass=OptionType):
    """``split`` option to polynomial manipulation functions."""

    option = 'split'

    excludes = ['field', 'greedy', 'domain', 'gaussian', 'extension', 'modulus']

    @classmethod
    def postprocess(cls, options):
        # The option is accepted syntactically but has no implementation yet.
        if 'split' in options:
            raise NotImplementedError("'split' option is not implemented yet")
class Gaussian(BooleanOption, metaclass=OptionType):
    """``gaussian`` option to polynomial manipulation functions."""

    option = 'gaussian'

    excludes = ['field', 'greedy', 'domain', 'split', 'extension', 'modulus']

    @classmethod
    def postprocess(cls, options):
        # ``gaussian=True`` is shorthand for an algebraic extension by I
        # (i.e. working over QQ(I)); delegate to Extension's postprocessing.
        if 'gaussian' in options and options['gaussian'] is True:
            options['extension'] = {I}
            Extension.postprocess(options)
class Extension(Option, metaclass=OptionType):
    """``extension`` option to polynomial manipulation functions."""

    option = 'extension'

    excludes = ['greedy', 'domain', 'split', 'gaussian', 'modulus']

    @classmethod
    def preprocess(cls, option):
        """Normalize to ``bool``, a non-empty ``set`` or ``None``."""
        # 1/0 (and True/False, which compare equal) mean a plain boolean.
        if option == 1 or option == 0:
            return bool(option)
        # A bare element becomes a one-element set; an empty iterable maps
        # to None, a non-empty one to a set of its elements.
        if not hasattr(option, '__iter__'):
            return {option}
        return set(option) if option else None

    @classmethod
    def postprocess(cls, options):
        from .. import domains
        # A concrete extension set implies an algebraic number field domain.
        if 'extension' in options and options['extension'] not in (True, False):
            options['domain'] = domains.QQ.algebraic_field(
                *options['extension'])
class Modulus(Option, metaclass=OptionType):
    """``modulus`` option to polynomial manipulation functions."""

    option = 'modulus'

    excludes = ['greedy', 'split', 'domain', 'gaussian', 'extension']

    @classmethod
    def preprocess(cls, option):
        """Coerce ``option`` to a positive ``int`` modulus.

        Raises OptionError when the value is not a positive integer.
        """
        option = sympify(option)

        if option.is_Integer and option > 0:
            return int(option)
        else:
            # Fixed grammar of the message ("must a" -> "must be a").
            raise OptionError(
                f"'modulus' must be a positive integer, got {option}")

    @classmethod
    def postprocess(cls, options):
        from .. import domains
        # A modulus implies computing over the finite field GF(modulus).
        if 'modulus' in options:
            modulus = options['modulus']
            options['domain'] = domains.FF(modulus)
class Strict(BooleanOption, metaclass=OptionType):
    """``strict`` option to polynomial manipulation functions."""

    option = 'strict'

    @classmethod
    def default(cls):
        # Strict validation is on by default.
        return True
class Auto(BooleanOption, Flag, metaclass=OptionType):
    """``auto`` flag to polynomial manipulation functions."""

    option = 'auto'

    after = ['field', 'domain', 'extension', 'gaussian']

    @classmethod
    def default(cls):
        return True

    @classmethod
    def postprocess(cls, options):
        # An explicit domain/field choice disables automatic domain
        # adjustment unless the caller set ``auto`` themselves.
        explicit_domain = 'domain' in options or 'field' in options
        if explicit_domain and 'auto' not in options:
            options['auto'] = False
class Frac(BooleanOption, Flag, metaclass=OptionType):
    """``frac`` option to polynomial manipulation functions."""

    option = 'frac'

    @classmethod
    def default(cls):
        return False
class Formal(BooleanOption, Flag, metaclass=OptionType):
    """``formal`` flag to polynomial manipulation functions."""

    option = 'formal'

    @classmethod
    def default(cls):
        return False
class Polys(BooleanOption, Flag, metaclass=OptionType):
    """``polys`` flag to polynomial manipulation functions."""

    # No default: unset means "decide based on input" (inherits Option.default).
    option = 'polys'
class Include(BooleanOption, Flag, metaclass=OptionType):
    """``include`` flag to polynomial manipulation functions."""

    option = 'include'

    @classmethod
    def default(cls):
        return False
class All(BooleanOption, Flag, metaclass=OptionType):
    """``all`` flag to polynomial manipulation functions."""

    option = 'all'

    @classmethod
    def default(cls):
        return False
class Gen(Flag, metaclass=OptionType):
    """``gen`` flag to polynomial manipulation functions."""

    option = 'gen'

    @classmethod
    def default(cls):
        # By default operate on the first generator.
        return 0

    @classmethod
    def preprocess(cls, option):
        """Accept a symbolic generator or an index; reject anything else."""
        if not isinstance(option, (Basic, int)):
            raise OptionError("invalid argument for 'gen' option")
        return option
class Symbols(Flag, metaclass=OptionType):
    """``symbols`` flag to polynomial manipulation functions."""

    option = 'symbols'

    @classmethod
    def default(cls):
        # A fresh stream s1, s2, ... is created on every access.
        return numbered_symbols('s', start=1)

    @classmethod
    def preprocess(cls, option):
        """Turn any iterable of symbols into an iterator."""
        if not hasattr(option, '__iter__'):
            raise OptionError('expected an iterator or '
                              f'iterable container, got {option}')
        return iter(option)
class Method(Flag, metaclass=OptionType):
    """``method`` flag to polynomial manipulation functions."""

    option = 'method'

    @classmethod
    def preprocess(cls, option):
        """Normalize the method name to lower case; must be a string."""
        if not isinstance(option, str):
            raise OptionError(f'expected a string, got {option}')
        return option.lower()
def build_options(gens, args=None):
    """Construct options from keyword arguments or ... options."""
    if args is None:
        # Single-argument form: the first positional is really ``args``.
        gens, args = (), gens

    # A lone pre-built options object passes straight through.
    if len(args) == 1 and 'opt' in args and not gens:
        return args['opt']
    return Options(gens, args)
def allowed_flags(args, flags):
    """
    Allow specified flags to be used in the given context.

    Examples
    ========

    >>> allowed_flags({'domain': ZZ}, [])

    >>> allowed_flags({'domain': ZZ, 'frac': True}, [])
    Traceback (most recent call last):
    ...
    FlagError: 'frac' flag is not allowed in this context

    >>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])
    """
    allowed = set(flags)

    for name in args:
        try:
            is_flag = Options.__options__[name].is_Flag
        except KeyError as exc:
            raise OptionError(f"'{name}' is not a valid option") from exc
        if is_flag and name not in allowed:
            raise FlagError(f"'{name}' flag is not allowed "
                            'in this context')
def set_defaults(options, **defaults):
    """Update options with default values."""
    # An existing 'defaults' entry wins; return the mapping untouched.
    if 'defaults' in options:
        return options

    merged = dict(options)
    merged['defaults'] = defaults
    return merged
Options._init_dependencies_order()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Part of the Keras training engine related to distributed training.
"""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks as cbks
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.engine import partial_batch_padding_handler as padding_util
from tensorflow.python.keras.engine import training_arrays
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.mode_keys import ModeKeys
from tensorflow.python.util import nest
def fit_distributed(model,
                    x=None,
                    y=None,
                    batch_size=None,
                    epochs=1,
                    verbose=1,
                    callbacks=None,
                    validation_split=0.,
                    validation_data=None,
                    shuffle=True,
                    class_weight=None,
                    sample_weight=None,
                    initial_epoch=0,
                    steps_per_epoch=None,
                    validation_steps=None,
                    validation_freq=1):
  """Fit loop for Distribution Strategies.

  Standardizes the user-provided data into a dataset, then dispatches to
  the TPU-specific fit loop or the generic array fit loop depending on the
  model's distribution strategy.

  Note: `validation_split` is rejected below when validation data is not
  provided, since splitting is not supported with distribution strategies.
  """
  distributed_training_utils.validate_callbacks(callbacks, model.optimizer)
  distributed_training_utils.validate_inputs(
      x, y, model._distribution_strategy)

  # When the input is a numpy array, infer steps/batch size from the number
  # of samples and the strategy's replica configuration.
  first_x_value = nest.flatten(x)[0]
  if isinstance(first_x_value, np.ndarray):
    # Until support for partial batch is implemented across all
    # functions and distribution strategy, we pass `mode` to selectively
    # relax the constraint to consume all the training samples.
    steps_per_epoch, batch_size = (
        distributed_training_utils.get_input_params(
            model._distribution_strategy, first_x_value, steps_per_epoch,
            batch_size, mode=ModeKeys.TRAIN))
  batch_size = model._validate_or_infer_batch_size(
      batch_size, steps_per_epoch, x)
  dataset = model._distribution_standardize_user_data(
      x, y,
      sample_weight=sample_weight,
      class_weight=class_weight,
      batch_size=batch_size,
      validation_split=validation_split,
      shuffle=shuffle)
  val_dataset = None
  if validation_data:
    val_x, val_y, val_sample_weights = model._unpack_validation_data(
        validation_data)
    distributed_training_utils.validate_inputs(
        val_x, val_y, model._distribution_strategy)
    first_valx_value = nest.flatten(val_x)[0]
    if isinstance(first_valx_value, np.ndarray):
      validation_steps, _ = distributed_training_utils.get_input_params(
          model._distribution_strategy, first_valx_value, validation_steps,
          batch_size)
    val_dataset = model._distribution_standardize_user_data(
        val_x, val_y,
        sample_weight=val_sample_weights,
        class_weight=None,
        batch_size=batch_size,
        validation_split=validation_split,
        shuffle=shuffle)
  elif validation_split:
    raise ValueError('validation_split argument is not supported with '
                     'distribution strategies.')

  # TPU strategies use a dedicated experimental loop; everything else goes
  # through the standard array-based fit loop.
  if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
    return experimental_tpu_fit_loop(
        model,
        dataset,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        val_dataset=val_dataset,
        initial_epoch=initial_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        validation_freq=validation_freq)
  else:
    return training_arrays.fit_loop(
        model,
        dataset,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=callbacks,
        val_inputs=val_dataset,
        shuffle=shuffle,
        initial_epoch=initial_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        validation_freq=validation_freq,
        steps_name='steps_per_epoch')
def evaluate_distributed(model,
                         x=None,
                         y=None,
                         batch_size=None,
                         verbose=1,
                         sample_weight=None,
                         steps=None,
                         callbacks=None):
  """Evaluate loop for Distribution Strategies.

  Standardizes the inputs into a dataset and dispatches to the TPU test
  loop or the generic array test loop depending on the model's strategy.
  """
  distributed_training_utils.validate_inputs(x, y, model._distribution_strategy)
  # For numpy inputs, derive steps/batch size from the sample count and the
  # strategy's replica configuration.
  first_x_value = nest.flatten(x)[0]
  if isinstance(first_x_value, np.ndarray):
    steps, batch_size = distributed_training_utils.get_input_params(
        model._distribution_strategy, first_x_value, steps, batch_size)
  batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
  dataset = model._distribution_standardize_user_data(
      x, y,
      sample_weight=sample_weight,
      batch_size=batch_size)
  if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
    return experimental_tpu_test_loop(
        model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
  else:
    return training_arrays.test_loop(
        model,
        inputs=dataset,
        batch_size=batch_size,
        verbose=verbose,
        steps=steps,
        callbacks=callbacks)
def predict_distributed(model,
                        x=None,
                        batch_size=None,
                        verbose=0,
                        steps=None,
                        callbacks=None):
  """Predict loop for Distribution Strategies.

  Unlike fit/evaluate, prediction allows partial batches
  (`allow_partial_batch=True`) and does not repeat the dataset.
  """
  distributed_training_utils.validate_inputs(
      x, None, model._distribution_strategy, allow_partial_batch=True)
  first_x_value = nest.flatten(x)[0]
  if isinstance(first_x_value, np.ndarray):
    steps, batch_size = distributed_training_utils.get_input_params(
        model._distribution_strategy, first_x_value, steps,
        batch_size, mode=ModeKeys.PREDICT)
  batch_size = model._validate_or_infer_batch_size(batch_size, steps, x)
  dataset = model._distribution_standardize_user_data(
      x,
      batch_size=batch_size,
      repeat=False,
      allow_partial_batch=True)
  if distributed_training_utils.is_tpu_strategy(model._distribution_strategy):
    return experimental_tpu_predict_loop(
        model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
  else:
    return training_arrays.predict_loop(
        model,
        dataset,
        batch_size=batch_size,
        verbose=verbose,
        steps=steps,
        callbacks=callbacks)
def experimental_tpu_fit_loop(model,
                              dataset,
                              epochs=100,
                              verbose=1,
                              callbacks=None,
                              initial_epoch=0,
                              steps_per_epoch=None,
                              val_dataset=None,
                              validation_steps=None,
                              validation_freq=1):
  """Fit loop for training with TPU DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      dataset: Dataset that returns inputs and targets
      epochs: Number of times to iterate over the data
      verbose: Integer, Verbosity mode, 0, 1 or 2
      callbacks: List of callbacks to be called during training
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run)
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      val_dataset: Dataset for validation data.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.
      validation_freq: Only relevant if validation data is provided. Integer or
          `collections.Container` instance (e.g. list, tuple, etc.). If an
          integer, specifies how many training epochs to run before a new
          validation run is performed, e.g. `validation_freq=2` runs
          validation every 2 epochs. If a Container, specifies the epochs on
          which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
          validation at the end of the 1st, 2nd, and 10th epochs.

  Returns:
      Returns `None`.

  Raises:
      ValueError: in case of invalid arguments.
  """
  mode = ModeKeys.TRAIN
  # TODO(fchollet): add support for `steps_per_epoch=None` in TPU loops.
  current_strategy = model._distribution_strategy
  iterator = distributed_training_utils.get_iterator(dataset, current_strategy)
  steps_per_epoch = training_utils.infer_steps_for_dataset(
      dataset, steps_per_epoch, epochs, steps_name='steps_per_epoch')
  if (current_strategy.extended.steps_per_run != 1 and
      steps_per_epoch is None):
    raise ValueError('`steps_per_epoch` should be specified when calling '
                     '`fit` on the model with TPUStrategy when '
                     '`steps_per_run` != 1 .')

  scope = distributed_training_utils.distributed_scope(
      strategy=current_strategy, learning_phase=1)
  scope.__enter__()

  def _per_device_fit_function(model):
    model._make_fit_function()
    return (model._fit_function.inputs, model._fit_function.outputs,
            model._fit_function.updates_op, model._fit_function.session_kwargs)

  out_labels = model.metrics_names or []

  def step_fn(ctx, inputs):
    """Clones the model and calls make_fit_function."""
    inputs, targets = inputs
    if model._compile_distribution:
      distributed_training_utils.clone_model_on_replicas(
          model, current_strategy, mode, inputs=inputs, targets=targets)
    else:
      distributed_training_utils._build_distributed_network(
          model, current_strategy, mode, inputs, targets)

    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.extended.call_for_each_replica(
         _per_device_fit_function,
         args=(distributed_training_utils.get_distributed_model(
             model, ModeKeys.TRAIN),))
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs,
         grouped_updates, grouped_session_args)
    combined_fn = K.function(
        all_inputs,
        all_outputs,
        updates=all_updates,
        name='distributed_fit_function',
        **all_session_args)

    for label, output in zip(out_labels, combined_fn.outputs):
      if label == 'loss':
        reduce_op = ds_reduce_util.ReduceOp.SUM
      else:
        # We reduce all other metrics using mean for now. This is temporary
        # workaround until new metrics are in place.
        reduce_op = ds_reduce_util.ReduceOp.MEAN
      ctx.set_last_step_output(label, output, reduce_op)

    # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
    # feed_dict, session kwargs, run options, run_metadata for now. These should
    # be handled appropriately
    return combined_fn.updates_op

  # Add initial dummy values for loss and other metric tensors.
  initial_loop_values = {}
  initial_loop_values['loss'] = constant_op.constant(1e7)
  for name in model.metrics_names[1:]:
    tensor = model._all_stateful_metrics_tensors[name]
    initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

  use_steps = steps_per_epoch is not None
  if use_steps:
    iteration_value = min(steps_per_epoch,
                          current_strategy.extended.steps_per_run)
  else:
    iteration_value = current_strategy.extended.steps_per_run

  steps_per_run = K.variable(
      value=iteration_value,
      dtype='int32',
      name='steps_per_run')
  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=steps_per_run,
      initial_loop_values=initial_loop_values)
  train_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  do_validation = bool(validation_steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=do_validation,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      verbose=verbose,
      count_mode='steps',
      mode=mode)

  # Calculate the steps each time on the device.
  if use_steps:
    steps_to_run = ([current_strategy.extended.steps_per_run] *
                    (steps_per_epoch //
                     current_strategy.extended.steps_per_run))
    if steps_per_epoch % current_strategy.extended.steps_per_run:
      steps_to_run.append(
          steps_per_epoch % current_strategy.extended.steps_per_run)
    target_steps = len(steps_to_run)
  else:
    target_steps = np.inf

  callbacks._call_begin_hook(mode)
  for epoch in range(initial_epoch, epochs):
    distributed_training_utils._reset_metrics(model)
    callbacks.on_epoch_begin(epoch)
    epoch_logs = {}
    step_index = 0
    prev_step_count = None
    current_step = 0
    while current_step < target_steps:
      step_count = steps_to_run[current_step] if use_steps else 1
      batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count}
      callbacks._call_batch_hook(mode, 'begin', step_index, batch_logs)
      if prev_step_count is None or step_count != prev_step_count:
        steps_per_run.load(step_count, K.get_session())
        prev_step_count = step_count
      try:
        _, outputs = K.get_session().run([train_op, output_tensors])
      except errors.OutOfRangeError:
        if use_steps:
          # NOTE: the multiplication must be parenthesized; without parens,
          # `'...' % steps_per_epoch * epochs` is parsed as
          # `('...' % steps_per_epoch) * epochs`, repeating the formatted
          # string `epochs` times instead of formatting the product.
          logging.warning('Your dataset iterator ran out of data; '
                          'interrupting training. Make sure that your dataset '
                          'can generate at least `steps_per_epoch * epochs` '
                          'batches (in this case, %d batches).' %
                          (steps_per_epoch * epochs))
        else:
          target_steps = current_step
          logging.info('Dataset iterator ran out of data. Inferring the '
                       'value of `steps_per_epoch` as %s .' % target_steps)
          distributed_training_utils.initialize_iterator(iterator,
                                                         current_strategy)
        break

      batch_logs.update(outputs)
      callbacks._call_batch_hook(mode, 'end', step_index, batch_logs)
      step_index = step_index + step_count
      current_step += 1

      if callbacks.model.stop_training:
        break

    if (do_validation and
        training_utils.should_run_validation(validation_freq, epoch)):
      logging.info('Running validation at fit epoch: %s', epoch)

      if model._compile_distribution:
        # Since we create a new clone from the original model we need to copy
        # the weights back to the original model before we can run validation.
        distributed_training_utils._copy_weights_to_original_model(
            model, ModeKeys.TRAIN)

      val_outs = experimental_tpu_test_loop(  # pylint: disable=undefined-variable
          model,
          val_dataset,
          steps=validation_steps,
          verbose=verbose,
          callbacks=callbacks)
      if not isinstance(val_outs, list):
        val_outs = [val_outs]
      # Same labels assumed.
      for label, val_out in zip(out_labels, val_outs):
        epoch_logs['val_' + label] = val_out

    callbacks.on_epoch_end(epoch, epoch_logs)
    if callbacks.model.stop_training:
      break
  callbacks._call_end_hook(mode)

  if model._compile_distribution:
    # Copy the weights back from the replicated model to the original model.
    distributed_training_utils._copy_weights_to_original_model(
        model, ModeKeys.TRAIN)
  scope.__exit__(None, None, None)
  return model.history
def experimental_tpu_test_loop(model,
                               dataset,
                               verbose=0,
                               steps=None,
                               callbacks=None):
  """Test loop for evaluating with TPU DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      dataset: Dataset for input data.
      verbose: Integer, Verbosity mode 0 or 1.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.
      callbacks: List of callbacks to be called during training

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the outputs.
  """
  mode = ModeKeys.TEST
  current_strategy = model._distribution_strategy
  iterator = distributed_training_utils.get_iterator(dataset,
                                                     current_strategy)
  steps = training_utils.infer_steps_for_dataset(dataset, steps,
                                                 steps_name='steps')

  scope = distributed_training_utils.distributed_scope(
      strategy=current_strategy, learning_phase=0)
  scope.__enter__()

  def _per_device_eval_function(model):
    model._make_eval_function()
    return (model._eval_function.inputs, model._eval_function.outputs,
            model._eval_function.updates_op,
            model._eval_function.session_kwargs)

  def step_fn(ctx, inputs):
    """Clones the model and calls make_eval_function."""
    inputs, targets = inputs
    if model._compile_distribution:
      distributed_training_utils.clone_model_on_replicas(
          model, current_strategy, mode=mode, inputs=inputs, targets=targets)
    else:
      distributed_training_utils._build_distributed_network(
          model, current_strategy, mode, inputs, targets)

    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.extended.call_for_each_replica(
         _per_device_eval_function,
         args=(distributed_training_utils.get_distributed_model(
             model, ModeKeys.TEST),))

    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args)

    combined_fn = K.function(
        all_inputs, all_outputs,
        updates=all_updates,
        name='distributed_test_function',
        **all_session_args)

    for label, output in zip(model.metrics_names, combined_fn.outputs):
      if label == 'loss':
        reduce_op = ds_reduce_util.ReduceOp.SUM
      else:
        # We reduce all other metrics using mean for now. This is temporary
        # workaround until new metrics are in place.
        reduce_op = ds_reduce_util.ReduceOp.MEAN
      ctx.set_last_step_output(label, output, reduce_op)

    return combined_fn.updates_op

  # Add initial dummy values for loss and other metric tensors.
  initial_loop_values = {}
  initial_loop_values['loss'] = constant_op.constant(1e7)
  for name in model.metrics_names[1:]:
    tensor = model._all_stateful_metrics_tensors[name]
    initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

  # TODO(priyag): Use steps_per_run when we use new metrics as they will
  # allow handling metric computation at each step using variables.
  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=1,
      initial_loop_values=initial_loop_values)

  test_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  # Create the progbar for every verbose level that later updates it, so that
  # verbose=2 does not hit an undefined name below.
  if verbose >= 1:
    progbar = Progbar(target=steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  distributed_training_utils._reset_metrics(model)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=False,
      epochs=1,
      steps_per_epoch=steps,
      verbose=verbose,
      count_mode='steps',
      mode=ModeKeys.TEST)
  callbacks._call_begin_hook(mode)

  outs = [0.] * len(model.metrics_names)
  if steps is not None:
    target_steps = steps
  else:
    target_steps = np.inf

  current_step = 0
  while current_step < target_steps:
    batch_logs = {'batch': current_step, 'size': 1}
    callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
    try:
      _, batch_outs = K.get_session().run([test_op, output_tensors])
    except errors.OutOfRangeError:
      if steps is not None:
        # The two literals must be wrapped in parentheses so they form a
        # single (implicitly concatenated) string; previously the second
        # half was a separate, discarded expression statement.
        warning_msg = ('Make sure that your dataset can generate at least '
                       '`steps` batches (in this case, {} batches).'.format(
                           steps))
      else:
        warning_msg = 'Number of steps ran: {} steps'.format(current_step)

      logging.warning('Your dataset iterator ran out of data; '
                      'interrupting evaluation. ' + warning_msg)
      target_steps = current_step
      break
    for i, label in enumerate(model.metrics_names):
      if i == 0:
        # Loss is a stateless metric: accumulate it across steps and average
        # over the number of steps at the end.
        outs[i] += batch_outs[label]
      else:
        # For all stateful metrics, the aggregation is handled by mirrored vars.
        outs[i] = batch_outs[label]

    batch_logs = cbks.make_logs(model, batch_logs, outs, mode)
    callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
    if verbose >= 1:
      progbar.update(current_step + 1)
    current_step += 1

  callbacks._call_end_hook(mode)

  scope.__exit__(None, None, None)
  # Guard on non-emptiness; `len(outs) >= 0` was always true and would have
  # raised IndexError for an (unlikely) empty metrics list.
  if outs:
    outs[0] /= (target_steps)

  if len(outs) == 1:
    return outs[0]
  return outs
def experimental_tpu_predict_loop(model,
                                  dataset,
                                  verbose=0,
                                  steps=None,
                                  callbacks=None):
  """Predict loop for predicting with TPU DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      dataset: Dataset for input data.
      verbose: Integer, Verbosity mode 0 or 1.
      steps: Total number of steps (batches of samples)
          before declaring `_predict_loop` finished.
          Ignored with the default value of `None`.
      callbacks: List of callbacks to be called during training

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
  mode = ModeKeys.PREDICT
  steps = training_utils.infer_steps_for_dataset(dataset, steps,
                                                 steps_name='steps')
  dataset_fully_shaped = (distributed_training_utils.
                          is_dataset_shape_fully_defined(dataset))
  padding_handler = None
  if not dataset_fully_shaped:
    # TODO(hongjunchoi): Investigate whether operations from
    # PartialBatchPaddingHandler are unnecessarily pruned out
    # during graph optimization.
    padding_handler = padding_util.PartialBatchPaddingHandler(
        model._feed_output_shapes)
    batch_size, _, prefetch_buffer = input_lib._get_dataset_attributes(dataset)
    padding_handler.padded_batch_size = batch_size
    padding_handler.padding_mask = dataset.reduce(padding_handler.padding_mask,
                                                  padding_handler.update_mask)

    dataset = dataset.map(padding_handler.pad_batch)
    dataset = dataset.apply(batching.unbatch())
    # Upon this point, it is guaranteed that the dataset does not
    # have partial batches. Thus, we set `drop_remainder=True` to
    # get static shape information about the elements in the dataset.
    dataset = dataset.batch(batch_size, drop_remainder=True)

    if prefetch_buffer is not None:
      dataset = dataset.prefetch(prefetch_buffer)

  current_strategy = model._distribution_strategy
  iterator = distributed_training_utils.get_iterator(dataset, current_strategy)

  scope = distributed_training_utils.distributed_scope(
      strategy=current_strategy, learning_phase=0)
  scope.__enter__()

  def _per_device_predict_function(model):
    model._make_predict_function()
    return (model.predict_function.inputs,
            model.predict_function.outputs,
            model.predict_function.updates_op,
            model.predict_function.session_kwargs)

  def step_fn(ctx, inputs):
    """Clones the model and calls make_predict_function."""
    if model._compile_distribution:
      distributed_training_utils.clone_model_on_replicas(
          model, current_strategy, mode, inputs=inputs)
    else:
      distributed_training_utils._build_distributed_network(
          model, current_strategy, mode, inputs)

    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.extended.call_for_each_replica(
         _per_device_predict_function,
         args=(distributed_training_utils.get_distributed_model(
             model, ModeKeys.PREDICT),))

    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args)

    combined_fn = K.function(
        all_inputs, all_outputs,
        updates=all_updates,
        name='distributed_predict_function',
        **all_session_args)

    for label, output in zip(model.output_names, combined_fn.outputs):
      ctx.set_last_step_output(label, output)

    return combined_fn.updates_op

  # Add initial dummy values for outputs.
  initial_loop_values = {}
  batch_dimension = distributed_training_utils.get_batch_dimension(iterator)
  for name, tensor in zip(model.output_names, model.outputs):
    # TODO(priyag): This is a workaround as we do not know the batch dimension
    # of the model's output at this point.
    shape = tensor_shape.TensorShape(tensor.shape.dims)
    shape.dims = [batch_dimension] + shape.dims[1:]
    initial_loop_values[name] = array_ops.zeros(shape, tensor.dtype)

  # TODO(priyag, sourabhbajaj): Support steps_per_run if/when we add outfeed.
  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=1,
      initial_loop_values=initial_loop_values)

  predict_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  # Create the progbar for every verbose level that later updates it, so that
  # verbose=2 does not hit an undefined name below.
  if verbose >= 1:
    progbar = Progbar(target=steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  distributed_training_utils._reset_metrics(model)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=False,
      epochs=1,
      steps_per_epoch=steps,
      verbose=verbose,
      count_mode='steps',
      mode=mode)
  callbacks._call_begin_hook(mode)

  # Since we do not know how many samples we will see, we cannot pre-allocate
  # the returned Numpy arrays. Instead, we store one array per batch seen
  # and concatenate them upon returning.
  unconcatenated_outs = [[] for _ in model.outputs]
  if steps is not None:
    target_steps = steps
  else:
    target_steps = np.inf

  current_step = 0
  while current_step < target_steps:
    batch_logs = {'batch': current_step, 'size': 1}
    callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
    try:
      _, batch_outs = K.get_session().run([predict_op, output_tensors])
    except errors.OutOfRangeError:
      if steps is not None:
        # The two literals must be wrapped in parentheses so they form a
        # single (implicitly concatenated) string; previously the second
        # half was a separate, discarded expression statement.
        warning_msg = ('Make sure that your dataset can generate at least '
                       '`steps` batches (in this case, {} batches).'.format(
                           steps))
      else:
        warning_msg = 'Number of steps ran: {} steps'.format(current_step)

      logging.warning('Your dataset iterator ran out of data; '
                      'interrupting evaluation. ' + warning_msg)
      break

    # TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy.
    for i, label in enumerate(model.output_names):
      unconcatenated_outs[i].extend(batch_outs[label])
    batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
    callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
    if verbose >= 1:
      progbar.update(current_step + 1)
    current_step += 1

  callbacks._call_end_hook(mode)

  scope.__exit__(None, None, None)

  if len(unconcatenated_outs) == 1:
    prediction_result = np.concatenate(unconcatenated_outs[0], axis=0)
  else:
    prediction_result = [
        np.concatenate(unconcatenated_outs[i], axis=0)
        for i in range(len(unconcatenated_outs))
    ]

  if padding_handler:
    prediction_result = padding_handler.apply_mask(prediction_result)

  return prediction_result
| |
#!/usr/bin/python
import argparse
import base64
import csv
import getpass
import httplib
import socket
import ssl
import sys
import urllib2
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
class TLS1Connection(httplib.HTTPSConnection):
    """HTTPSConnection that forces the TLSv1 protocol (Python 2 only).

    Exists because the default HTTPSConnection negotiates via SSLv23,
    which some JSS servers reject.
    """
    def __init__(self, host, **kwargs):
        httplib.HTTPSConnection.__init__(self, host, **kwargs)

    def connect(self):
        """Overrides HTTPSConnection.connect to specify TLS version"""
        # Standard implementation from HTTPSConnection, which is not
        # designed for extension, unfortunately
        sock = socket.create_connection((self.host, self.port),
            self.timeout, self.source_address)
        if getattr(self, '_tunnel_host', None):
            # Proxy tunnelling: set the raw socket first so _tunnel() can
            # send the CONNECT request over it.
            self.sock = sock
            self._tunnel()

        # This is the only difference from the stock implementation;
        # default wrap_socket uses SSLv23
        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
class TLS1Handler(urllib2.HTTPSHandler):
    """urllib2 HTTPS handler that opens connections via TLS1Connection,
    so every HTTPS request made through the installed opener uses TLSv1."""
    def __init__(self):
        urllib2.HTTPSHandler.__init__(self)

    def https_open(self, req):
        # Delegate to do_open with our TLS-forcing connection class.
        return self.do_open(TLS1Connection, req)
class ArgParser(object):
    """Parses command-line arguments and prompts for missing credentials.

    After construction the instance exposes:
      searchtype  -- 'computers' or 'mobiledevices'
      searchvalue -- list of URL-quoted search strings
      jssurl      -- normalized server URL (https:// prefixed, no trailing /)
      groupname   -- name for the new static group
      username/password -- API credentials (prompted for if not supplied)
    """
    def __init__(self, ):
        parser = argparse.ArgumentParser(
            prog = "StaticGroupFromSearch",
            description = "Use the '/match' endpoint for Computers and Mobile devices to generate Static Groups.",
            formatter_class=argparse.RawDescriptionHelpFormatter, epilog = """Example usage:
$ ./StaticGroupFromSearch.py https://jss.myorg.com "Contains 'iPhone'" -u 'user' -p 'pass' --mobiledevices -s '*iPhone*'
$ ./StaticGroupFromSearch.py https://jss.myorg.com "Starts with 'admin'" --computers --search 'admin*'
$ ./StaticGroupFromSearch.py https://jss.myorg.com "Devices from list" --mobiledevices --csv-file /path/to/list.csv
""")
        parser.add_argument('jssurl', type=str, default=None, help="JSS URL")
        parser.add_argument('groupname', type=str, default=None, help="new static group name")
        # Exactly one device type must be chosen.
        groupSearchType = parser.add_mutually_exclusive_group(required=True)
        groupSearchType.add_argument('-c', '--computers', action="store_true", help="search computers")
        groupSearchType.add_argument('-m', '--mobiledevices', action="store_true", help="search mobile devices")
        # Exactly one input source must be chosen: a CSV file or a literal value.
        groupSearchInput = parser.add_mutually_exclusive_group(required=True)
        groupSearchInput.add_argument('-f', '--csv-file', type=str, dest='file', default=None,
                                      help="read search values from csv file")
        groupSearchInput.add_argument('-s', '--search', type=str, default=None, help="search for a value")
        parser.add_argument('-u', '--username', dest='username', type=str, default=None, help="API username")
        parser.add_argument('-p', '--password', dest='password', type=str, default=None, help="API user password")
        # With no arguments at all, show usage instead of an error traceback.
        if len(sys.argv) == 1:
            parser.print_help()
            sys.exit(1)

        args = parser.parse_args()

        self.searchtype = "computers" if args.computers else "mobiledevices"

        # Search values are URL-quoted here so they can be appended straight
        # onto the /match/ endpoint path later.
        if args.search:
            self.searchvalue = [urllib2.quote(args.search)]
        else:
            self.searchvalue = []
            # First column of each CSV row is taken as a search value.
            with open(args.file, 'rU') as f:
                reader = csv.reader(f)
                for row in reader:
                    self.searchvalue.append(urllib2.quote(row[0]))

        self.jssurl = self.clean_url(args.jssurl)
        self.groupname = args.groupname
        # Prompt interactively for any credential not given on the command line.
        self.username = args.username if args.username else str(raw_input("API Username: "))
        self.password = args.password if args.password else getpass.getpass("API Password: ")

    @staticmethod
    def clean_url(url):
        """Strip trailing slashes and ensure an http(s):// scheme prefix."""
        cleaned_url = url.rstrip('/')
        if not (cleaned_url.startswith('http://') or cleaned_url.startswith('https://')) :
            print("valid prefix for server url not found: prefixing with https://")
            cleaned_url = 'https://' + cleaned_url

        return cleaned_url
class JSS(object):
    """Thin client for the JSS REST API (match search + static group create).

    Uses HTTP Basic auth and XML request/response bodies throughout.
    """
    def __init__(self, url, username, password, matchtype):
        # Pre-compute the Basic auth token; sent with every request.
        self.auth = base64.b64encode(username + ':' + password)
        self.server = url
        # matchtype is 'computers' or 'mobiledevices' (see ArgParser).
        self.match_endpoint = '/JSSResource/{0}/match/'.format(matchtype)
        if matchtype == 'computers':
            self.group_endpoint = '/JSSResource/computergroups/id/0'
        else:
            self.group_endpoint = '/JSSResource/mobiledevicegroups/id/0'

    def get_match(self, searchvalue):
        """Run a /match search; returns the parsed XML root element."""
        print("performing search on the JSS at: ..{0}{1}".format(self.match_endpoint, searchvalue))
        request = urllib2.Request(self.server + self.match_endpoint + searchvalue)
        return etree.fromstring(self.request(request))

    def create_group(self, postdata):
        """POST new static-group XML; returns the new group's id as text.

        Posting to .../id/0 asks the JSS to assign the next free id.
        """
        print("creating new Static Group on the JSS at: ..{0}".format(self.group_endpoint))
        request = urllib2.Request(self.server + self.group_endpoint, postdata)
        request.get_method = lambda: 'POST'
        return etree.fromstring(self.request(request)).find('id').text

    def request(self, request):
        """Send the request with auth/XML headers; exit the process on error.

        Returns the raw response body (XML string) on success.
        """
        request.add_header('Authorization', 'Basic ' + self.auth)
        request.add_header('Content-Type', 'text/xml')
        request.add_header('Accept', 'text/xml')
        try:
            response = urllib2.urlopen(request)
        except ValueError as e:
            print("an error occurred during the search: {0}".format(e.message))
            print("check the URL used and try again\n")
            sys.exit(1)
        except urllib2.HTTPError as e:
            # 409 from the group endpoint usually means a name collision.
            added_message = "there may be an existing group using the provided name\n" if e.code == 409 else ''
            print("an error occurred during the search: {0} {1}: {2}\n{3}".format(type(e).__name__, e.code, e.reason,
                                                                                  added_message))
            sys.exit(1)
        except urllib2.URLError as e:
            print("an error occurred during the search: {0}: {1}".format(type(e).__name__, e.reason))
            print("check the server URL used and try again\n")
            sys.exit(1)
        except Exception as e:
            print("an unknown error has occurred: {0}: {1}\n".format(type(e).__name__, e.message))
            sys.exit(1)

        return response.read()
def CreateGroupPostData(input, collection, grouping, item, groupname):
    """Build the XML body for a static-group POST from the IDs in 'input'.

    Produces <collection><name/><is_smart>false</is_smart><grouping>
    with one <item><id>...</id></item> per entry of 'input'.
    """
    group_root = etree.Element(collection)
    etree.SubElement(group_root, 'name').text = groupname
    etree.SubElement(group_root, 'is_smart').text = 'false'
    members = etree.SubElement(group_root, grouping)
    for member_id in input:
        member = etree.SubElement(members, item)
        etree.SubElement(member, 'id').text = member_id
    return etree.tostring(group_root)
def main():
    """Search the JSS for matches and create a Static Group from the results.

    Exit codes: 0 success, 1 usage/HTTP error (via ArgParser/JSS),
    2 when the search returned no matches.
    """
    args = ArgParser()
    # Route all urllib2 HTTPS traffic through the TLSv1-forcing handler.
    urllib2.install_opener(urllib2.build_opener(TLS1Handler()))
    jss = JSS(args.jssurl, args.username, args.password, args.searchtype)

    # XML element names differ between the two device types.
    if args.searchtype == 'computers':
        collection = 'computer_group'
        grouping = 'computers'
        item = 'computer'
    else:
        collection = 'mobile_device_group'
        grouping = 'mobile_devices'
        item = 'mobile_device'

    # Collect unique device IDs across all search values.
    match_results = []
    for value in args.searchvalue:
        results = jss.get_match(value)
        for result in results.findall(item):
            item_id = result.find('id').text
            if item_id not in match_results:
                match_results.append(item_id)

    size = len(match_results)
    if not size:
        print("the JSS matched no results to the provided search value\n")
        sys.exit(2)
    else:
        print("the JSS matched {0} result(s) to the provided search value".format(size))

    data = CreateGroupPostData(match_results, collection, grouping, item, args.groupname)
    new_group_id = jss.create_group(data)
    print("the new Static Group has been created with ID: {0}\n".format(new_group_id))
    sys.exit(0)


if __name__ == '__main__':
    main()
| |
# Last Change: Mon Aug 20 08:00 PM 2007 J
import re
import datetime
from collections import OrderedDict
import numpy as np
import csv
import ctypes
"""A module to read arff files."""
__all__ = ['MetaData', 'loadarff', 'ArffError', 'ParseArffError']
# An Arff file is basically two parts:
# - header
# - data
#
# A header has each of its components starting by @META where META is one of
# the keyword (attribute of relation, for now).
# TODO:
# - both integer and reals are treated as numeric -> the integer info
# is lost!
# - Replace ValueError by ParseError or something
# We know can handle the following:
# - numeric and nominal attributes
# - missing values for numeric attributes
r_meta = re.compile(r'^\s*@')
# Match a comment
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
# Match a header line, that is a line which starts by @ + a word
r_headerline = re.compile(r'^\s*@\S*')
r_datameta = re.compile(r'^@[Dd][Aa][Tt][Aa]')
r_relation = re.compile(r'^@[Rr][Ee][Ll][Aa][Tt][Ii][Oo][Nn]\s*(\S*)')
r_attribute = re.compile(r'^\s*@[Aa][Tt][Tt][Rr][Ii][Bb][Uu][Tt][Ee]\s*(..*$)')
r_nominal = re.compile(r'{(.+)}')
r_date = re.compile(r"[Dd][Aa][Tt][Ee]\s+[\"']?(.+?)[\"']?$")
# To get attributes name enclosed with ''
r_comattrval = re.compile(r"'(..+)'\s+(..+$)")
# To get normal attributes
r_wcomattrval = re.compile(r"(\S+)\s+(..+$)")
# ------------------------
# Module defined exception
# ------------------------
class ArffError(IOError):
    """Base error for problems reading an ARFF file."""
    pass
class ParseArffError(ArffError):
    """Raised when the ARFF content itself cannot be parsed."""
    pass
# ----------
# Attributes
# ----------
class Attribute(object):
    """Base class for ARFF attribute declarations.

    Concrete subclasses override ``parse_attribute`` (recognize one
    header declaration) and ``parse_data`` (convert one raw field).
    The base versions recognize/convert nothing and return ``None``.
    """

    # Overridden by subclasses with the ARFF type keyword.
    type_name = None

    def __init__(self, name):
        self.name = name
        self.range = None
        self.dtype = np.object_

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """Return a parsed attribute if this class recognizes the
        declaration in *attr_string*, otherwise ``None``."""
        return None

    def parse_data(self, data_str):
        """Convert one raw data field; the base class knows no types."""
        return None

    def __str__(self):
        """Render as ``<name>,<type>``."""
        return ','.join((self.name, self.type_name))
class NominalAttribute(Attribute):
    """ARFF ``{v1,v2,...}`` attribute: a closed set of string values."""

    type_name = 'nominal'

    def __init__(self, name, values):
        super().__init__(name)
        self.values = values
        self.range = values
        # Fixed-width byte-string dtype wide enough for the longest value.
        # ``np.bytes_`` is the spelling valid on both NumPy 1.x and 2.x;
        # the ``np.string_`` alias was removed in NumPy 2.0.
        self.dtype = (np.bytes_, max(len(i) for i in values))

    @staticmethod
    def _get_nom_val(atrv):
        """Given a string containing a nominal type, returns a tuple of the
        possible values.

        A nominal type is defined as something framed between braces ({}).

        Parameters
        ----------
        atrv : str
            Nominal type definition

        Returns
        -------
        poss_vals : tuple
            possible values

        Examples
        --------
        >>> NominalAttribute._get_nom_val("{floup, bouga, fl, ratata}")
        ('floup', 'bouga', 'fl', 'ratata')
        """
        m = r_nominal.match(atrv)
        if m:
            # Reuse the CSV splitter so quoting inside the braces is honored.
            attrs, _ = split_data_line(m.group(1))
            return tuple(attrs)
        else:
            raise ValueError("This does not look like a nominal string")

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For nominal attributes, the attribute string would be like '{<attr_1>,
        <attr2>, <attr_3>}'.
        """
        if attr_string[0] == '{':
            values = cls._get_nom_val(attr_string)
            return cls(name, values)
        else:
            return None

    def parse_data(self, data_str):
        """Return *data_str* unchanged when it is a declared value or the
        missing-value marker '?'; raise ValueError otherwise."""
        if data_str in self.values:
            return data_str
        elif data_str == '?':
            return data_str
        else:
            raise ValueError("%s value not in %s" % (str(data_str),
                                                     str(self.values)))

    def __str__(self):
        # Render back to the ARFF declaration form: name,{v1,v2,...}
        msg = self.name + ",{"
        for i in range(len(self.values)-1):
            msg += self.values[i] + ","
        msg += self.values[-1]
        msg += "}"
        return msg
class NumericAttribute(Attribute):
    """ARFF numeric attribute ('numeric', 'int' or 'real').

    Integer and real declarations are both mapped to float64; the
    integer information is lost (see the module TODO).
    """

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'numeric'
        # ``np.float64`` is the spelling valid on both NumPy 1.x and 2.x;
        # the ``np.float_`` alias was removed in NumPy 2.0.
        self.dtype = np.float64

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For numeric attributes, the attribute string would be like
        'numeric' or 'int' or 'real'.
        """
        attr_string = attr_string.lower().strip()
        # Prefix match, equivalent to comparing the leading slices.
        if attr_string.startswith(('numeric', 'int', 'real')):
            return cls(name)
        else:
            return None

    def parse_data(self, data_str):
        """
        Parse a value of this type.

        Parameters
        ----------
        data_str : str
            string to convert

        Returns
        -------
        f : float
            where float can be nan

        Examples
        --------
        >>> atr = NumericAttribute('atr')
        >>> atr.parse_data('1')
        1.0
        >>> atr.parse_data('1\\n')
        1.0
        >>> atr.parse_data('?\\n')
        nan
        """
        # '?' marks a missing value; represent it as NaN.
        if '?' in data_str:
            return np.nan
        else:
            return float(data_str)

    def _basic_stats(self, data):
        # min/max/mean plus the population std scaled by n/(n-1).
        # NOTE(review): raises ZeroDivisionError for a single element —
        # callers must pass data.size >= 2.
        nbfac = data.size * 1. / (data.size - 1)
        return (np.nanmin(data), np.nanmax(data),
                np.mean(data), np.std(data) * nbfac)
class StringAttribute(Attribute):
    """ARFF ``string`` attribute (declared, but loading is rejected by
    ``_loadarff``)."""

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'string'

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """Return a StringAttribute when *attr_string* declares a string
        type, otherwise None."""
        if attr_string.lower().strip().startswith('string'):
            return cls(name)
        return None
class DateAttribute(Attribute):
    """ARFF ``date <format>`` attribute.

    The Java SimpleDateFormat pattern from the header is converted to a
    C/strptime pattern, and values are parsed into ``numpy.datetime64``
    at the finest resolution present in the pattern.
    """

    def __init__(self, name, date_format, datetime_unit):
        super().__init__(name)
        self.date_format = date_format
        self.datetime_unit = datetime_unit
        self.type_name = 'date'
        self.range = date_format
        self.dtype = np.datetime64(0, self.datetime_unit)

    @staticmethod
    def _get_date_format(atrv):
        """Convert the Java SimpleDateFormat pattern in *atrv* to a
        strptime pattern; return ``(pattern, datetime_unit)``.

        Raises ValueError for time-zone patterns and for patterns with
        no recognized component.
        """
        m = r_date.match(atrv)
        if m:
            pattern = m.group(1).strip()
            # convert time pattern from Java's SimpleDateFormat to C's format
            datetime_unit = None
            if "yyyy" in pattern:
                pattern = pattern.replace("yyyy", "%Y")
                datetime_unit = "Y"
            elif "yy" in pattern:
                # BUG FIX: the original tested the always-truthy literal
                # ``elif "yy":`` instead of ``"yy" in pattern``, so every
                # pattern without a year component still claimed unit "Y"
                # and unsupported formats escaped the ValueError below.
                pattern = pattern.replace("yy", "%y")
                datetime_unit = "Y"
            if "MM" in pattern:
                pattern = pattern.replace("MM", "%m")
                datetime_unit = "M"
            if "dd" in pattern:
                pattern = pattern.replace("dd", "%d")
                datetime_unit = "D"
            if "HH" in pattern:
                pattern = pattern.replace("HH", "%H")
                datetime_unit = "h"
            if "mm" in pattern:
                pattern = pattern.replace("mm", "%M")
                datetime_unit = "m"
            if "ss" in pattern:
                pattern = pattern.replace("ss", "%S")
                datetime_unit = "s"
            if "z" in pattern or "Z" in pattern:
                raise ValueError("Date type attributes with time zone not "
                                 "supported, yet")
            if datetime_unit is None:
                raise ValueError("Invalid or unsupported date format")
            return pattern, datetime_unit
        else:
            raise ValueError("Invalid or no date format")

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """
        Parse the attribute line if it knows how. Returns the parsed
        attribute, or None.

        For date attributes, the attribute string would be like
        'date <format>'.
        """
        attr_string_lower = attr_string.lower().strip()
        if attr_string_lower[:len('date')] == 'date':
            date_format, datetime_unit = cls._get_date_format(attr_string)
            return cls(name, date_format, datetime_unit)
        else:
            return None

    def parse_data(self, data_str):
        """Convert one (possibly quoted) date string to datetime64;
        '?' maps to NaT."""
        date_str = data_str.strip().strip("'").strip('"')
        if date_str == '?':
            return np.datetime64('NaT', self.datetime_unit)
        else:
            dt = datetime.datetime.strptime(date_str, self.date_format)
            return np.datetime64(dt).astype(
                "datetime64[%s]" % self.datetime_unit)

    def __str__(self):
        return super(DateAttribute, self).__str__() + ',' + self.date_format
class RelationalAttribute(Attribute):
    """ARFF ``relational`` attribute: a container whose nested attributes
    parse an escaped, embedded multi-line data field row by row."""

    def __init__(self, name):
        super().__init__(name)
        self.type_name = 'relational'
        self.dtype = np.object_
        self.attributes = []
        self.dialect = None

    @classmethod
    def parse_attribute(cls, name, attr_string):
        """Return a RelationalAttribute when *attr_string* declares a
        relational type, otherwise None."""
        if attr_string.lower().strip().startswith('relational'):
            return cls(name)
        return None

    def parse_data(self, data_str):
        """Unescape the embedded relation, split it into lines, and parse
        each line with the nested attributes; returns a structured array."""
        n_attrs = len(self.attributes)
        unescaped = data_str.encode().decode("unicode-escape")
        rows = []
        for line in unescaped.split("\n"):
            # The sniffed csv dialect is remembered across rows.
            fields, self.dialect = split_data_line(line, self.dialect)
            rows.append(tuple(self.attributes[i].parse_data(fields[i])
                              for i in range(n_attrs)))
        return np.array(rows,
                        [(a.name, a.dtype) for a in self.attributes])

    def __str__(self):
        nested = '\n\t'.join(str(a) for a in self.attributes)
        return super(RelationalAttribute, self).__str__() + '\n\t' + nested
# -----------------
# Various utilities
# -----------------
def to_attribute(name, attr_string):
    """Instantiate the first Attribute subclass that recognizes
    *attr_string*; raise ParseArffError when none does."""
    for klass in (NominalAttribute, NumericAttribute, DateAttribute,
                  StringAttribute, RelationalAttribute):
        parsed = klass.parse_attribute(name, attr_string)
        if parsed is not None:
            return parsed
    raise ParseArffError("unknown attribute %s" % attr_string)
def csv_sniffer_has_bug_last_field():
    """
    Checks if the bug https://bugs.python.org/issue30157 is unpatched.

    The answer is computed once and cached on the function object.
    """
    if not hasattr(csv_sniffer_has_bug_last_field, "has_bug"):
        # A buggy Sniffer mis-detects the quote character of "3, 'a'".
        sniffed = csv.Sniffer().sniff("3, 'a'")
        csv_sniffer_has_bug_last_field.has_bug = sniffed.quotechar != "'"
    return csv_sniffer_has_bug_last_field.has_bug
def workaround_csv_sniffer_bug_last_field(sniff_line, dialect, delimiters):
    """
    Workaround for the bug https://bugs.python.org/issue30157 if it is unpatched.

    Re-runs the quote-detection regexes from the csv module on
    *sniff_line* and, when the buggy "quoted last field" expression is
    the one that matched, patches *dialect* in place with the correctly
    detected quotechar/delimiter/doublequote/skipinitialspace.
    """
    if csv_sniffer_has_bug_last_field():
        # Reuses code from the csv module
        right_regex = r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'
        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',   # .*?",
                      right_regex, # ,".*?"
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(sniff_line)
            if matches:
                break
        # If it does not match the expression that was bugged, then this bug does not apply
        if restr != right_regex:
            return
        groupindex = regexp.groupindex
        # There is only one end of the string
        assert len(matches) == 1
        m = matches[0]
        # findall returns plain tuples; map named groups back by position.
        n = groupindex['quote'] - 1
        quote = m[n]
        n = groupindex['delim'] - 1
        delim = m[n]
        n = groupindex['space'] - 1
        space = bool(m[n])
        # Same doublequote-detection expression the csv module uses.
        dq_regexp = re.compile(
            r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" %
            {'delim': re.escape(delim), 'quote': quote}, re.MULTILINE
        )
        doublequote = bool(dq_regexp.search(sniff_line))
        dialect.quotechar = quote
        if delim in delimiters:
            dialect.delimiter = delim
        dialect.doublequote = doublequote
        dialect.skipinitialspace = space
def split_data_line(line, dialect=None):
    """Split one ARFF data line into fields with the csv module.

    Returns ``(row, dialect)``; the sniffed dialect is returned so the
    caller can reuse it for the remaining lines of the same stream.
    """
    delimiters = ",\t"

    # This can not be done in a per reader basis, and relational fields
    # can be HUGE
    csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))

    # Strip the trailing newline, if any.
    if line[-1] == '\n':
        line = line[:-1]

    sniff_line = line
    # Add a delimiter if none is present, so that the csv.Sniffer
    # does not complain for a single-field CSV.
    if all(d not in line for d in delimiters):
        sniff_line += ","

    if dialect is None:
        dialect = csv.Sniffer().sniff(sniff_line, delimiters=delimiters)
        workaround_csv_sniffer_bug_last_field(sniff_line=sniff_line,
                                              dialect=dialect,
                                              delimiters=delimiters)

    row = next(csv.reader([line], dialect))
    return row, dialect
# --------------
# Parsing header
# --------------
def tokenize_attribute(iterable, attribute):
    """Parse a raw string in header (e.g., starts by @attribute).

    Given a raw string attribute, try to get the name and type of the
    attribute. Constraints:

    * The first line must start with @attribute (case insensitive, and
      space like characters before @attribute are allowed)
    * Works also if the attribute is spread on multilines.
    * Works if empty lines or comments are in between

    Parameters
    ----------
    iterable : iterator of str
        stream of header lines; advanced past the declaration (and past
        any nested relational block).
    attribute : str
        the attribute string.

    Returns
    -------
    attribute : Attribute
        the parsed attribute object
    next : str
        next line to be parsed

    Examples
    --------
    For r"@attribute floupi real", the attribute is named 'floupi' with
    type 'real'.  For r" @attribute 'floupi 2' real", the quoted name
    'floupi 2' is used with type 'real'.
    """
    sattr = attribute.strip()
    mattr = r_attribute.match(sattr)
    if mattr:
        # atrv is everything after @attribute
        atrv = mattr.group(1)
        if r_comattrval.match(atrv):
            # Quoted name: 'some name' <type>
            name, type = tokenize_single_comma(atrv)
            next_item = next(iterable)
        elif r_wcomattrval.match(atrv):
            # Unquoted name: name <type>
            name, type = tokenize_single_wcomma(atrv)
            next_item = next(iterable)
        else:
            # Not sure we should support this, as it does not seem supported by
            # weka.
            raise ValueError("multi line not supported yet")
    else:
        raise ValueError("First line unparsable: %s" % sattr)
    # Instantiate the concrete Attribute subclass for the declared type.
    attribute = to_attribute(name, type)
    if type.lower() == 'relational':
        # Relational attributes nest further declarations up to '@end <name>'.
        next_item = read_relational_attribute(iterable, attribute, next_item)
    return attribute, next_item
def tokenize_single_comma(val):
    """Split a quoted attribute declaration ``'name' type`` into
    ``(name, type)``, both stripped.

    XXX the regex is matched twice (here and at the caller level); it is
    wasteful, but easier for now.
    """
    m = r_comattrval.match(val)
    if not m:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        return m.group(1).strip(), m.group(2).strip()
    except IndexError:
        raise ValueError("Error while tokenizing attribute")
def tokenize_single_wcomma(val):
    """Split an unquoted attribute declaration ``name type`` into
    ``(name, type)``, both stripped.

    XXX the regex is matched twice (here and at the caller level); it is
    wasteful, but easier for now.
    """
    m = r_wcomattrval.match(val)
    if not m:
        raise ValueError("Error while tokenizing single %s" % val)
    try:
        return m.group(1).strip(), m.group(2).strip()
    except IndexError:
        raise ValueError("Error while tokenizing attribute")
def read_relational_attribute(ofile, relational_attribute, i):
    """Read the nested attributes of a relational attribute.

    Consumes header lines from *ofile* starting at line *i* until the
    matching ``@end <name>`` marker, appending each parsed nested
    attribute to ``relational_attribute.attributes``.  Returns the first
    line after the marker.
    """
    # NOTE(review): the attribute name is interpolated unescaped into the
    # regex; a name containing regex metacharacters would misbehave.
    r_end_relational = re.compile(r'^@[Ee][Nn][Dd]\s*' +
                                  relational_attribute.name + r'\s*$')
    while not r_end_relational.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                # tokenize_attribute consumes following lines itself.
                attr, i = tokenize_attribute(ofile, i)
                relational_attribute.attributes.append(attr)
            else:
                raise ValueError("Error parsing line %s" % i)
        else:
            i = next(ofile)
    # Skip the '@end <name>' line itself.
    i = next(ofile)
    return i
def read_header(ofile):
    """Read the header of the iterable ofile.

    Returns ``(relation, attributes)``: the @relation name (or None) and
    the parsed Attribute objects in declaration order.  Consumes lines
    up to and including the @data marker.
    """
    i = next(ofile)

    # Pass first comments
    while r_comment.match(i):
        i = next(ofile)

    # Header is everything up to DATA attribute ?
    relation = None
    attributes = []
    while not r_datameta.match(i):
        m = r_headerline.match(i)
        if m:
            isattr = r_attribute.match(i)
            if isattr:
                # tokenize_attribute consumes following lines itself.
                attr, i = tokenize_attribute(ofile, i)
                attributes.append(attr)
            else:
                isrel = r_relation.match(i)
                if isrel:
                    relation = isrel.group(1)
                else:
                    raise ValueError("Error parsing line %s" % i)
                i = next(ofile)
        else:
            i = next(ofile)

    return relation, attributes
class MetaData(object):
    """Small container to keep useful information on a ARFF dataset.

    Knows about attributes names and types.

    Examples
    --------
    ::

        data, meta = loadarff('iris.arff')
        # This will print the attributes names of the iris.arff dataset
        for i in meta:
            print(i)
        # This works too
        meta.names()
        # Getting attribute type
        types = meta.types()

    Methods
    -------
    names
    types

    Notes
    -----
    Also maintains the list of attributes in order, i.e., doing for i in
    meta, where meta is an instance of MetaData, will return the
    different attribute names in the order they were defined.
    """
    def __init__(self, rel, attr):
        self.name = rel
        # Insertion-ordered mapping: attribute name -> Attribute object.
        self._attributes = OrderedDict((a.name, a) for a in attr)

    def __repr__(self):
        lines = ["Dataset: %s" % self.name]
        for attr_name, attr in self._attributes.items():
            desc = "\t%s's type is %s" % (attr_name, attr.type_name)
            if attr.range:
                desc += ", range is %s" % str(attr.range)
            lines.append(desc)
        return "\n".join(lines) + "\n"

    def __iter__(self):
        return iter(self._attributes)

    def __getitem__(self, key):
        attr = self._attributes[key]
        return attr.type_name, attr.range

    def names(self):
        """Return the list of attribute names, in declaration order.

        Returns
        -------
        attrnames : list of str
            The attribute names.
        """
        return list(self._attributes)

    def types(self):
        """Return the list of attribute types, in declaration order.

        Returns
        -------
        attr_types : list of str
            The attribute types.
        """
        return [a.type_name for a in self._attributes.values()]
def loadarff(f):
    """
    Read an arff file.

    The data is returned as a record array, which can be accessed much like
    a dictionary of NumPy arrays. For example, if one of the attributes is
    called 'pressure', then its first 10 data points can be accessed from the
    ``data`` record array like so: ``data['pressure'][0:10]``

    Parameters
    ----------
    f : file-like or str
        File-like object to read from, or filename to open.

    Returns
    -------
    data : record array
        The data of the arff file, accessible by attribute names.
    meta : `MetaData`
        Contains information about the arff file such as name and
        type of attributes, the relation (name of the dataset), etc.

    Raises
    ------
    ParseArffError
        This is raised if the given file is not ARFF-formatted.
    NotImplementedError
        The ARFF file has an attribute which is not supported yet.

    Notes
    -----
    This function should be able to read most arff files. Not
    implemented functionality include:

    * date type attributes
    * string type attributes

    It can read files with numeric and nominal attributes. It cannot read
    files with sparse data ({} in the file). However, this function can
    read files with missing data (? in the file), representing the data
    points as NaNs.
    """
    opened_here = not hasattr(f, 'read')
    ofile = open(f, 'rt') if opened_here else f
    try:
        return _loadarff(ofile)
    finally:
        # Only close what we opened.
        if opened_here:
            ofile.close()
def _loadarff(ofile):
    """Parse an already-open ARFF stream into ``(data, meta)``."""
    # Parse the header file
    try:
        rel, attr = read_header(ofile)
    except ValueError as e:
        msg = "Error while parsing header, error was: " + str(e)
        raise ParseArffError(msg)

    # Check whether we have a string attribute (not supported yet)
    hasstr = False
    for a in attr:
        if isinstance(a, StringAttribute):
            hasstr = True

    meta = MetaData(rel, attr)

    # XXX The following code is not great
    # Build the type descriptor descr and the list of convertors to convert
    # each attribute to the suitable type (which should match the one in
    # descr).

    # This can be used once we want to support integer as integer values and
    # not as numeric anymore (using masked arrays ?).

    if hasstr:
        # How to support string efficiently ? Ideally, we should know the max
        # size of the string before allocating the numpy array.
        raise NotImplementedError("String attributes not supported yet, sorry")

    ni = len(attr)

    def generator(row_iter, delim=','):
        # TODO: this is where we are spending time (~80%). I think things
        # could be made more efficiently:
        #   - We could for example "compile" the function, because some values
        #     do not change here.
        #   - The function to convert a line to dtyped values could also be
        #     generated on the fly from a string and be executed instead of
        #     looping.
        #   - The regex are overkill: for comments, checking that a line starts
        #     by % should be enough and faster, and for empty lines, same thing
        #     --> this does not seem to change anything.

        # 'compiling' the range since it does not change
        # Note, I have already tried zipping the converters and
        # row elements and got slightly worse performance.
        elems = list(range(ni))

        # The sniffed csv dialect is reused across rows for consistency.
        dialect = None
        for raw in row_iter:
            # We do not abstract skipping comments and empty lines for
            # performance reasons.
            if r_comment.match(raw) or r_empty.match(raw):
                continue
            row, dialect = split_data_line(raw, dialect)
            yield tuple([attr[i].parse_data(row[i]) for i in elems])

    a = list(generator(ofile))
    # No error should happen here: it is a bug otherwise
    data = np.array(a, [(a.name, a.dtype) for a in attr])
    return data, meta
# ----
# Misc
# ----
def basic_stats(data):
    """Return ``(min, max, mean, std * n/(n-1))`` for *data*, where
    min/max ignore NaNs.  Note the factor multiplies the population std
    by n/(n-1) (not by its square root)."""
    correction = data.size / (data.size - 1.)
    return (np.nanmin(data), np.nanmax(data),
            np.mean(data), np.std(data) * correction)
def print_attribute(name, tp, data):
    """Print a one-line summary for attribute *name*: numeric attributes
    get min/max/mean/std statistics, others their own string form."""
    kind = tp.type_name
    if kind in ('numeric', 'real', 'integer'):
        vmin, vmax, mean, std = basic_stats(data)
        print("%s,%s,%f,%f,%f,%f" % (name, kind, vmin, vmax, mean, std))
    else:
        print(str(tp))
def test_weka(filename):
    """Load *filename* as ARFF and dump a summary of every attribute."""
    data, meta = loadarff(filename)
    print(len(data.dtype))
    print(data.size)
    for attr_name in meta:
        print_attribute(attr_name, meta[attr_name], data[attr_name])
# make sure nose does not find this as a test
test_weka.__test__ = False

# CLI usage: python arffread.py <file.arff>
if __name__ == '__main__':
    import sys
    filename = sys.argv[1]
    test_weka(filename)
| |
#!/usr/bin/env python3
"""
Pylookup is to lookup entries from python documentation, especially within
emacs. Pylookup adopts most of ideas from haddoc, lovely toolkit by Martin
Blais.
(usage)
./pylookup.py -l ljust
./pylookup.py -u http://docs.python.org
"""
from __future__ import with_statement
import os
import sys
import re
try:
import cPickle as pickle
except:
import pickle
import formatter
from os.path import join, dirname, exists, abspath, expanduser
from contextlib import closing
if sys.version_info[0] == 3:
import html.parser as htmllib
import urllib.parse as urlparse
import urllib.request as urllib
else:
import htmllib, urllib, urlparse
# Toggled by the -v/--verbose command line flag.
VERBOSE = False

# Output templates, selected by the -f/--format option.
FORMATS = {
    "Emacs" : "{entry}\t({desc})\t[{book}];{url}",
    "Terminal" : "{entry}\t({desc})\t[{book}]\n{url}"
}
def build_book(s, num):
    """
    Build book identifier from `s`, with `num` links.

    The first matching substring wins; the empty-string entry acts as a
    catch-all, so a value is always returned.
    """
    mapping = (("library", "lib"),
               ("c-api", "api"),
               ("reference", "ref"),
               ("", "etc"))
    for needle, short in mapping:
        if needle in s:
            if num == 1:
                return short
            return "{0}/{1}".format(short, num)
def trim(s):
    """
    Apply the global entry-filtering rules to `s`.
    """
    replacements = (("Python Enhancement Proposals!", ""),
                    ("PEP ", "PEP-"))
    for old, new in replacements:
        s = s.replace(old, new)
    return s
class Element(object):
    """One documentation index entry: entry name, description, source
    book identifier, and target URL."""

    def __init__(self, entry, desc, book, url):
        self.entry = entry
        self.desc = desc
        self.book = book
        self.url = url

    def __format__(self, format_spec):
        return format_spec.format(entry=self.entry, desc=self.desc,
                                  book=self.book, url=self.url)

    def match_insensitive(self, key):
        """True if lowercase `key` occurs in entry or desc, ignoring case."""
        return key in self.entry.lower() or key in self.desc.lower()

    def match_sensitive(self, key):
        """True if `key` occurs in entry or desc, case sensitively."""
        return key in self.entry or key in self.desc

    def match_in_entry_insensitive(self, key):
        """True if lowercase `key` occurs in entry, ignoring case."""
        return key in self.entry.lower()

    def match_in_entry_sensitive(self, key):
        """True if `key` occurs in entry, case sensitively."""
        return key in self.entry
def get_matcher(insensitive=True, desc=True):
    """
    Get `Element.match_*` function.

    `insensitive` picks the case-insensitive variant; `desc` picks the
    variant that also searches the description field.
    """
    method_name = "match"
    if not desc:
        method_name += "_in_entry"
    method_name += "_insensitive" if insensitive else "_sensitive"
    return getattr(Element, method_name)
class IndexProcessor( htmllib.HTMLParser ):
    """
    Extract the index links from a Python HTML documentation index.

    The genindex page is a <dl> list: each <dt> holds one entry with one
    or more <a> links, and nested <dd> lists hold sub-entries.  Each link
    is handed to `writer` as an `Element`.

    NOTE(review): relies on the legacy ``htmllib``/``formatter`` modules
    (``formatter`` was removed in Python 3.10) — confirm on the target
    interpreter.
    """
    def __init__( self, writer, dirn):
        htmllib.HTMLParser.__init__( self, formatter.NullFormatter() )
        # Callback receiving each parsed Element.
        self.writer = writer
        # Directory used to resolve relative hrefs.
        self.dirn = dirn
        self.entry = ""
        self.desc = ""
        # State flags for the <dd>/<dt>/<a> walk.
        self.list_entry = False
        self.do_entry = False
        self.one_entry = False
        self.num_of_a = 0
        self.desc_cnt = 0
    def start_dd( self, att ):
        # Entering a nested (sub-entry) list.
        self.list_entry = True
    def end_dd( self ):
        self.list_entry = False
    def start_dt( self, att ):
        # A new index entry begins; reset the per-entry link counter.
        self.one_entry = True
        self.num_of_a = 0
    def end_dt( self ):
        self.do_entry = False
    def start_a( self, att ):
        if self.one_entry:
            # Resolve the link target relative to the index directory.
            self.url = join( self.dirn, dict( att )[ 'href' ] )
            self.save_bgn()
    def end_a( self ):
        global VERBOSE
        if self.one_entry:
            if self.num_of_a == 0 :
                # First link of the entry: its text is the description.
                self.desc = self.save_end()

                if VERBOSE:
                    self.desc_cnt += 1
                    if self.desc_cnt % 100 == 0:
                        sys.stdout.write("%04d %s\r" \
                                         % (self.desc_cnt, self.desc.ljust(80)))
                # extract fist element
                #  ex) __and__() (in module operator)
                if not self.list_entry :
                    self.entry = re.sub( "\([^)]+\)", "", self.desc )

                    # clean up PEP
                    self.entry = trim(self.entry)

                match = re.search( "\([^)]+\)", self.desc )
                if match :
                    self.desc = match.group(0)
                self.desc = trim(re.sub( "[()]", "", self.desc ))

            self.num_of_a += 1
            book = build_book(self.url, self.num_of_a)
            e = Element(self.entry, self.desc, book, self.url)
            self.writer(e)
def update(db, urls, append=False):
    """Update database with entries from urls.

    `db` : filename to database
    `urls` : list of URL
    `append` : append to db

    The database is a raw stream of pickled `Element` objects.
    """
    mode = "ab" if append else "wb"
    with open(db, mode) as f:
        # Each parsed Element is appended to the pickle stream.
        writer = lambda e: pickle.dump(e, f)
        for url in urls:
            # detect 'file' or 'url' schemes
            parsed = urlparse.urlparse(url)
            if not parsed.scheme or parsed.scheme == "file":
                dst = abspath(expanduser(parsed.path))
                if not os.path.exists(dst):
                    print("Error: %s doesn't exist" % dst)
                    exit(1)
                url = "file://%s" % dst
            else:
                url = parsed.geturl()

            # direct to genindex-all.html
            if not url.endswith('.html'):
                url = url.rstrip("/") + "/genindex-all.html"

            print("Wait for a few seconds ..\nFetching htmls from '%s'" % url)
            try:
                index = urllib.urlopen(url).read()
                if not issubclass(type(index), str):
                    # Python 3 returns bytes; decode to str for the parser.
                    index = index.decode()
                parser = IndexProcessor(writer, dirname(url))
                with closing(parser):
                    parser.feed(index)
            except IOError:
                print("Error: fetching file from the web: '%s'" % sys.exc_info())
def lookup(db, key, format_spec, out=sys.stdout, insensitive=True, desc=True):
    """Lookup key from database and print to out.

    `db` : filename to database
    `key` : key to lookup
    `format_spec` : output template (see FORMATS)
    `out` : file-like to write to
    `insensitive` : lookup key case insensitive
    `desc` : also match against the description field
    """
    matcher = get_matcher(insensitive, desc)
    if insensitive:
        key = key.lower()

    with open(db, "rb") as f:
        # The db is a stream of pickled Elements; read until EOF.
        try:
            while True:
                element = pickle.load(f)
                if matcher(element, key):
                    out.write('%s\n' % format(element, format_spec))
        except EOFError:
            pass
def cache(db, out=sys.stdout):
    """Print unique entries from db to out.

    `db` : filename to database
    `out` : file-like to write to
    """
    seen = set()
    with open(db, "rb") as f:
        try:
            while True:
                element = pickle.load(f)
                # Strip any parenthesized/bracketed qualifiers.
                entry = re.sub("\([^)]*\)", "", element.entry)
                entry = re.sub("\[[^]]*\]", "", entry)
                seen.add(entry)
        except EOFError:
            pass
    for entry in seen:
        out.write('%s\n' % entry)
if __name__ == "__main__":
import optparse
parser = optparse.OptionParser( __doc__.strip() )
parser.add_option( "-d", "--db",
help="database name",
dest="db", default="pylookup.db" )
parser.add_option( "-l", "--lookup",
help="keyword to search",
dest="key" )
parser.add_option( "-u", "--update",
help="update url or path",
action="append", type="str", dest="url" )
parser.add_option( "-c", "--cache" ,
help="extract keywords, internally used",
action="store_true", default=False, dest="cache")
parser.add_option( "-a", "--append",
help="append to the db from multiple sources",
action="store_true", default=False, dest="append")
parser.add_option( "-f", "--format",
help="type of output formatting, valid: Emacs, Terminal",
choices=["Emacs", "Terminal"],
default="Terminal", dest="format")
parser.add_option( "-i", "--insensitive", default=1, choices=['0', '1'],
help="SEARCH OPTION: insensitive search "
"(valid: 0, 1; default: %default)")
parser.add_option( "-s", "--desc", default=1, choices=['0', '1'],
help="SEARCH OPTION: include description field "
"(valid: 0, 1; default: %default)")
parser.add_option("-v", "--verbose",
help="verbose", action="store_true",
dest="verbose", default=False)
( opts, args ) = parser.parse_args()
VERBOSE = opts.verbose
if opts.url:
update(opts.db, opts.url, opts.append)
if opts.cache:
cache(opts.db)
if opts.key:
lookup(opts.db, opts.key, FORMATS[opts.format],
insensitive=int(opts.insensitive), desc=int(opts.desc))
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/pdf/1412.2007v2.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.rnn.translate import data_utils
from tensorflow.models.rnn.translate import seq2seq_model
from tensorflow.python.platform import gfile
tf.app.flags.DEFINE_float("learning_rate", 0.5, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
"Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
"Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("en_vocab_size", 40000, "English vocabulary size.")
tf.app.flags.DEFINE_integer("fr_vocab_size", 40000, "French vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "/tmp", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "/tmp", "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 200,
"How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_boolean("decode", False,
"Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False,
"Run a self-test if this is set to True.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
_buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
def read_data(source_path, target_path, max_size=None):
  """Read data from source and target files and put into buckets.

  Args:
    source_path: path to the files with token-ids for the source language.
    target_path: path to the file with token-ids for the target language;
      it must be aligned with the source file: n-th line contains the desired
      output for n-th line from the source_path.
    max_size: maximum number of lines to read, all other will be ignored;
      if 0 or None, data files will be read completely (no limit).

  Returns:
    data_set: a list of length len(_buckets); data_set[n] contains a list of
      (source, target) pairs read from the provided data files that fit
      into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
      len(target) < _buckets[n][1]; source and target are lists of token-ids.
  """
  data_set = [[] for _ in _buckets]
  with gfile.GFile(source_path, mode="r") as source_file:
    with gfile.GFile(target_path, mode="r") as target_file:
      source, target = source_file.readline(), target_file.readline()
      counter = 0
      while source and target and (not max_size or counter < max_size):
        counter += 1
        if counter % 100000 == 0:
          print(" reading data line %d" % counter)
          sys.stdout.flush()
        source_ids = [int(x) for x in source.split()]
        target_ids = [int(x) for x in target.split()]
        # Every target sequence is terminated with the EOS symbol.
        target_ids.append(data_utils.EOS_ID)
        # Place the pair in the smallest bucket that fits both sides;
        # pairs longer than the largest bucket are silently dropped.
        for bucket_id, (source_size, target_size) in enumerate(_buckets):
          if len(source_ids) < source_size and len(target_ids) < target_size:
            data_set[bucket_id].append([source_ids, target_ids])
            break
        source, target = source_file.readline(), target_file.readline()
  return data_set
def create_model(session, forward_only):
  """Create translation model and initialize or load parameters in session."""
  model = seq2seq_model.Seq2SeqModel(
      FLAGS.en_vocab_size, FLAGS.fr_vocab_size, _buckets,
      FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
      FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,
      forward_only=forward_only)
  # Restore an existing checkpoint when one is available, otherwise start
  # from freshly initialized parameters.
  ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
  restorable = ckpt and gfile.Exists(ckpt.model_checkpoint_path)
  if restorable:
    print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
    model.saver.restore(session, ckpt.model_checkpoint_path)
    return model
  print("Created model with fresh parameters.")
  session.run(tf.initialize_all_variables())
  return model
def train():
  """Train a en->fr translation model using WMT data.

  Loops forever: sample a bucket proportionally to its amount of training
  data, run one SGD step, and every FLAGS.steps_per_checkpoint steps report
  perplexity, decay the learning rate when no recent improvement was seen,
  save a checkpoint and evaluate on the dev set.
  """
  # Prepare WMT data.
  print("Preparing WMT data in %s" % FLAGS.data_dir)
  en_train, fr_train, en_dev, fr_dev, _, _ = data_utils.prepare_wmt_data(
      FLAGS.data_dir, FLAGS.en_vocab_size, FLAGS.fr_vocab_size)
  with tf.Session() as sess:
    # Create model.
    print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
    model = create_model(sess, False)
    # Read data into buckets and compute their sizes.
    print ("Reading development and training data (limit: %d)."
           % FLAGS.max_train_data_size)
    dev_set = read_data(en_dev, fr_dev)
    train_set = read_data(en_train, fr_train, FLAGS.max_train_data_size)
    train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
    train_total_size = float(sum(train_bucket_sizes))
    # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
    # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
    # the size of the i-th training bucket, as used later.
    train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                           for i in xrange(len(train_bucket_sizes))]
    # This is the training loop.
    # step_time and loss are running averages over the current checkpoint
    # window (divided by steps_per_checkpoint as they accumulate).
    step_time, loss = 0.0, 0.0
    current_step = 0
    previous_losses = []
    while True:
      # Choose a bucket according to data distribution. We pick a random number
      # in [0, 1] and use the corresponding interval in train_buckets_scale.
      random_number_01 = np.random.random_sample()
      bucket_id = min([i for i in xrange(len(train_buckets_scale))
                       if train_buckets_scale[i] > random_number_01])
      # Get a batch and make a step.
      start_time = time.time()
      encoder_inputs, decoder_inputs, target_weights = model.get_batch(
          train_set, bucket_id)
      _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                   target_weights, bucket_id, False)
      step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
      loss += step_loss / FLAGS.steps_per_checkpoint
      current_step += 1
      # Once in a while, we save checkpoint, print statistics, and run evals.
      if current_step % FLAGS.steps_per_checkpoint == 0:
        # Print statistics for the previous epoch.
        # Cap the exponent to avoid math.exp overflow on huge losses.
        perplexity = math.exp(loss) if loss < 300 else float('inf')
        print ("global step %d learning rate %.4f step-time %.2f perplexity "
               "%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
                         step_time, perplexity))
        # Decrease learning rate if no improvement was seen over last 3 times.
        if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
          sess.run(model.learning_rate_decay_op)
        previous_losses.append(loss)
        # Save checkpoint and zero timer and loss.
        checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
        model.saver.save(sess, checkpoint_path, global_step=model.global_step)
        step_time, loss = 0.0, 0.0
        # Run evals on development set and print their perplexity.
        # NOTE(review): get_batch on an empty dev bucket presumably fails --
        # confirm the dev set always populates every bucket.
        for bucket_id in xrange(len(_buckets)):
          encoder_inputs, decoder_inputs, target_weights = model.get_batch(
              dev_set, bucket_id)
          _, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                       target_weights, bucket_id, True)
          eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
          print(" eval: bucket %d perplexity %.2f" % (bucket_id, eval_ppx))
        sys.stdout.flush()
def decode():
  """Interactively translate sentences read from stdin, one per line."""
  with tf.Session() as sess:
    # Build the model in forward-only mode and restore its parameters.
    model = create_model(sess, True)
    model.batch_size = 1  # We decode one sentence at a time.
    # Load vocabularies.
    en_vocab_path = os.path.join(FLAGS.data_dir,
                                 "vocab%d.en" % FLAGS.en_vocab_size)
    fr_vocab_path = os.path.join(FLAGS.data_dir,
                                 "vocab%d.fr" % FLAGS.fr_vocab_size)
    en_vocab, _ = data_utils.initialize_vocabulary(en_vocab_path)
    _, rev_fr_vocab = data_utils.initialize_vocabulary(fr_vocab_path)
    # Decode from standard input until EOF.
    sys.stdout.write("> ")
    sys.stdout.flush()
    sentence = sys.stdin.readline()
    while sentence:
      # Get token-ids for the input sentence.
      token_ids = data_utils.sentence_to_token_ids(sentence, en_vocab)
      # Smallest bucket whose source side can hold the sentence.
      bucket_id = min(b for b in xrange(len(_buckets))
                      if _buckets[b][0] > len(token_ids))
      # Feed the sentence to the model as a 1-element batch.
      encoder_inputs, decoder_inputs, target_weights = model.get_batch(
          {bucket_id: [(token_ids, [])]}, bucket_id)
      _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
                                       target_weights, bucket_id, True)
      # Greedy decoding: take the argmax of each time-step's logits.
      outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
      # Truncate at the first EOS symbol, if any.
      if data_utils.EOS_ID in outputs:
        outputs = outputs[:outputs.index(data_utils.EOS_ID)]
      # Print out French sentence corresponding to outputs.
      print(" ".join([rev_fr_vocab[output] for output in outputs]))
      print("> ", end="")
      sys.stdout.flush()
      sentence = sys.stdin.readline()
def self_test():
  """Test the translation model."""
  with tf.Session() as sess:
    print("Self-test for neural translation model.")
    # Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.
    model = seq2seq_model.Seq2SeqModel(10, 10, [(3, 3), (6, 6)], 32, 2,
                                       5.0, 32, 0.3, 0.99, num_samples=8)
    sess.run(tf.initialize_all_variables())
    # Fake data set for both the (3, 3) and (6, 6) bucket.
    data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],
                [([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])])
    # Train the fake model for 5 steps on randomly chosen buckets.
    for step in xrange(5):
      chosen_bucket = random.choice([0, 1])
      batch = model.get_batch(data_set, chosen_bucket)
      encoder_inputs, decoder_inputs, target_weights = batch
      model.step(sess, encoder_inputs, decoder_inputs, target_weights,
                 chosen_bucket, False)
def main(_):
  """Dispatch to the mode selected by the command-line flags."""
  if FLAGS.self_test:
    action = self_test
  elif FLAGS.decode:
    action = decode
  else:
    action = train
  action()
if __name__ == "__main__":
  # tf.app.run() parses the flags and then invokes main().
  tf.app.run()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Spencer Caplan
# Department of Linguistics, University of Pennsylvania
# Contact: spcaplan@sas.upenn.edu
import sys, math, os, subprocess, glob, nltk, re, operator
import argparse
import readChildes
import syllableCount
from string import punctuation
from nltk import word_tokenize
reload(sys)
sys.setdefaultencoding('utf-8')
import unicodedata
from unicodedata import normalize
# Utterance-final tokens CHILDES uses to terminate a speech line.
punctuationSet = ['.', '?', '!', ':', '(.)', '+...', '+"/.', '+/.']
# Dependent-tier prefixes carrying morphological (POS) annotation.
morphCue = ['%mor:', '%xmor:', '%newmor:', '%trn:']
# Speaker-tier prefixes treated as care-giver speech.
# NOTE(review): '*car:' is listed twice; harmless for membership tests.
motherSet = ['*mot:', '*gra:', '*fat:', '*ann:', '*ant:', '*nan:', '*wom:', '*car:', '*inv:', '*par:', '*mut:', '*vat:', '*oma:', '*exp:', '*car:', '*bri', '*nen:', '*mag:', '*gmt:', '*tmo:', '*exa:']
# Speaker-tier prefixes treated as child speech.
childSet = ['*chi:', '*eli:', '*gre:', '*mar:', '*tai:']
# Strips every character outside a-z (token normalisation for Latin script).
regex = re.compile('[^a-z]')
# Speaker codes matching neither set, mapped to the file they appeared in.
missingSpeakerInfoDict = {}
# word -> token-count accumulators, filled while reading transcripts.
childWords = {}
motherWords = {}
motherIsolatedWords = {}
motherRightEdgeNonIsoWords = {}
# word -> frequency loaded from the external (Charles) frequency list.
charlesFreqDict = {}
# Key: word
# Value: dict mapping from POS to count
wordPOSdict = {}
### Key: Word
### Tuple Value: charlesChildesFrequency(0), motherTotalFreq(1), childTotalFreq(2), motherIsolatedUtterenceCount(3), motherUtteranceFinalLongCount(4)
def readChaFile(dataFileName):
    """Parse one CHILDES .cha transcript, accumulating word counts.

    Lines are grouped into "speech groups" (a speaker line plus its
    dependent %-tiers); each group is processed when the NEXT speaker
    line is encountered.

    NOTE(review): the final speech group of a file is never flushed,
    because processing only happens when a new '*' line starts --
    confirm whether that last utterance should be counted.
    """
    global motherWords, childWords#, readChinChar
    with open(dataFileName, 'r') as currFile:
        # store tuples
        speechGroup = []
        for currLine in currFile:
            # Skip empty lines and '@' metadata headers.
            if not currLine:
                continue
            if currLine[0] == '@':
                continue
            if currLine == '':
                continue
            currLine = currLine.rstrip().lower()
            currLineTokens = currLine.split()
            if len(currLineTokens) == 0:
                continue
            # A '*' token starts a new speaker line: process the group
            # collected so far, then start a fresh one.
            if (currLineTokens[0][0] == "*"):
                # Needs full group to extract from
                if (len(speechGroup) > 1):
                    cleanedSpeechLine, cleanedTagLine = readChildes.cleanSpeechGroup(speechGroup)
                    #print cleanedSpeechLine
                    #print cleanedTagLine
                    #print '\n'
                    if len(cleanedSpeechLine) > 0:
                        # First element is the speaker code, rest are words.
                        speaker = cleanedSpeechLine[0]
                        cleanedWords = []
                        if not args.readChineseData:
                            # Latin script: strip non a-z characters.
                            for word in cleanedSpeechLine[1:]:
                                cleanedWord = regex.sub('', word)
                                cleanedWords.append(cleanedWord)
                        else:
                            # Chinese data: keep tokens untouched.
                            for word in cleanedSpeechLine[1:]:
                                cleanedWords.append(word)
                        # Count POS tags regardless of speaker.
                        extractPartOfSpeechInfo(cleanedTagLine)
                        if speaker in motherSet:
                            for word in cleanedWords:
                                # print word
                                motherWords = readChildes.incrementDict(motherWords, word)
                                # print motherWords[word]
                            extractMotherUtteranceStats(cleanedWords, cleanedSpeechLine)
                            #print cleanedWords
                        elif speaker in childSet:
                            for word in cleanedWords:
                                childWords = readChildes.incrementDict(childWords, word)
                            #print cleanedSpeechLine
                        else:
                            # Unknown speaker code: remember which file it came from.
                            missingSpeakerInfoDict[speaker] = dataFileName
                    else:
                        print 'EMPTY SPEECH LINE'
                        sys.exit()
                speechGroup = []
            # Every non-skipped line (speaker or %-tier) joins the group.
            speechGroup.append(currLine)
def extractPartOfSpeechInfo(tagLine):
    """Accumulate per-word POS-tag counts from a %mor-style tag line.

    Each entry looks like "tag|word..."; the word is truncated at the
    first morphological separator (-, &, ~, =) before counting into the
    module-level wordPOSdict.
    """
    for entry in tagLine:
        pieces = entry.split('|')
        if len(pieces) != 2:
            # Malformed or multi-part entries are ignored.
            continue
        tag, word = pieces
        # Keep only the stem before any morphological separator.
        for separator in ('-', '&', '~', '='):
            word = word.partition(separator)[0]
        if word in wordPOSdict:
            readChildes.incrementDict(wordPOSdict[word], tag)
        else:
            wordPOSdict[word] = readChildes.incrementDict({}, tag)
def getMostFreqTag(wordToCheck):
    """Return the POS tag most frequently observed for *wordToCheck*.

    Falls back to 'NA' when the word has never been tagged.
    """
    bestTag = 'NA'
    bestCount = 0
    for tag, count in wordPOSdict.get(wordToCheck, {}).items():
        if count > bestCount:
            bestCount = count
            bestTag = tag
    return bestTag
def extractMotherUtteranceStats(words, currLine):
    """Track mother words used in isolation vs. at the right edge of a
    longer utterance (module-level accumulators)."""
    global motherRightEdgeNonIsoWords, motherIsolatedWords
    if not words:
        # Empty utterances contribute nothing.
        return
    if len(words) == 1:
        # One-word utterance: the word was produced in isolation.
        motherIsolatedWords = readChildes.incrementDict(
            motherIsolatedWords, words[0])
    else:
        # Multi-word utterance: count its final word.
        motherRightEdgeNonIsoWords = readChildes.incrementDict(
            motherRightEdgeNonIsoWords, words[-1])
def readChildesDirectory(sourceDir):
    """Parse every CHILDES .cha transcript directly inside *sourceDir*."""
    for transcript in glob.glob(sourceDir + "*.cha"):
        readChaFile(transcript)
def iterateSubDir(directoryName):
    """Process .cha files in *directoryName* and each immediate subdirectory.

    Assumes directoryName ends with a path separator (callers append '/').
    Recursion is exactly one level deep; deeper directories are not visited.
    """
    # call function to iterate over any ".cha" files in this directory
    readChildesDirectory(directoryName)
    # going through each immediate subdirectory
    for subDir in next(os.walk(directoryName))[1]:
        subDirPath = directoryName + subDir + '/'
        # NOTE(review): the chdir looks redundant (globbing uses the full
        # subDirPath) and is never undone -- confirm nothing depends on the
        # changed working directory.
        os.chdir(subDirPath)
        readChildesDirectory(subDirPath)
def readWordList(source):
    """Load an external "<freq> <word>" frequency list into charlesFreqDict.

    Words are normalised with the same [^a-z] strip used for the corpus,
    so identical normalised forms accumulate their frequencies.
    """
    with open(source, 'r') as freqFile:
        for line in freqFile:
            if not line:
                continue
            tokens = line.split()
            count = int(tokens[0])
            word = regex.sub('', tokens[1])
            readChildes.updateDictWithValue(charlesFreqDict, word, count)
def cleanDict(dictToClean, blacklist):
    """Remove every blacklisted key from *dictToClean* (in place).

    Keys absent from the dict are ignored. Returns the same dict object
    for call-site convenience.
    """
    for word in blacklist:
        # pop(word, None) replaces the original's redundant membership
        # test followed by try/del/except KeyError.
        dictToClean.pop(word, None)
    return dictToClean
def safeDivide(numerator, denominator):
    """Return numerator/denominator as a float, or 0.0 when the
    denominator is not positive (guards against empty corpora)."""
    if denominator <= 0:
        return 0.0
    return numerator / (denominator * 1.0)
def readInCDI(inputFileName):
    """Read a CDI vocabulary file and add its words to childWords.

    Lines starting with 'speak' or 'understand' list words the child
    produces/comprehends; every word on such a line is counted once.
    """
    global childWords
    with open(inputFileName,'r') as inputFile:
        for currLine in inputFile:
            # Skip blanks and '@' metadata headers (same filtering as
            # the transcript reader).
            if not currLine:
                continue
            if currLine[0] == '@':
                continue
            if currLine == '':
                continue
            currLine = currLine.rstrip().lower()
            currLineTokens = currLine.split()
            if len(currLineTokens) == 0:
                continue
            print currLineTokens
            # First token marks the CDI section the words belong to.
            activeOrPassive = currLineTokens[0]
            #print activeOrPassive
            if activeOrPassive == 'speak' or activeOrPassive == 'understand':
                for word in currLineTokens[1:]:
                    print word
                    childWords = readChildes.incrementDict(childWords, word)
def printOutputWithGlobalFreq(outputFile, globalTotalCorpusCount):
    """Write one row per mother-produced word that also appears in the
    external (Charles) frequency list.

    Columns are raw counts plus corpus-relative proportions for the global
    list, the child, the mother overall, mother isolated utterances and
    mother utterance-final (non-isolated) positions.

    Reads the module-level count dictionaries and the *TotalCorpusCount /
    *TotalIsoCount / *TotalFinalNonIso totals computed in the main block.
    """
    # Fix over the original: the duplicated motherTotalCorpusCount entry in
    # the global statement is removed, and the raw divisions are routed
    # through safeDivide (consistent with printOutputNoGlobalInfo) so empty
    # corpora no longer raise ZeroDivisionError.
    global childTotalCorpusCount, motherTotalCorpusCount, motherTotalIsoCount, motherTotalFinalNonIso
    wordCount = 0
    outputFile.write('word charlesFreqCount charlesFreqProb childAttested childCount childFreqProb motherCount motherFreqProb motherIsoCount motherIsoProb motherFinalNonIsoCount motherFinalNonIsoProb\n')
    for word, motherCount in motherWords.iteritems():
        motherIsoCount = motherIsolatedWords.get(word, 0)
        motherFinalNonIsoCount = motherRightEdgeNonIsoWords.get(word, 0)
        childCount = childWords.get(word, 0)
        childAttested = 1 if word in childWords else 0
        charlesFreqCount = int(charlesFreqDict.get(word, 0))
        if charlesFreqCount > 0:
            ## elements produced by mother (and also in Charles frequency list)
            wordCount += 1
            ## Convert these to proportions as well
            # globalTotalCorpusCount >= charlesFreqCount > 0 here, so this
            # division is safe and the proportion is strictly positive.
            charlesFreqProb = charlesFreqCount / (1.0 * globalTotalCorpusCount)
            childFreqProb = safeDivide(childCount, childTotalCorpusCount)
            motherFreqProb = safeDivide(motherCount, motherTotalCorpusCount)
            motherIsoProb = safeDivide(motherIsoCount, motherTotalIsoCount)
            motherFinalNonIsoProb = safeDivide(motherFinalNonIsoCount, motherTotalFinalNonIso)
            outputFile.write(word + " " + str(charlesFreqCount) + " " + str(charlesFreqProb) + " " + str(childAttested) + " " + str(childCount) + " " + str(childFreqProb) + " " + str(motherCount) + " " + str(motherFreqProb) + " " + str(motherIsoCount) + " " + str(motherIsoProb) + " " + str(motherFinalNonIsoCount) + " " + str(motherFinalNonIsoProb) + "\n")
def printOutputNoGlobalInfo(outputFile):
    """Write one row per mother-produced word with POS, length and bucketed
    frequency information (used when no external frequency list is given)."""
    # NOTE(review): motherTotalCorpusCount is listed twice here; harmless.
    global childTotalCorpusCount, motherTotalCorpusCount, motherTotalCorpusCount, motherTotalIsoCount, motherTotalFinalNonIso
    ### Add: POS
    wordCount = 0
    #outputFile.write('word POS binarizedTag charLength numSylls childAttested childCount childFreqProb motherCount motherBucket motherFreqProb motherIsoCount motherIsoBucket motherIsoProb motherFinalNonIsoCount motherFinalBucket motherFinalNonIsoProb\n')
    outputFile.write('word POS nounStatus verbStatus charLength numSylls childAttested childCount motherCount motherBucket motherIsoCount motherIsoBucket motherFinalNonIsoCount motherFinalBucket\n')
    #for word in motherWords.iteritems():
    for entry in motherWords.iteritems():
        word = entry[0]
        motherCount = entry[1]
        # Most frequent POS tag for this word ('NA' when never tagged),
        # collapsed to noun/verb/other and then to two indicator flags.
        partOfSpeech = getMostFreqTag(word)
        binarizedTag = binarizePOS(partOfSpeech)
        nounStatus = 0
        verbStatus = 0
        if binarizedTag == 'noun':
            nounStatus = 1
        if binarizedTag == 'verb':
            verbStatus = 1
        wordLength = len(word)
        numSylls = syllableCount.countVowelClusters(word)
        motherIsoCount = 0
        motherFinalNonIsoCount = 0
        childCount = 0
        childAttested = 0
        if word in motherIsolatedWords:
            motherIsoCount = motherIsolatedWords[word]
        if word in motherRightEdgeNonIsoWords:
            motherFinalNonIsoCount = motherRightEdgeNonIsoWords[word]
        if word in childWords:
            childCount = childWords[word]
            childAttested = 1
        # Coarse none/rare/frequent buckets for the three mother counts.
        motherTotalBucket = convertFreqCountToBucket(motherCount)
        motherIsoBucket = convertFreqCountToBucket(motherIsoCount)
        motherFinalBucket = convertFreqCountToBucket(motherFinalNonIsoCount)
        ## elements produced by mother
        wordCount += 1
        ## Convert these to proportions as well
        # NOTE(review): the four *Prob values below are computed but no
        # longer written (see the commented-out write); kept for parity.
        childFreqProb = safeDivide(childCount, childTotalCorpusCount)
        motherFreqProb = safeDivide(motherCount, motherTotalCorpusCount)
        motherIsoProb = safeDivide(motherIsoCount, motherTotalIsoCount)
        motherFinalNonIsoProb = safeDivide(motherFinalNonIsoCount, motherTotalFinalNonIso)
        #outputFile.write(word + " " + partOfSpeech + " " + binarizedTag + " " + str(wordLength) + " " + str(numSylls) + " " + str(childAttested) + " " + str(childCount) + " " + str(childFreqProb) + " " + str(motherCount) + " " + motherTotalBucket + " " + str(motherFreqProb) + " " + str(motherIsoCount) + " " + motherIsoBucket + " " + str(motherIsoProb) + " " + str(motherFinalNonIsoCount) + " " + motherFinalBucket + " " + str(motherFinalNonIsoProb) + "\n")
        outputFile.write(word + " " + partOfSpeech + " " + str(nounStatus) + " " + str(verbStatus) + " " + str(wordLength) + " " + str(numSylls) + " " + str(childAttested) + " " + str(childCount) + " " + str(motherCount) + " " + motherTotalBucket + " " + str(motherIsoCount) + " " + motherIsoBucket + " " + str(motherFinalNonIsoCount) + " " + motherFinalBucket + "\n")
def binarizePOS(currPOS):
    """Collapse a CHILDES POS tag into 'noun', 'verb', or 'other'."""
    return {'n': 'noun', 'v': 'verb'}.get(currPOS, 'other')
def convertFreqCountToBucket(count):
    """Map a raw frequency count onto a coarse bucket label:
    0 -> 'none', 1-3 -> 'rare', 4+ -> 'frequent'."""
    if count == 0:
        return 'none'
    return 'rare' if count < 4 else 'frequent'
##
## Main method block
##
if __name__=="__main__":
    parser = argparse.ArgumentParser(description = "Dyad-Extraction for Studying Early Vocabulary Development")
    parser.add_argument("inputDir", help="input directory containing corpus of child/care-giver dyad")
    parser.add_argument("outputFile", help="output filename", type=argparse.FileType('w'))
    parser.add_argument("-cdi", "--cdiFile", help="read in cdi file if provided", type=str, nargs='?', default='')
    parser.add_argument("-ch", "--readChineseData", help="boolean for reading in Chinese data rather than Latin script", type=bool, nargs='?', default=False)
    parser.add_argument("-f", "--globalFreqFile", help="read in global frequency file if provided", type=str, nargs='?', default='')
    args = parser.parse_args()
    print(args)
    # argparse already enforces the positionals; these are belt-and-braces.
    if not args.inputDir:
        raise Exception("Need pointer to input directory!")
    if not args.outputFile:
        raise Exception("Need to specify output source!")
    # Optionally seed childWords from a CDI vocabulary file.
    if args.cdiFile:
        print 'reading CDI data'
        readInCDI(args.cdiFile)
    if args.readChineseData:
        print 'reading Chinese data'
    # Walk the corpus one directory level deep, filling the count dicts.
    searchDirectory = os.getcwd() + '/' + args.inputDir
    iterateSubDir(searchDirectory)
    # Drop CHILDES placeholder tokens for unintelligible speech.
    blacklist = ['xxx', 'yyy']
    childWords = cleanDict(childWords, blacklist)
    motherWords = cleanDict(motherWords, blacklist)
    motherIsolatedWords = cleanDict(motherIsolatedWords, blacklist)
    motherRightEdgeNonIsoWords = cleanDict(motherRightEdgeNonIsoWords, blacklist)
    # Corpus-level totals used to turn counts into proportions.
    childTotalCorpusCount = sum(childWords.values())
    motherTotalCorpusCount = sum(motherWords.values())
    motherTotalIsoCount = sum(motherIsolatedWords.values())
    motherTotalFinalNonIso = sum(motherRightEdgeNonIsoWords.values())
    # Emit the richer report when an external frequency list was supplied.
    if args.globalFreqFile:
        readWordList(args.globalFreqFile)
        charlesTotalCorpusCount = sum(charlesFreqDict.values())
        printOutputWithGlobalFreq(args.outputFile, charlesTotalCorpusCount)
    else:
        print('Running without global frequency list')
        printOutputNoGlobalInfo(args.outputFile)
    args.outputFile.close()
| |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright (C) 2006 Fluendo, S.A. (www.fluendo.com).
# Copyright 2006, Frank Scholz <coherence@beebits.net>
from os.path import abspath
import urlparse
from urlparse import urlsplit
from coherence.extern.et import parse_xml as et_parse_xml
from coherence import SERVER_ID
from twisted.web import server, http, static
from twisted.web import client, error
from twisted.web import proxy, resource, server
from twisted.internet import reactor, protocol, defer, abstract
from twisted.python import failure
from twisted.python.util import InsensitiveDict
try:
from twisted.protocols._c_urlarg import unquote
except ImportError:
from urllib import unquote
try:
import netifaces
have_netifaces = True
except ImportError:
have_netifaces = False
def means_true(value):
    """Return True when *value* is one of the recognised "yes" spellings
    (True, 1, '1', 'true', 'yes', 'ok'); strings compare case-insensitively."""
    accepted = [True, 1, '1', 'true', 'yes', 'ok']
    if isinstance(value, basestring):
        value = value.lower()
    return value in accepted
def generalise_boolean(value):
    """ standardize the different boolean incarnations

    transform anything that looks like a "True" into a '1',
    and everything else into a '0'
    """
    return '1' if means_true(value) else '0'

# American-spelling alias for the same helper.
generalize_boolean = generalise_boolean
def parse_xml(data, encoding="utf-8"):
    # Thin compatibility wrapper: delegates to the bundled ElementTree
    # helper so callers don't import coherence.extern.et directly.
    return et_parse_xml(data, encoding)
def parse_http_response(data):
    """ don't try to get the body, there are reponses without

    Split a raw HTTP response into (status-line parts, header dict);
    header names are lower-cased.
    """
    head = data.split('\r\n\r\n')[0]
    raw_lines = head.split('\r\n')
    cmd = raw_lines[0].split(' ')
    # Normalise "Name: value" to "Name:value" (first occurrence only),
    # drop empty lines, then split each header once at the colon.
    pairs = [line.replace(': ', ':', 1).split(':', 1)
             for line in raw_lines[1:] if len(line) > 0]
    headers = dict([(pair[0].lower(), pair[1]) for pair in pairs])
    return cmd, headers
def get_ip_address(ifname):
    """
    determine the IP address by interface name

    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/439094
    (c) Paul Cannon
    Uses the Linux SIOCGIFADDR ioctl to find the IP address associated
    with a network interface, given the name of that interface, e.g. "eth0".
    The address is returned as a string containing a dotted quad.

    Updated to work on BSD. OpenBSD and OSX share the same value for
    SIOCGIFADDR, and its likely that other BSDs do too.

    Updated to work on Windows,
    using the optional Python module netifaces
    http://alastairs-place.net/netifaces/

    Thx Lawrence for that patch!

    Returns '127.0.0.1' whenever the address cannot be determined.
    """
    # Preferred path: netifaces works on every platform when installed.
    if have_netifaces:
        if ifname in netifaces.interfaces():
            iface = netifaces.ifaddresses(ifname)
            ifaceadr = iface[netifaces.AF_INET]
            # we now have a list of address dictionaries, there may be multiple addresses bound
            return ifaceadr[0]['addr']
    import sys
    if sys.platform in ('win32', 'sunos5'):
        # No ioctl fallback available on these platforms.
        return '127.0.0.1'
    from os import uname
    import socket
    import fcntl
    import struct
    system_type = uname()[0]
    # SIOCGIFADDR request number differs between Linux and the BSD family.
    if system_type == "Linux":
        SIOCGIFADDR = 0x8915
    else:
        SIOCGIFADDR = 0xc0206921
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            SIOCGIFADDR,
            struct.pack('256s', ifname[:15])
        )[20:24])
    except:
        # NOTE(review): bare except deliberately swallows any ioctl/socket
        # failure and reports loopback instead.
        return '127.0.0.1'
def get_host_address():
    """ try to get determine the interface used for
        the default route, as this is most likely
        the interface we should bind to (on a single homed host!)

    Returns a dotted-quad string, or (on the netstat fallback path) a
    Deferred that fires with one.
    """
    import sys
    if sys.platform == 'win32':
        if have_netifaces:
            interfaces = netifaces.interfaces()
            if len(interfaces):
                # on windows assume first interface is primary
                return get_ip_address(interfaces[0])
    else:
        try:
            # Linux: scan the kernel routing table for the default route
            # (destination 00000000) and resolve that interface's address.
            route_file = '/proc/net/route'
            route = open(route_file)
            if(route):
                tmp = route.readline()  # skip first line
                while (tmp != ''):
                    tmp = route.readline()
                    l = tmp.split('\t')
                    if (len(l) > 2):
                        if l[1] == '00000000':  # default route...
                            route.close()
                            return get_ip_address(l[0])
            # NOTE(review): if no default route is found, `route` is never
            # closed and we fall through to the loopback return below.
        except IOError, msg:
            """ fallback to parsing the output of netstat """
            # /proc/net/route missing (e.g. BSD/OSX): parse `netstat -rn`
            # asynchronously; callers get a Deferred in this case.
            from twisted.internet import utils
            def result(r):
                from os import uname
                (osname, _, _, _, _) = uname()
                osname = osname.lower()
                lines = r.split('\n')
                for l in lines:
                    l = l.strip(' \r\n')
                    parts = [x.strip() for x in l.split(' ') if len(x) > 0]
                    if parts[0] in ('0.0.0.0', 'default'):
                        # Interface column position differs on Darwin.
                        if osname[:6] == 'darwin':
                            return get_ip_address(parts[5])
                        else:
                            return get_ip_address(parts[-1])
                return '127.0.0.1'
            def fail(f):
                return '127.0.0.1'
            d = utils.getProcessOutput('netstat', ['-rn'])
            d.addCallback(result)
            d.addErrback(fail)
            return d
        except Exception, msg:
            import traceback
            traceback.print_exc()
    """ return localhost if we haven't found anything """
    return '127.0.0.1'
def de_chunk_payload(response):
    """Take a chunked HTTP body and return the de-chunked payload.

    Chunked encoding is a sequence of lines, each holding a hex length
    followed by that many bytes of data and a CRLF pair; a zero (or
    unparsable) length terminates the stream.
    """
    try:
        import cStringIO as StringIO
    except ImportError:
        import StringIO
    newresponse = StringIO.StringIO()
    response = StringIO.StringIO(response)

    def read_chunk_length():
        # Returns 0 for a malformed length line, which also ends the loop.
        # (Renamed from the original's local `len`, which shadowed the
        # builtin.)
        line = response.readline()
        try:
            return int(line.strip(), 16)
        except ValueError:
            return 0

    chunk_size = read_chunk_length()
    while (chunk_size > 0):
        newresponse.write(response.read(chunk_size))
        response.readline()  # consume the CRLF trailing the chunk
        chunk_size = read_chunk_length()
    return newresponse.getvalue()
class Request(server.Request):
    """server.Request subclass that resolves and renders resources itself.

    Adds default headers (server id, date, content-type) and supports
    site.getResourceFor() returning a Deferred.
    """

    def process(self):
        "Process a request."
        # get site from channel
        self.site = self.channel.site
        # set various default headers
        self.setHeader('server', SERVER_ID)
        self.setHeader('date', http.datetimeToString())
        self.setHeader('content-type', "text/html")
        # Resource Identification
        url = self.path
        #remove trailing "/", if ever
        url = url.rstrip('/')
        scheme, netloc, path, query, fragment = urlsplit(url)
        self.prepath = []
        if path == "":
            self.postpath = []
        else:
            # Unquoted path segments used for resource tree traversal.
            self.postpath = map(unquote, path[1:].split('/'))
        try:
            def deferred_rendering(r):
                # Called once an asynchronous resource lookup resolves.
                self.render(r)
            resrc = self.site.getResourceFor(self)
            if resrc is None:
                self.setResponseCode(http.NOT_FOUND, "Error: No resource for path %s" % path)
                self.finish()
            elif isinstance(resrc, defer.Deferred):
                # Resource arrives later; render when the Deferred fires.
                resrc.addCallback(deferred_rendering)
                resrc.addErrback(self.processingFailed)
            else:
                self.render(resrc)
        except:
            # Mirror twisted's behaviour: any rendering error becomes a
            # processingFailed() response rather than propagating.
            self.processingFailed(failure.Failure())
class Site(server.Site):
    """Quiet server.Site variant that uses our Request class."""
    noisy = False
    requestFactory = Request

    def startFactory(self):
        # NOTE(review): overridden to do nothing -- presumably to skip the
        # base class's startup work (the commented line suggests the HTTP
        # log timer); confirm before removing.
        pass
        #http._logDateTimeStart()
class ProxyClient(proxy.ProxyClient):
    """Proxy client that tolerates ICY (SHOUTcast) pseudo-HTTP responses.

    Counts forwarded payload bytes in ``send_data``, rewrites the ICY
    status line into HTTP/1.1 so twisted's parser accepts it, and drops
    the non-HTTP ``icy-*`` metadata headers.
    """

    def __init__(self, command, rest, version, headers, data, father):
        # Fix over the original: it called log.Loggable.__init__ and
        # web.ProxyClient.*, but neither `log` nor `web` is imported in
        # this module, so every call raised NameError. The actual parent
        # class is proxy.ProxyClient; delegate there.
        #headers["connection"] = "close"
        self.send_data = 0
        proxy.ProxyClient.__init__(self, command, rest, version,
                                   headers, data, father)

    def handleStatus(self, version, code, message):
        if message:
            # Add a whitespace to message, this allows empty messages
            # transparently
            message = " %s" % (message, )
        if version == 'ICY':
            version = 'HTTP/1.1'
        proxy.ProxyClient.handleStatus(self, version, code, message)

    def handleHeader(self, key, value):
        # icy-* metadata headers are not valid HTTP; swallow them.
        if not key.startswith('icy-'):
            proxy.ProxyClient.handleHeader(self, key, value)

    def handleResponsePart(self, buffer):
        self.send_data += len(buffer)
        proxy.ProxyClient.handleResponsePart(self, buffer)
class ProxyClientFactory(proxy.ProxyClientFactory):
    # :fixme: Why is proxy.ProxyClient used here instead of our own
    # ProxyClient subclass above? Is our ProxyClient used at all?
    protocol = proxy.ProxyClient
class ReverseProxyResource(proxy.ReverseProxyResource):
    """
    Resource that renders the results gotten from another server

    Put this resource in the tree to cause everything below it to be relayed
    to a different server.

    @ivar proxyClientFactoryClass: a proxy client factory class, used to create
        new connections.
    @type proxyClientFactoryClass: L{ClientFactory}

    @ivar reactor: the reactor used to create connections.
    @type reactor: object providing L{twisted.internet.interfaces.IReactorTCP}
    """

    proxyClientFactoryClass = ProxyClientFactory

    def __init__(self, host, port, path, reactor=reactor):
        """
        @param host: the host of the web server to proxy.
        @type host: C{str}

        @param port: the port of the web server to proxy.
        @type port: C{port}

        @param path: the base path to fetch data from. Note that you shouldn't
            put any trailing slashes in it, it will be added automatically in
            request. For example, if you put B{/foo}, a request on B{/bar} will
            be proxied to B{/foo/bar}.
        @type path: C{str}
        """
        resource.Resource.__init__(self)
        self.host = host
        self.port = port
        self.path = path
        self.qs = ''
        self.reactor = reactor

    def getChild(self, path, request):
        """
        Return a child proxy resource for C{path}.

        Fix over the original: propagate C{self.reactor} to the child
        instead of silently falling back to the module-level default
        reactor (same fix as twisted upstream).
        """
        return ReverseProxyResource(
            self.host, self.port, self.path + '/' + path, self.reactor)

    def render(self, request):
        """
        Render a request by forwarding it to the proxied server.
        """
        # RFC 2616 tells us that we can omit the port if it's the default port,
        # but we have to provide it otherwise
        if self.port == 80:
            request.requestHeaders.setRawHeaders('host', [self.host])
        else:
            hostname = "%s:%d" % (self.host, self.port)
            request.requestHeaders.setRawHeaders('host', [hostname])
        request.content.seek(0, 0)
        # Use the request's own query string, else the configured default.
        qs = urlparse.urlparse(request.uri)[4]
        if qs == '':
            qs = self.qs
        if qs:
            rest = self.path + '?' + qs
        else:
            rest = self.path
        clientFactory = self.proxyClientFactoryClass(
            request.method, rest, request.clientproto,
            request.getAllHeaders(), request.content.read(), request)
        self.reactor.connectTCP(self.host, self.port, clientFactory)
        return server.NOT_DONE_YET

    def resetTarget(self, host, port, path, qs=''):
        """Repoint this proxy at a new backend host/port/path (and query)."""
        self.host = host
        self.port = port
        self.path = path
        self.qs = qs
class ReverseProxyUriResource(ReverseProxyResource):
    """ReverseProxyResource constructed from (and re-targetable by) a URI."""

    uri = None

    @staticmethod
    def _parse_uri(uri):
        """Return (host, port, path, query) parsed from *uri*; port
        defaults to 80 when the netloc carries none.

        Shared by __init__ and resetUri to remove the original's
        duplicated parsing logic.
        """
        _, host_port, path, query, _ = urlsplit(uri)
        if host_port.find(':') != -1:
            host, port = tuple(host_port.split(':'))
            port = int(port)
        else:
            host = host_port
            port = 80
        return host, port, path, query

    def __init__(self, uri, reactor=reactor):
        self.uri = uri
        host, port, path, query = self._parse_uri(uri)
        # The base class wants a single "path?query" string.
        if path == '':
            path = '/'
        if query == '':
            rest = path
        else:
            rest = '?'.join((path, query))
        ReverseProxyResource.__init__(self, host, port, rest, reactor)

    def resetUri(self, uri):
        """Re-target the proxy at *uri* (path and query stay separate,
        matching resetTarget's signature)."""
        self.uri = uri
        host, port, path, query = self._parse_uri(uri)
        self.resetTarget(host, port, path, query)
# already on twisted.web since at least 8.0.0
class myHTTPPageGetter(client.HTTPPageGetter):
    # Follow HTTP redirects by default.
    followRedirect = True
class HeaderAwareHTTPClientFactory(client.HTTPClientFactory):
    """HTTPClientFactory whose deferred also delivers the response headers.

    page() fires the callback with a (body, response_headers) tuple
    instead of just the body.
    """
    protocol = myHTTPPageGetter
    noisy = False

    def buildProtocol(self, addr):
        p = client.HTTPClientFactory.buildProtocol(self, addr)
        # Propagate request method and redirect policy to the protocol.
        p.method = self.method
        p.followRedirect = self.followRedirect
        return p

    def page(self, page):
        # Bundle the headers with the body for header-aware callers.
        client.HTTPClientFactory.page(self, (page, self.response_headers))
# deprecated, do not use
# already in twisted.web since at least 1.3.0
# Alias kept only for backwards compatibility with older callers.
HeaderAwareHTTPDownloader = client.HTTPDownloader
def getPage(url, contextFactory=None, *args, **kwargs):
    """
    Download a web page as a string.

    Download a page. Return a deferred, which will callback with a
    page (as a string) or errback with a description of the error.

    See HTTPClientFactory to see what extra args can be passed.
    """
    # Identical to twisted.web.client.getPage except that it uses our
    # HeaderAwareHTTPClientFactory instead of HTTPClientFactory and sets
    # a default user agent.
    if 'headers' in kwargs and 'user-agent' in kwargs['headers']:
        kwargs['agent'] = kwargs['headers']['user-agent']
    elif 'agent' not in kwargs:
        kwargs['agent'] = "Coherence PageGetter"
    factory = client._makeGetterFactory(
        url,
        HeaderAwareHTTPClientFactory,
        contextFactory=contextFactory,
        *args, **kwargs)
    return factory.deferred
def downloadPage(url, file, contextFactory=None, *args, **kwargs):
    """Download a web page to a file.

    @param file: path to file on filesystem, or file-like object.

    See twisted.web.client.HTTPDownloader to see what extra args can
    be passed.
    """
    # Mirror getPage's user-agent handling, then delegate to twisted.
    if 'headers' in kwargs and 'user-agent' in kwargs['headers']:
        kwargs['agent'] = kwargs['headers']['user-agent']
    elif 'agent' not in kwargs:
        kwargs['agent'] = "Coherence PageGetter"
    return client.downloadPage(url, file, contextFactory=contextFactory,
                               *args, **kwargs)
# StaticFile used to be a patched version of static.File. The later
# was fixed in TwistedWeb 8.2.0 and 9.0.0, while the patched variant
# contained deprecated and removed code.
# Alias kept so existing imports of StaticFile keep working.
StaticFile = static.File
class BufferFile(static.File):
    """ taken from twisted.web.static and modified
        accordingly to the patch by John-Mark Gurney
        http://resnet.uoregon.edu/~gurney_j/jmpc/dist/twisted.web.static.patch

    Serves a file that may still be growing: target_size is the size the
    file is expected to reach, and Range requests beyond the bytes
    currently on disk are postponed and retried instead of failing.
    """

    def __init__(self, path, target_size=0, *args):
        static.File.__init__(self, path, *args)
        # Expected final size of the (possibly still growing) file;
        # 0 means "use the current on-disk size".
        self.target_size = target_size
        # Pending reactor.callLater handle for a postponed render, if any.
        self.upnp_retry = None

    def render(self, request):
        #print ""
        #print "BufferFile", request

        # FIXME detect when request is REALLY finished
        if request is None or request.finished:
            print "No request to render!"
            return ''

        """You know what you doing."""
        self.restat()

        if self.type is None:
            self.type, self.encoding = static.getTypeAndEncoding(self.basename(),
                                                                 self.contentTypes,
                                                                 self.contentEncodings,
                                                                 self.defaultType)

        if not self.exists():
            return self.childNotFound.render(request)

        if self.isdir():
            return self.redirect(request)

        #for content-length
        # size: logical file size advertised to the client (target size
        # when the file is still growing).
        if (self.target_size > 0):
            fsize = size = int(self.target_size)
        else:
            fsize = size = int(self.getFileSize())

        #print fsize

        # Only advertise Range support once the file reached full size.
        if size == int(self.getFileSize()):
            request.setHeader('accept-ranges', 'bytes')

        if self.type:
            request.setHeader('content-type', self.type)
        if self.encoding:
            request.setHeader('content-encoding', self.encoding)

        try:
            f = self.openForReading()
        except IOError, e:
            import errno
            if e[0] == errno.EACCES:
                return error.ForbiddenResource().render(request)
            else:
                raise

        if request.setLastModified(self.getmtime()) is http.CACHED:
            return ''

        trans = True

        range = request.getHeader('range')
        #print "StaticFile", range

        tsize = size
        if range is not None:
            # This is a request for partial data...
            bytesrange = range.split('=')
            assert bytesrange[0] == 'bytes', \
                   "Syntactically invalid http range header!"
            start, end = bytesrange[1].split('-', 1)
            if start:
                start = int(start)
                # Are we requesting something beyond the current size of the file?
                if (start >= self.getFileSize()):
                    # Retry later!
                    print bytesrange
                    print "Requesting data beyond current scope -> postpone rendering!"
                    self.upnp_retry = reactor.callLater(1.0, self.render, request)
                    return server.NOT_DONE_YET

                f.seek(start)
                if end:
                    #print ":%s" % end
                    end = int(end)
                else:
                    end = size - 1
            else:
                # Suffix range "-N": serve the last N bytes of the file.
                lastbytes = int(end)
                if size < lastbytes:
                    lastbytes = size
                start = size - lastbytes
                f.seek(start)
                fsize = lastbytes
                end = size - 1

            size = end + 1
            fsize = end - int(start) + 1
            # start is the byte offset to begin, and end is the byte offset
            # to end.. fsize is size to send, tsize is the real size of
            # the file, and size is the byte position to stop sending.
            if fsize <= 0:
                request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
                fsize = tsize
                trans = False
            else:
                request.setResponseCode(http.PARTIAL_CONTENT)
                request.setHeader('content-range', "bytes %s-%s/%s " % (
                    str(start), str(end), str(tsize)))
                #print "StaticFile", start, end, tsize

        request.setHeader('content-length', str(fsize))

        if request.method == 'HEAD' or trans == False:
            # pretend we're a HEAD request, so content-length
            # won't be overwritten.
            request.method = 'HEAD'
            return ''

        #print "StaticFile out", request.responseHeaders, request.code

        # return data
        # size is the byte position to stop sending, not how many bytes to send
        BufferFileTransfer(f, size - f.tell(), request)
        # and make sure the connection doesn't get closed
        return server.NOT_DONE_YET
class BufferFileTransfer(object):
    """Pull producer that streams a slice of an open file over a request.

    'size' is the absolute byte position in the file at which to stop
    sending, not the number of bytes to transfer.
    """
    request = None

    def __init__(self, file, size, request):
        self.file = file
        self.size = size
        self.request = request
        self.written = self.file.tell()
        request.registerProducer(self, 0)

    def resumeProducing(self):
        """Write the next chunk; finish the request once 'size' is reached."""
        if not self.request:
            return
        remaining = self.size - self.written
        data = self.file.read(min(abstract.FileDescriptor.bufferSize, remaining))
        if data:
            self.written += len(data)
            # this .write will spin the reactor, calling .doWrite and then
            # .resumeProducing again, so be prepared for a re-entrant call
            self.request.write(data)
        if self.request and self.file.tell() == self.size:
            self.request.unregisterProducer()
            self.request.finish()
            self.request = None

    def pauseProducing(self):
        """Nothing to do for a pull producer."""
        pass

    def stopProducing(self):
        """Tear the transfer down when the consumer goes away."""
        self.request.unregisterProducer()
        self.file.close()
        self.request.finish()
        self.request = None
from datetime import datetime, tzinfo, timedelta
import random
class _tz(tzinfo):
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return self._name
def dst(self, dt):
return timedelta(0)
class _CET(_tz):
    """Central European Time (UTC+1)."""
    _offset = timedelta(minutes=60)
    _name = 'CET'
class _CEST(_tz):
    """Central European Summer Time (UTC+2)."""
    _offset = timedelta(minutes=120)
    _name = 'CEST'
# Birthday datetimes (with their historically correct CET/CEST offsets),
# used as fake dates by datefaker().
_bdates = [datetime(1997,2,28,17,20,tzinfo=_CET()), # Sebastian Oliver
           datetime(1999,9,19,4,12,tzinfo=_CEST()), # Patrick Niklas
           datetime(2000,9,23,4,8,tzinfo=_CEST()), # Saskia Alexa
           datetime(2003,7,23,1,18,tzinfo=_CEST()), # Mara Sophie
           # you are the best!
           ]
def datefaker():
    """Return one of the hard-coded birthday datetimes, chosen at random."""
    return random.choice(_bdates)
| |
"""
Core abstractions used in OpenNSA.
In design pattern terms, these would be Data Transfer Objects (DTOs).
Though some of them do actually have some functionality methods.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2011-2013)
"""
import uuid
import random
import urlparse
import itertools
from opennsa import error, constants as cnt
LOG_SYSTEM = 'opennsa.nsa'        # log system name for this module
URN_UUID_PREFIX = 'urn:uuid:'     # prefix for generated correlation ids (RFC 4122 URN form)
BIDIRECTIONAL = 'Bidirectional'   # default directionality for point-to-point services
class NSIHeader(object):
    """Common NSI message header.

    Carries requester/provider NSA identities, a correlation id (generated
    when not supplied), an optional reply-to endpoint, security attributes
    and an optional connection trace.
    """

    def __init__(self, requester_nsa, provider_nsa, correlation_id=None, reply_to=None, security_attributes=None, connection_trace=None):
        self.requester_nsa = requester_nsa
        self.provider_nsa = provider_nsa
        # generate a fresh correlation id unless a usable one was supplied
        if not correlation_id:
            correlation_id = self._createCorrelationId()
        self.correlation_id = correlation_id
        self.reply_to = reply_to
        self.security_attributes = security_attributes if security_attributes else []
        self.connection_trace = connection_trace

    def _createCorrelationId(self):
        # correlation ids are UUID URNs
        return URN_UUID_PREFIX + str(uuid.uuid1())

    def newCorrelationId(self):
        """Replace the current correlation id with a freshly generated one."""
        self.correlation_id = self._createCorrelationId()

    def __repr__(self):
        fields = (self.requester_nsa, self.provider_nsa, self.correlation_id,
                  self.reply_to, self.security_attributes, self.connection_trace)
        return '<NSIHeader: %s, %s, %s, %s, %s, %s>' % fields
class SecurityAttribute(object):
    """A typed key/value authorization attribute.

    A better name would be AuthZAttribute, but we are keeping the NSI lingo.
    """

    def __init__(self, type_, value):
        assert type(type_) is str, 'SecurityAttribute type must be a string, not %s' % type(type_)
        assert type(value) is str, 'SecurityAttribute value must be a string, not %s' % type(value)
        self.type_ = type_
        self.value = value

    def match(self, sa):
        """True when sa carries the same type and value as this attribute."""
        assert type(sa) is SecurityAttribute, 'Can only compare SecurityAttribute with another SecurityAttribute'
        return (self.type_, self.value) == (sa.type_, sa.value)

    def __repr__(self):
        return '<SecurityAttribute: %s = %s>' % (self.type_, self.value)
class EmptyLabelSet(Exception):
    """Raised when a label intersection produces no common values."""
    pass
class Label(object):
    """A typed set of integer values (e.g. VLAN ids).

    Values are stored as a sorted list of inclusive (low, high) tuples with
    overlapping and adjacent ranges merged, or None when no values are set.
    Input values may be given as a comma-separated string ("80", "2-5,7")
    or a list of such strings.
    """

    def __init__(self, type_, values=None):
        # Fixed: the old check 'type(values) in (None, str, list)' always
        # failed for values=None, since type(None) is NoneType, not None.
        assert values is None or type(values) in (str, list), 'Type of Label values must be a None, str, or list. Was given %s' % type(values)
        self.type_ = type_
        self.values = self._parseLabelValues(values) if values is not None else None

    def _parseLabelValues(self, values):
        # Parse into a sorted, overlap-free list of inclusive (low, high) tuples.
        def createValue(value):
            try:
                if '-' in value:
                    v1, v2 = value.split('-', 1)
                    i1, i2 = int(v1), int(v2)
                    if i1 > i2:
                        raise error.PayloadError('Label value %s is in descending order, which is not allowed.' % value)
                else:
                    i1 = int(value)
                    i2 = i1
                return i1, i2
            except ValueError:
                raise error.PayloadError('Label %s is not an integer or an integer range.' % value)

        if type(values) is str:
            values = values.split(',')

        parsed_values = sorted( [ createValue(value) for value in values ] )

        # detect any overlap and remove it - remember that the list is sorted
        nv = [] # normalized values
        for v1, v2 in parsed_values:
            if len(nv) == 0:
                nv.append( (v1,v2) )
                continue
            l = nv[-1] # last
            if v1 <= l[1] + 1: # overlapping or adjacent: merge
                nv = nv[:-1] + [ (l[0], max(l[1],v2)) ]
            else:
                nv.append( (v1,v2) )
        return nv

    def intersect(self, other):
        """Return a new Label with the values common to self and other.

        Raises EmptyLabelSet when there are no common values.
        """
        assert type(other) is Label, 'Cannot intersect label with something that is not a label (other was %s)' % type(other)
        assert self.type_ == other.type_, 'Cannot intersect label of different types'
        label_values = []
        i = iter(other.values)
        # next(i) instead of i.next(): works on Python 2.6+ and Python 3
        o1, o2 = next(i)
        for v1, v2 in self.values:
            while True:
                if v2 < o1:
                    break
                elif o2 < v1:
                    try:
                        o1, o2 = next(i)
                    except StopIteration:
                        break
                    continue
                label_values.append( ( max(v1,o1), min(v2,o2)) )
                if v2 <= o2:
                    break
                elif o2 <= v2:
                    try:
                        o1, o2 = next(i)
                    except StopIteration:
                        break
        if len(label_values) == 0:
            raise EmptyLabelSet('Label intersection produced empty label set')
        ls = ','.join( [ '%i-%i' % (nv[0], nv[1]) for nv in label_values ] )
        return Label(self.type_, ls)

    def labelValue(self):
        """Return the values as a canonical comma-separated string."""
        vs = [ str(v1) if v1 == v2 else str(v1) + '-' + str(v2) for v1,v2 in self.values ]
        return ','.join(vs)

    def singleValue(self):
        """True when the label contains exactly one value."""
        return len(self.values) == 1 and self.values[0][0] == self.values[0][1]

    def enumerateValues(self):
        """Return all contained values as a flat list of ints."""
        lv = [ range(lr[0], lr[1]+1) for lr in self.values ]
        return list(itertools.chain.from_iterable( lv ) )

    def randomLabel(self):
        """Return a random value from the label set.

        Not evenly distributed (each range is equally likely regardless of
        its width), but that isn't promised anyway.
        """
        label_range = random.choice(self.values)
        # Fixed: randint is inclusive on both ends, so the upper bound must
        # be label_range[1], not label_range[1]+1 (which could produce a
        # value outside the label set).
        return random.randint(label_range[0], label_range[1])

    @staticmethod
    def canMatch(l1, l2):
        """True when the two labels (either possibly None) can be matched."""
        if l1 is None and l2 is None:
            return True
        elif l1 is None or l2 is None:
            return False
        try:
            l1.intersect(l2) # this checks type as well as range
            return True
        except EmptyLabelSet:
            return False

    def __eq__(self, other):
        if not type(other) is Label:
            return False
        return self.type_ == other.type_ and sorted(self.values) == sorted(other.values)

    def __repr__(self):
        return '<Label %s:%s>' % (self.type_, self.labelValue())
class STP(object):
    """Service Termination Point: a (network, port) pair plus an optional Label."""

    def __init__(self, network, port, label=None):
        assert type(network) is str, 'Invalid network type provided for STP'
        assert type(port) is str, 'Invalid port type provided for STP'
        assert label is None or type(label) is Label, 'Invalid label type provided for STP'
        self.network = network
        self.port = port
        self.label = label

    def _labelSuffix(self):
        # "?<label-name>=<values>" part shared by shortName() and urn()
        return '?' + self.label.type_.split('#')[-1] + '=' + self.label.labelValue()

    def shortName(self):
        name = '%s:%s' % (self.network, self.port)
        if self.label:
            name += self._labelSuffix()
        return name

    def baseURN(self):
        return cnt.URN_OGF_PREFIX + self.network + ':' + self.port

    def urn(self):
        suffix = self._labelSuffix() if self.label is not None else ''
        return self.baseURN() + suffix

    def __eq__(self, other):
        if type(other) is not STP:
            return False
        return (self.network, self.port, self.label) == (other.network, other.port, other.label)

    def __repr__(self):
        return '<STP %s>' % self.shortName()
class Link(object):
    """An STP pair; the two labels must be either both absent or both set."""

    def __init__(self, src_stp, dst_stp):
        assert (src_stp.label is None) == (dst_stp.label is None), \
            'Source and destination label must either both be None, or both specified'
        self.src_stp = src_stp
        self.dst_stp = dst_stp

    def sourceSTP(self):
        return self.src_stp

    def destSTP(self):
        return self.dst_stp

    def __eq__(self, other):
        if type(other) is not Link:
            return False
        return (self.src_stp, self.dst_stp) == (other.src_stp, other.dst_stp)

    def __repr__(self):
        return '<Link %s == %s>' % (self.src_stp, self.dst_stp)
class Path(object):
    """A path from a source to a destination STP, as an ordered list of links."""

    def __init__(self, network_links):
        self.network_links = network_links

    def links(self):
        return self.network_links

    def sourceEndpoint(self):
        # source STP of the first link on the path
        return self.network_links[0].sourceSTP()

    def destEndpoint(self):
        # destination STP of the last link on the path
        return self.network_links[-1].destSTP()

    def __str__(self):
        body = ' '.join(str(link) for link in self.network_links)
        return '<Path: %s>' % body
class NetworkServiceAgent(object):
    """An NSA: identity plus a service endpoint URL and optional service type."""

    def __init__(self, identity, endpoint, service_type=None):
        assert type(identity) is str, 'NSA identity type must be string (type: %s, value %s)' % (type(identity), identity)
        assert type(endpoint) is str, 'NSA endpoint type must be string (type: %s, value %s)' % (type(endpoint), endpoint)
        self.identity = identity
        # stray whitespace around endpoint URLs is common in configs, strip it
        self.endpoint = endpoint.strip()
        self.service_type = service_type

    def getHostPort(self):
        """Return (host, port) parsed from the endpoint URL."""
        parsed = urlparse.urlparse(self.endpoint)
        host, port = parsed.netloc.split(':', 2)
        return host, int(port)

    def urn(self):
        return cnt.URN_OGF_PREFIX + self.identity

    def getServiceType(self):
        """Return the service type; raises ValueError when none was set."""
        if self.service_type is None:
            raise ValueError('NSA with identity %s is not constructed with a type' % self.identity)
        return self.service_type

    def __str__(self):
        return '<NetworkServiceAgent %s>' % self.identity
class ConnectionInfo(object):
    """Aggregated connection data; only used for query results (pure DTO)."""

    def __init__(self, connection_id, global_reservation_id, description, service_type, criterias, provider_nsa, requester_nsa, states, notification_id, result_id):
        # criterias must be a (possibly empty) list of QueryCriteria
        assert type(criterias) is list, 'Invalid criterias type: %s' % str(type(criterias))
        for criteria in criterias:
            assert type(criteria) is QueryCriteria, 'Invalid criteria type: %s' % str(type(criteria))
        # identifiers
        self.connection_id = connection_id
        self.global_reservation_id = global_reservation_id
        self.description = description
        # service data
        self.service_type = service_type
        self.criterias = criterias
        # involved agents and state
        self.provider_nsa = provider_nsa
        self.requester_nsa = requester_nsa
        self.states = states
        self.notification_id = notification_id
        self.result_id = result_id
class Criteria(object):
    """Connection criteria: revision number, schedule and service definition."""

    def __init__(self, revision, schedule, service_def):
        self.service_def = service_def
        self.schedule = schedule
        self.revision = revision
class QueryCriteria(Criteria):
    """Criteria extended with child connections.

    Only used for query summary and recursive (but not really used in summary).
    """

    def __init__(self, revision, schedule, service_def, children=None):
        assert children is None or type(children) is list, 'Invalid QueryCriteria type: %s' % str(type(children))
        children = children or []
        for child in children:
            assert type(child) is ConnectionInfo, 'Invalid QueryCriteria child: %s' % str(type(child))
        Criteria.__init__(self, revision, schedule, service_def)
        self.children = children
class Schedule(object):
    """A reservation schedule with naive (timezone-free) datetimes.

    Either time may be None; when a datetime is given it must NOT carry
    tzinfo.
    """

    def __init__(self, start_time, end_time):
        # Must be datetime instances without tzinfo.
        # Fixed: each time is now validated independently. Previously
        # end_time was only checked when start_time was set, and
        # Schedule(start, None) crashed with AttributeError on None.tzinfo.
        if start_time is not None:
            assert start_time.tzinfo is None, 'Start time must NOT have time zone'
        if end_time is not None:
            assert end_time.tzinfo is None, 'End time must NOT have time zone'
        self.start_time = start_time
        self.end_time = end_time

    def __str__(self):
        return '<Schedule: %s-%s>' % (self.start_time, self.end_time)
class Point2PointService(object):
    """Parameters describing a point-to-point connection service."""

    def __init__(self, source_stp, dest_stp, capacity, directionality=BIDIRECTIONAL, symmetric=None, ero=None, parameters=None):
        if directionality is None:
            raise error.MissingParameterError('directionality must be defined, must not be None')
        # endpoints and bandwidth
        self.source_stp = source_stp
        self.dest_stp = dest_stp
        self.capacity = capacity
        # service options
        self.directionality = directionality
        self.symmetric = symmetric
        self.ero = ero
        self.parameters = parameters
| |
"""
Created on Thu Mar 24 08:18:04 2016
@author: npop
The remote reference processor calculates different types of spectra
Inherits from single site processor
Just does remote reference computations
"""
import os
import numpy as np
import scipy.signal as signal
import scipy.interpolate as interp
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# utils
from utilsFreq import *
from utilsIO import *
from utilsRobust import *
from utilsProcess import *
# import ProcessorSingleSite
from processorSingleSite import ProcessorSingleSite
class ProcessorRemoteReference(ProcessorSingleSite):
###################
### SET DEFAULTS
##################
def setDefaults(self):
# inputs
self.inSite = ""
self.inChannels = []
self.outSite = ""
self.outChannels = []
self.remoteSite = ""
self.remoteChannels = []
# evaluation frequency data
self.evalFreq = []
self.evalFreqEqns = []
# smoothing options
self.win = "hanning"
self.winSmooth = -1
# intercept options
self.intercept = False
# output filename
self.prepend = ""
###################
### GET GENERAL INFO
##################
    def getRemoteSite(self):
        """Return the name of the remote reference site."""
        return self.remoteSite
    def getRemoteChannels(self):
        """Return the list of remote reference channels."""
        return self.remoteChannels
    def getRemoteSize(self):
        """Return the number of remote reference channels."""
        return self.remoteSize
###################
### SET REMOTE REFERENCE
##################
def setRemote(self, remoteSite, remoteChannels):
self.remoteSite = remoteSite
self.remoteChannels = remoteChannels
self.remoteSize = len(remoteChannels)
self.printText("Remote reference set with site {} and channels {}".format(self.remoteSite, self.remoteChannels))
###################
### PROCESS - ONLY THIS FUNCTION IS DIFFERENT
##################
    def process(self):
        """Run remote reference processing.

        For every decimation level: read the shared spectral windows of the
        input, output and remote sites, form cross-power spectra between the
        local data channels and the remote channels, collect them per
        evaluation frequency, then solve robustly for the transfer function
        and write the results out with writeTF().

        NOTE(review): the evalFreqEqnsTest* lists below belong to
        alternative solvers that are mostly commented out; only the
        mm-estimate and CM solutions are currently written out.
        """
        # different types of solution
        evalFreqEqnsTest = []
        evalFreqEqnsTest2 = []
        evalFreqEqnsTest3 = []
        evalFreqEqnsTest4 = []
        evalFreqVarsTest4 = []
        evalFreqEqnsTest5 = []
        # for each decimation level
        # read in the shared windows from all sites
        # for each evaluation frequency, store the data from each window
        # and then at the end, perform robust processing
        numLevels = self.getDecParams().getNumLevels()
        inChans = self.getInChannels()
        outChans = self.getOutChannels()
        dataChans = inChans + outChans
        remoteChans = self.getRemoteChannels()
        for iDec in xrange(0, numLevels):
            # print out some info
            self.printText("Processing decimation level {}".format(iDec))
            fs = self.getWinSelector().getDecParams().getSampleFreqLevel(iDec)
            # get the number of all shared windows and the number of unmasked windows
            # unmasked windows are ones that will actually be used in the calculation
            numWindows = self.getWinSelector().getNumSharedWindows(iDec)
            unmaskedWindows = self.getWinSelector().getUnmaskedWindowsLevel(iDec)
            numUnmasked = len(unmaskedWindows)
            self.printText("Total shared windows for decimation level = {}".format(numWindows))
            self.printText("Total unmasked windows for decimation level = {}".format(numUnmasked))
            if numUnmasked == 0:
                # NOTE: the .format(iDec) here has no placeholder to fill - harmless
                self.printText("No unmasked windows found at this decimation level, continuing to next level".format(iDec))
                continue # continue to next decimation level
            self.printText("{} windows will be processed".format(numUnmasked))
            # get the evaluation frequencies
            evalFreq = self.getDecParams().getEvalFrequenciesForLevel(iDec)
            # set some variables
            totalChans = self.getInSize() + self.getOutSize()
            numEvalFreq = len(evalFreq)
            dataSize = self.getWinSelector().getDataSize(iDec)
            # frequency axis of the window spectra (0 .. Nyquist)
            freq = np.linspace(0, fs/2, dataSize)
            # get the window smoothing params
            smoothLen = self.getWindowSmooth(datasize=dataSize)
            # create the data array
            # for each evaluation frequency
            # keep the spectral power information for all windows
            evalFreqData = np.empty(shape=(numEvalFreq, numWindows, totalChans, self.getRemoteSize()), dtype="complex")
            # an array for the in and out channels fourier data
            winDataArray = np.empty(shape=(totalChans, dataSize), dtype="complex")
            # an array for the remote reference fourier data
            winRemoteArray = np.empty(shape=(self.getRemoteSize(), dataSize), dtype="complex")
            # an array for the power spectra data
            winSpectraMatrix = np.empty(shape=(totalChans, self.getRemoteSize(), dataSize), dtype="complex")
            # loop over shared windows
            localWin = 0
            global2local = {}
            for iWin in unmaskedWindows:
                # do the local to global map
                global2local[iWin] = localWin
                # get the window for the input site
                inSF, inReader = self.getWinSelector().getSpecReaderForWindow(self.getInSite(), iDec, iWin)
                inData = inReader.readBinaryWindowGlobal(iWin)
                # get the window and channels for the output site
                if self.getOutSite() != self.getInSite():
                    outSF, outReader = self.getWinSelector().getSpecReaderForWindow(self.getOutSite(), iDec, iWin)
                    outData = outReader.readBinaryWindowGlobal(iWin)
                else:
                    outData = inData
                # now get the remote reference data - assume this does not equal input or output
                remoteSF, remoteReader = self.getWinSelector().getSpecReaderForWindow(self.getRemoteSite(), iDec, iWin)
                remoteData = remoteReader.readBinaryWindowGlobal(iWin)
                # get data into the right part of the arrays
                for i in xrange(0, self.getInSize()):
                    winDataArray[i] = inData[inChans[i]]
                for i in xrange(0, self.getOutSize()):
                    winDataArray[self.getInSize() + i] = outData[outChans[i]]
                for i in xrange(0, self.getRemoteSize()):
                    winRemoteArray[i] = remoteData[remoteChans[i]]
                # and now can fill the parts of the matrix
                # recall, smooth the power spectra
                for iD, dataChan in enumerate(dataChans):
                    for iR, remoteChan in enumerate(remoteChans):
                        # calculate each one, cannot use complex symmetry here
                        # cannot use conjugate symmetry like with the single site processor
                        winSpectraMatrix[iD,iR] = smooth1d(winDataArray[iD] * np.conjugate(winRemoteArray[iR]), smoothLen, self.getWindow())
                # after running through all windows, calculate evaluation frequencies
                # calculate frequency array
                evalFreqData[:, localWin] = self.calcEvalFrequencyData(freq, evalFreq, winSpectraMatrix)
                # increment local window
                localWin = localWin + 1
            # now all the data has been collected
            # for each evaluation frequency, do the robust processing
            # and get the evaluation frequency data
            evalFreqEqns = []
            for eIdx in xrange(0, numEvalFreq):
                self.printText("Processing evaluation frequency = {:.6f} [Hz], period = {:.6f} [s]".format(evalFreq[eIdx], 1.0/evalFreq[eIdx]))
                # get the constrained windows for the evaluation frequency
                evalFreqWindows = self.getWinSelector().getWindowsForFreq(iDec, eIdx)
                if len(evalFreqWindows) == 0: # no windows meet constraints
                    self.printText("No windows found - possibly due to masking")
                    continue
                localWinIndices = []
                for iW in evalFreqWindows:
                    localWinIndices.append(global2local[iW])
                self.printText("{:d} windows will be solved for".format(len(localWinIndices)))
                # restrict processing to data that meets constraints for this evaluation frequency
                # add to class vars
                self.evalFreq.append(evalFreq[eIdx])
                # use process reduced - only the input channels from the remote reference
                # print "Prepare linear equation"
                numSolveWindows, obs, reg = self.prepareLinearEqn(evalFreqData[eIdx, localWinIndices])
                # print "Robust process"
                self.evalFreqEqns.append(self.robustProcess(numSolveWindows, obs, reg))
                # print "Robust process stacking solve"
                # evalFreqEqnsTest.append(self.robustProcessStack(numSolveWindows, obs, reg))
                # print "Robust OLS"
                # evalFreqEqnsTest2.append(self.robustProcessOLS(numSolveWindows, obs, reg))
                # print "Robust stacked"
                # evalFreqEqnsTest3.append(self.stackedProcess(evalFreqData[eIdx, localWinIndices]))
                # print "Robust CM"
                out, var = self.robustProcessCM(numSolveWindows, obs, reg)
                evalFreqEqnsTest4.append(out)
                evalFreqVarsTest4.append(var)
                # evalFreqEqnsTest4.append(self.robustProcessCM(numSolveWindows, obs, reg))
                # evalFreqEqnsTest5.append(self.robustProcessCMMod(numSolveWindows, obs, reg))
        # write out all the data
        self.writeTF(self.getPrepend() + "_mmest", self.evalFreq, self.evalFreqEqns)
        # self.writeTF(self.getPrepend() + "_mestStack", self.evalFreq, evalFreqEqnsTest)
        # self.writeTF(self.getPrepend() + "_ols", self.evalFreq, evalFreqEqnsTest2)
        # self.writeTF(self.getPrepend() + "_stacked", self.evalFreq, evalFreqEqnsTest3)
        self.writeTF(self.getPrepend() + "_cm", self.evalFreq, evalFreqEqnsTest4, variances=evalFreqVarsTest4)
        # self.writeTF(self.getPrepend() + "_cmMod", self.evalFreq, evalFreqEqnsTest5)
###################
### SOLVER ROUTINES
###################
def prepareLinearEqn(self, data):
# prepare observations and regressors for linear processing
numWindows = data.shape[0]
numWindows, data = self.checkForBadValues(numWindows, data)
# for each output variable, have ninput regressor variables
# let's construct our arrays
obs = np.empty(shape=(self.getOutSize(), self.getRemoteSize()*numWindows), dtype="complex")
reg = np.empty(shape=(self.getOutSize(), self.getRemoteSize()*numWindows, self.getInSize()), dtype="complex")
for iW in xrange(0, numWindows):
iOffset = iW*self.getRemoteSize()
for i in xrange(0, self.getOutSize()):
for j in xrange(0, self.getRemoteSize()):
# this is the observation row where,i is the observed output
obs[i, iOffset + j] = data[iW, self.getInSize() + i, j]
for k in xrange(0, self.getInSize()):
reg[i, iOffset + j, k] = data[iW, k, j]
return numWindows, obs, reg
    def robustProcessStack(self, numWindows, obs, reg):
        """Robust solution via iteratively reweighted window stacking.

        For each output channel: compute leverage weights from the hat
        matrix, solve an initial stacked model, then iterate m-estimates
        with huber weights followed by a second pass with bisquare (tukey)
        weights. Returns the solution array of shape (outSize, inSize).
        """
        # loop over the outputs
        output = np.zeros(shape=(self.getOutSize(), self.getInSize()), dtype="complex")
        for i in xrange(0, self.getOutSize()):
            # lets out some easier lettering
            y = obs[i]
            A = reg[i]
            # get some sizes
            n = A.shape[0]
            p = A.shape[1]
            # first calculate the leverage weights
            # this is based on the hat matrix
            q, r = linalg.qr(A)
            Pdiag = np.empty(shape=(n), dtype="float")
            for iRow in xrange(0, n):
                Pdiag[iRow] = np.absolute(np.sum(q[iRow,:]*np.conjugate(q[iRow,:]))).real
            del q, r
            Pdiag = Pdiag/np.max(Pdiag)
            leverageScale = sampleMAD0(Pdiag)
            leverageWeights = getRobustLocationWeights(Pdiag/leverageScale, "huber")
            # Begin with stacking the data and solving
            observation = np.zeros(shape=(self.getRemoteSize()), dtype="complex")
            predictors = np.zeros(shape=(self.getRemoteSize(), self.getInSize()), dtype="complex")
            for iChan in xrange(0, self.getRemoteSize()):
                # now need to have my indexing array
                indexArray = np.arange(iChan, numWindows*self.getRemoteSize(), self.getRemoteSize())
                observation[iChan] = np.sum(y[indexArray])/numWindows
                # now for the regressors
                for j in xrange(0, self.getInSize()):
                    predictors[iChan, j] = np.sum(A[indexArray, j])/numWindows
            initParams, residsStacked, scaleStacked, weightsStacked = mmestimateModel(predictors, observation, intercept=False)
            # calculate out scale and weights
            resids = y - np.dot(A, initParams)
            scale = sampleMAD0(resids)
            weights = getRobustLocationWeights(resids/scale, "huber")*leverageWeights
            # now get m-estimates and do the process again
            maxiter = 50
            iteration = 0
            while iteration < maxiter:
                # now stack with the weights and solve again
                observation = np.zeros(shape=(self.getRemoteSize()), dtype="complex")
                predictors = np.zeros(shape=(self.getRemoteSize(), self.getInSize()), dtype="complex")
                for iChan in xrange(0, self.getRemoteSize()):
                    # now need to have my indexing array
                    indexArray = np.arange(iChan, numWindows*self.getRemoteSize(), self.getRemoteSize())
                    weightsLim = weights[indexArray]
                    # weightsLim = weightsLim/np.sum(weightsLim) # normalise weights to 1
                    observation[iChan] = np.sum(y[indexArray]*weightsLim)/numWindows
                    # now for the regressors
                    for j in xrange(0, self.getInSize()):
                        predictors[iChan, j] = np.sum(A[indexArray, j]*weightsLim)/numWindows
                paramsNew, residsStacked, scaleStacked, weightsStacked = mmestimateModel(predictors, observation)
                # now calculate residsNew etc
                residsNew = y - np.dot(A, paramsNew)
                if np.sum(np.absolute(residsNew)) < eps():
                    # residuals are effectively zero - exact fit
                    # then return everything here
                    params = paramsNew
                    resids = residsNew
                    break
                scale = sampleMAD0(residsNew)
                # standardise and calculate weights
                weightsNew = getRobustLocationWeights(residsNew/scale, "huber")*leverageWeights
                # increment iteration and save weightsNew
                iteration = iteration + 1
                weights = weightsNew
                params = paramsNew
                # check to see whether the change is smaller than the tolerance
                # use the R method of checking change in residuals (can check change in params)
                changeResids = linalg.norm(residsNew-resids)/linalg.norm(residsNew)
                if changeResids < eps():
                    # update residuals
                    resids = residsNew
                    break
                # update residuals
                resids = residsNew
            # another go with tukey weights
            # return to original solution
            # NOTE(review): 'scale' here is the value left over from the
            # huber loop above rather than being recomputed - confirm intended
            resids = y - np.dot(A, initParams)
            weights = getRobustLocationWeights(resids/scale, "bisquare")*leverageWeights
            while iteration < maxiter:
                # now stack with the weights and solve again
                observation = np.zeros(shape=(self.getRemoteSize()), dtype="complex")
                predictors = np.zeros(shape=(self.getRemoteSize(), self.getInSize()), dtype="complex")
                for iChan in xrange(0, self.getRemoteSize()):
                    # now need to have my indexing array
                    indexArray = np.arange(iChan, numWindows*self.getRemoteSize(), self.getRemoteSize())
                    weightsLim = weights[indexArray]
                    # weightsLim = weightsLim/np.sum(weightsLim) # normalise weights to 1
                    observation[iChan] = np.sum(y[indexArray]*weightsLim)/numWindows
                    # now for the regressors
                    for j in xrange(0, self.getInSize()):
                        predictors[iChan, j] = np.sum(A[indexArray, j]*weightsLim)/numWindows
                paramsNew, residsStacked, scaleStacked, weightsStacked = mmestimateModel(predictors, observation)
                # now calculate residsNew etc
                residsNew = y - np.dot(A, paramsNew)
                if np.sum(np.absolute(residsNew)) < eps():
                    # then return everything here
                    params = paramsNew
                    resids = residsNew
                    break
                scale = sampleMAD0(residsNew)
                # standardise and calculate weights
                weightsNew = getRobustLocationWeights(residsNew/scale, "bisquare")*leverageWeights
                # increment iteration and save weightsNew
                iteration = iteration + 1
                weights = weightsNew
                params = paramsNew
                # check to see whether the change is smaller than the tolerance
                # use the R method of checking change in residuals (can check change in params)
                changeResids = linalg.norm(residsNew-resids)/linalg.norm(residsNew)
                if changeResids < eps():
                    # update residuals
                    resids = residsNew
                    break
                # update residuals
                resids = residsNew
            output[i] = params
        return output
def robustProcess(self, numWindows, obs, reg):
# do the mmestimate robust processing for a single evaluation frequency
# create array for output
output = np.empty(shape=(self.getOutSize(), self.getInSize()), dtype="complex")
for i in xrange(0, self.getOutSize()):
observation = obs[i,:]
predictors = reg[i,:,:]
# save the output
out, resids, scale, weights = mmestimateModel(predictors, observation, intercept=self.getIntercept())
# now take the weights, apply to the observations and predictors, stack the appropriate rows and test
observation2 = np.zeros(shape=(self.getRemoteSize()), dtype="complex")
predictors2 = np.zeros(shape=(self.getRemoteSize(), self.getInSize()), dtype="complex")
for iChan in xrange(0, self.getRemoteSize()):
# now need to have my indexing array
indexArray = np.arange(iChan, numWindows*self.getRemoteSize(), self.getRemoteSize())
weightsLim = weights[indexArray]
# weightsLim = weightsLim/np.sum(weightsLim) # normalise weights to 1
observation2[iChan] = np.sum(obs[i, indexArray]*weightsLim)/numWindows
# now for the regressors
for j in xrange(0, self.getInSize()):
predictors2[iChan, j] = np.sum(reg[i, indexArray, j]*weightsLim)/numWindows
out, resids, scale, weights = mmestimateModel(predictors2, observation2, intercept=self.getIntercept())
if self.getIntercept():
output[i] = out[1:]
else:
output[i] = out
return output
def robustProcessCM(self, numWindows, obs, reg):
# do the chatterjeeMachlerMod robust processing for a single evaluation frequency
# create array for output
output = np.empty(shape=(self.getOutSize(), self.getInSize()), dtype="complex")
varOutput = np.empty(shape=(self.getOutSize(), self.getInSize()), dtype="float")
# solve
for i in xrange(0, self.getOutSize()):
observation = obs[i,:]
predictors = reg[i,:,:]
# save the output
out, resids, weights = chatterjeeMachler(predictors, observation, intercept=self.getIntercept())
# now take the weights, apply to the observations and predictors, stack the appropriate rows and test
observation2 = np.zeros(shape=(self.getRemoteSize()), dtype="complex")
predictors2 = np.zeros(shape=(self.getRemoteSize(), self.getInSize()), dtype="complex")
for iChan in xrange(0, self.getRemoteSize()):
# now need to have my indexing array
indexArray = np.arange(iChan, numWindows*self.getRemoteSize(), self.getRemoteSize())
weightsLim = weights[indexArray]
# weightsLim = weightsLim/np.sum(weightsLim) # normalise weights to 1
observation2[iChan] = np.sum(obs[i, indexArray]*weightsLim)/numWindows
# now for the regressors
for j in xrange(0, self.getInSize()):
predictors2[iChan, j] = np.sum(reg[i, indexArray, j]*weightsLim)/numWindows
out, resids, weights = chatterjeeMachler(predictors2, observation2, intercept=self.getIntercept())
# now calculate out the varainces - have the solution out, have the weights
# recalculate out the residuals with the final solution
# calculate standard deviation of residuals
# and then use chatterjee machler formula to estimate variances
# this needs work - better to use an empirical bootstrap method, but this will do for now
resids = np.absolute(observation - np.dot(predictors, out))
scale = sampleMAD0(resids) # some measure of standard deviation, rather than using the standard deviation
residsVar = scale*scale
varPred = np.dot(hermitianTranspose(predictors), weights*predictors)
varPred = np.linalg.inv(varPred) # this is a pxp matrix
varOut = 1.91472*residsVar*varPred
print varOut
varOut = np.diag(varOut).real # this should be a real number
print varOut
if self.getIntercept():
output[i] = out[1:]
varOutput[i] = varOut[1:]
else:
output[i] = out
varOutput[i] = varOut
return output, varOutput
def robustProcessCMMod(self, numWindows, obs, reg):
    """Robust processing (modified Chatterjee-Machler) for one evaluation frequency.

    A first robust solve over all stacked windows yields per-row weights; the
    windows are then collapsed into a single weighted stack per remote channel
    and the system is solved again on the stacked data.

    numWindows -- number of windows stacked into obs/reg
    obs -- per-output-channel observations (rows stacked as windows x remote channels)
    reg -- per-output-channel regressors, matching obs row layout
    Returns a (outSize, inSize) complex array of transfer function estimates.
    """
    nOut = self.getOutSize()
    nIn = self.getInSize()
    nRemote = self.getRemoteSize()
    useIntercept = self.getIntercept()
    output = np.empty(shape=(nOut, nIn), dtype="complex")
    for iOut in xrange(0, nOut):
        # first pass: robust solve on the full stacked system to obtain weights
        sol, res, wts = chatterjeeMachlerMod(reg[iOut, :, :], obs[iOut, :], intercept=useIntercept)
        # second pass: weight-average the windows channel by channel, then re-solve
        obsStacked = np.zeros(shape=(nRemote), dtype="complex")
        regStacked = np.zeros(shape=(nRemote, nIn), dtype="complex")
        for iChan in xrange(0, nRemote):
            # rows for this channel recur every nRemote entries in the stack
            rowSel = np.arange(iChan, numWindows * nRemote, nRemote)
            chanWts = wts[rowSel]
            # weightsLim = weightsLim/np.sum(weightsLim) # normalise weights to 1
            obsStacked[iChan] = np.sum(obs[iOut, rowSel] * chanWts) / numWindows
            for j in xrange(0, nIn):
                regStacked[iChan, j] = np.sum(reg[iOut, rowSel, j] * chanWts) / numWindows
        sol, res, wts = chatterjeeMachlerMod(regStacked, obsStacked, intercept=useIntercept)
        # with an intercept, the first coefficient is the intercept term: drop it
        output[iOut] = sol[1:] if useIntercept else sol
    return output
def robustProcessCMHadi(self, numWindows, obs, reg):
    """Robust processing (Chatterjee-Machler-Hadi) for a single evaluation frequency.

    First solves per output channel over all stacked windows, then uses the
    returned weights to collapse the windows into one weighted stack per remote
    channel and re-solves on the stacked system.

    numWindows -- number of windows stacked into obs/reg
    obs -- per-output-channel observations (rows stacked as windows x remote
        channels -- inferred from the indexing below)
    reg -- per-output-channel regressors, matching obs row layout
    Returns a (outSize, inSize) complex array of transfer function estimates.
    """
    # create array for output
    output = np.empty(shape=(self.getOutSize(), self.getInSize()), dtype="complex")
    # solve, one output channel at a time
    for i in xrange(0, self.getOutSize()):
        observation = obs[i,:]
        predictors = reg[i,:,:]
        # first-pass robust solution; weights has one entry per stacked row
        out, resids, weights = chatterjeeMachlerHadi(predictors, observation, intercept=self.getIntercept())
        # now take the weights, apply to the observations and predictors, stack the appropriate rows and test
        observation2 = np.zeros(shape=(self.getRemoteSize()), dtype="complex")
        predictors2 = np.zeros(shape=(self.getRemoteSize(), self.getInSize()), dtype="complex")
        for iChan in xrange(0, self.getRemoteSize()):
            # rows for this channel recur every remoteSize entries in the stack
            indexArray = np.arange(iChan, numWindows*self.getRemoteSize(), self.getRemoteSize())
            weightsLim = weights[indexArray]
            # weightsLim = weightsLim/np.sum(weightsLim) # normalise weights to 1
            observation2[iChan] = np.sum(obs[i, indexArray]*weightsLim)/numWindows
            # now for the regressors
            for j in xrange(0, self.getInSize()):
                predictors2[iChan, j] = np.sum(reg[i, indexArray, j]*weightsLim)/numWindows
        # NOTE(review): the second solve calls chatterjeeMachlerMod rather than
        # chatterjeeMachlerHadi -- looks like a copy-paste from robustProcessCMMod;
        # confirm whether this is intended
        out, resids, weights = chatterjeeMachlerMod(predictors2, observation2, intercept=self.getIntercept())
        if self.getIntercept():
            # first coefficient is the intercept term: drop it from the output
            output[i] = out[1:]
        else:
            output[i] = out
    return output
def robustProcessOLS(self, numWindows, obs, reg):
    """Ordinary least-squares solve for a single evaluation frequency.

    numWindows -- number of windows stacked into obs/reg (unused here; kept
        for signature parity with the robust variants)
    obs -- per-output-channel observations
    reg -- per-output-channel regressors
    Returns a (outSize, inSize) complex array of transfer function estimates.
    """
    nOut = self.getOutSize()
    useIntercept = self.getIntercept()
    output = np.empty(shape=(nOut, self.getInSize()), dtype="complex")
    for iOut in xrange(0, nOut):
        # plain OLS per output channel; only the solution vector is used
        sol, res, sqRes, rank, sv = olsModel(reg[iOut, :, :], obs[iOut, :], intercept=useIntercept)
        # with an intercept, the first coefficient is the intercept term: drop it
        output[iOut] = sol[1:] if useIntercept else sol
    return output
def stackedProcess(self, data):
    """Solve on window-stacked (averaged) data using an MM-estimator.

    data -- 3D array of windowed cross/auto power data, first axis is windows
    Returns a (outSize, inSize) complex array of transfer function estimates.
    """
    nOut = self.getOutSize()
    nIn = self.getInSize()
    nRemote = self.getRemoteSize()
    useIntercept = self.getIntercept()
    # drop bad windows before averaging
    numWindows = data.shape[0]
    numWindows, data = self.checkForBadValues(numWindows, data)
    # unweighted mean over windows (i.e. normal solution)
    windowMean = np.sum(data, axis=0)
    windowMean = windowMean / numWindows
    # build the observation and regressor arrays per output channel
    obs = np.empty(shape=(nOut, nRemote), dtype="complex")
    reg = np.empty(shape=(nOut, nRemote, nIn), dtype="complex")
    for iOut in xrange(0, nOut):
        for jChan in xrange(0, nRemote):
            # output-channel rows sit after the nIn input-channel rows
            obs[iOut, jChan] = windowMean[nIn + iOut, jChan]
            for k in xrange(0, nIn):
                reg[iOut, jChan, k] = windowMean[k, jChan]
    # solve each output channel with the MM-estimator
    output = np.empty(shape=(nOut, nIn), dtype="complex")
    for iOut in xrange(0, nOut):
        sol, res, scl, wts = mmestimateModel(reg[iOut, :, :], obs[iOut, :], intercept=useIntercept)
        # with an intercept, the first coefficient is the intercept term: drop it
        output[iOut] = sol[1:] if useIntercept else sol
    return output
def checkRemote(self):
    """Return True when the remote reference setup matches the input setup.

    Checks both channel count and channel identity; short-circuits on the
    first mismatch, like the original accumulator form.
    """
    return (self.getRemoteSize() == self.getInSize()
            and self.getRemoteChannels() == self.getInChannels())
###################
### DEBUG
##################
def printInfo(self):
    """Print a summary of the processor configuration via printText."""
    banner = "####################"
    self.printText(banner)
    self.printText("REMOTE REFERENCE PROCESSOR INFO BEGIN")
    self.printText(banner)
    # emit one "label = value" line per configured property
    for label, value in (
            ("In Site", self.getInSite()),
            ("In Channels", self.getInChannels()),
            ("Out Site", self.getOutSite()),
            ("Out Channels", self.getOutChannels()),
            ("Remote Site", self.getRemoteSite()),
            ("Remote Channels", self.getRemoteChannels())):
        self.printText("{} = {}".format(label, value))
    self.printText(banner)
    self.printText("REMOTE REFERENCE PROCESSOR INFO END")
    self.printText(banner)
def printText(self, infoStr):
    # route informational output through the shared print helper with this
    # class's fixed prefix
    generalPrint("Remote Reference Processor Info", infoStr)
def printWarning(self, warnStr):
    # route warning output through the shared warning helper with this
    # class's fixed prefix
    warningPrint("Remote Reference Processor Warning", warnStr)
| |
"""
Registration tools
"""
import os
import os.path as op
import numpy as np
import nibabel as nib
from dipy.align.imwarp import DiffeomorphicMap
from dipy.align import (syn_registration, center_of_mass, translation,
rigid, affine, register_series, )
import dipy.core.gradients as dpg
from dipy.align.streamlinear import whole_brain_slr
import AFQ.utils.models as mut
__all__ = ["syn_register_dwi", "write_mapping", "read_mapping",
"register_dwi", "slr_registration"]
def reduce_shape(shape):
    """
    Reduce dimension in shape to 3 if possible.

    Returns the first three entries of *shape* when it supports slicing;
    otherwise (e.g. ``None``) the input is handed back untouched.
    """
    try:
        reduced = shape[:3]
    except TypeError:
        # not sliceable: return as-is
        return shape
    return reduced
def syn_register_dwi(dwi, gtab, template=None, **syn_kwargs):
    """
    Register DWI data to a template.

    Parameters
    -----------
    dwi : nifti image or str
        Image containing DWI data, or full path to a nifti file with DWI.
    gtab : GradientTable or sequence
        The gradients associated with the DWI data, or the positional
        arguments needed to construct a GradientTable (e.g. bval/bvec paths).
    template : nifti image or str, optional
        Registration target; defaults to the MNI template from AFQ.data.fetch.
    syn_kwargs : key-word arguments for :func:`syn_registration`

    Returns
    -------
    warped_b0, mapping : the mean b0 warped into template space, and the
        DiffeomorphicMap produced by :func:`syn_registration`.
    """
    if template is None:
        # deferred import: only needed when fetching the default template
        import AFQ.data.fetch as afd
        template = afd.read_mni_template()
    if isinstance(template, str):
        template = nib.load(template)

    template_data = template.get_fdata()
    template_affine = template.affine

    if isinstance(dwi, str):
        dwi = nib.load(dwi)

    if not isinstance(gtab, dpg.GradientTable):
        # assume gtab holds the positional args for dpg.gradient_table
        gtab = dpg.gradient_table(*gtab)

    dwi_affine = dwi.affine
    dwi_data = dwi.get_fdata()
    # register the mean b0 volume rather than the full 4D series
    mean_b0 = np.mean(dwi_data[..., gtab.b0s_mask], -1)
    warped_b0, mapping = syn_registration(mean_b0, template_data,
                                          moving_affine=dwi_affine,
                                          static_affine=template_affine,
                                          **syn_kwargs)

    return warped_b0, mapping
def write_mapping(mapping, fname):
    """
    Write out a registration mapping to file.

    Parameters
    ----------
    mapping : a DiffeomorphicMap object derived from :func:`syn_registration`,
        or any object with an ``affine`` attribute.
    fname : str
        Full path to the nifti file storing the mapping.

    Notes
    -----
    For a DiffeomorphicMap, the forward and backward displacement fields are
    stacked on a trailing axis of size 2 and written as a Nifti1Image with the
    codomain world-to-grid affine. Otherwise only ``mapping.affine`` is saved
    via ``np.save`` (numpy appends ``.npy`` to *fname* if it lacks a suffix).
    """
    if isinstance(mapping, DiffeomorphicMap):
        # stack forward/backward into one array, shape (x, y, z, 3, 2)
        mapping_imap = np.array([mapping.forward.T, mapping.backward.T]).T
        nib.save(nib.Nifti1Image(mapping_imap, mapping.codomain_world2grid),
                 fname)
    else:
        np.save(fname, mapping.affine)
def read_mapping(disp, domain_img, codomain_img, prealign=None):
    """
    Read a registration mapping from file.

    Parameters
    ----------
    disp : str, Nifti1Image, or ndarray
        If string, the file must contain an image or an ndarray.
        If image, contains the mapping displacement field in each voxel,
        shape (x, y, z, 3, 2).
        If ndarray, contains the affine transformation used for mapping.
    domain_img : str or Nifti1Image
    codomain_img : str or Nifti1Image
    prealign : ndarray, optional
        Pre-alignment affine, forwarded to the DiffeomorphicMap.

    Returns
    -------
    A :class:`DiffeomorphicMap` object for a displacement field, or a
    ConformedAffineMapping when *disp* holds an affine.
    """
    if isinstance(disp, str):
        # file name decides the payload: nifti -> field, anything else -> np.load
        if "nii.gz" in disp:
            disp = nib.load(disp)
        else:
            disp = np.load(disp)
    if isinstance(domain_img, str):
        domain_img = nib.load(domain_img)
    if isinstance(codomain_img, str):
        codomain_img = nib.load(codomain_img)

    if isinstance(disp, nib.Nifti1Image):
        mapping = DiffeomorphicMap(3, disp.shape[:3],
                                   disp_grid2world=np.linalg.inv(disp.affine),
                                   domain_shape=domain_img.shape[:3],
                                   domain_grid2world=domain_img.affine,
                                   codomain_shape=codomain_img.shape,
                                   codomain_grid2world=codomain_img.affine,
                                   prealign=prealign)
        disp_data = disp.get_fdata().astype(np.float32)
        # trailing axis holds the two fields written by write_mapping
        mapping.forward = disp_data[..., 0]
        mapping.backward = disp_data[..., 1]
        mapping.is_inverse = True
    else:
        # local import (presumably to avoid a circular dependency -- confirm)
        from AFQ.definitions.mapping import ConformedAffineMapping
        mapping = ConformedAffineMapping(
            disp,
            domain_grid_shape=reduce_shape(
                domain_img.shape),
            domain_grid2world=domain_img.affine,
            codomain_grid_shape=reduce_shape(
                codomain_img.shape),
            codomain_grid2world=codomain_img.affine)

    return mapping
def register_dwi(data_files, bval_files, bvec_files,
                 b0_ref=0,
                 pipeline=[center_of_mass, translation, rigid, affine],
                 out_dir=None):
    """
    Register a DWI data-set

    Parameters
    ----------
    data_files : str or list
        Files containing DWI data. If this is a str, that's the full path to a
        single file. If it's a list, each entry is a full path.
    bval_files : str or list
        Equivalent to `data_files`.
    bvec_files : str or list
        Equivalent to `data_files`.
    b0_ref : int, optional
        Index of the b0 volume used as the registration reference. Default: 0.
    pipeline : list, optional
        Sequence of dipy registration stages applied in order.
        NOTE(review): mutable default argument -- harmless only as long as it
        is never mutated.
    out_dir : str, optional
        Output directory; when None, a 'registered' directory is created next
        to `data_files` (this assumes `data_files` is a str -- TODO confirm
        behavior for list input).

    Returns
    -------
    str
        Full path to the registered series written to disk.
    """
    img, data, gtab, mask = mut.prepare_data(data_files,
                                             bval_files,
                                             bvec_files)

    if np.sum(gtab.b0s_mask) > 1:
        # First, register the b0s into one image:
        b0_img = nib.Nifti1Image(data[..., gtab.b0s_mask], img.affine)
        # NOTE(review): register_series is unpacked into two values below but
        # used as a single return here -- verify against the dipy version in use
        trans_b0 = register_series(b0_img, ref=b0_ref, pipeline=pipeline)
        ref_data = np.mean(trans_b0, -1)
    else:
        ref_data = data[..., gtab.b0s_mask]

    # Construct a series out of the DWI and the registered mean B0:
    series = nib.Nifti1Image(np.concatenate([ref_data,
                                             data[...,
                                                  ~gtab.b0s_mask]], -1),
                             img.affine)

    transformed_list, affine_list = register_series(series, ref=0,
                                                    pipeline=pipeline)
    reg_img = nib.Nifti1Image(np.array(transformed_list),
                              img.affine)
    if out_dir is None:
        out_dir = op.join(op.split(data_files)[0], 'registered')
    if not op.exists(out_dir):
        os.makedirs(out_dir)
    path = op.join(out_dir, 'registered.nii.gz')
    nib.save(reg_img, path)
    return path
def slr_registration(moving_data, static_data,
                     moving_affine=None, static_affine=None,
                     moving_shape=None, static_shape=None, **kwargs):
    """Register a source image (moving) to a target image (static)
    using whole-brain streamline-based linear registration (SLR).

    Parameters
    ----------
    moving_data : ndarray
        The source tractography data to be registered
    moving_affine : ndarray
        The affine associated with the moving (source) data.
    moving_shape : ndarray
        The shape of the space associated with the moving (source) data.
    static_data : ndarray
        The target tractography data for registration
    static_affine : ndarray
        The affine associated with the static (target) data.
    static_shape : ndarray
        The shape of the space associated with the static (target) data.
    **kwargs:
        kwargs are passed into whole_brain_slr

    Returns
    -------
    ConformedAffineMapping
    """
    # local import (presumably to avoid a circular dependency -- confirm)
    from AFQ.definitions.mapping import ConformedAffineMapping
    # whole_brain_slr returns four values; only the affine transform is kept
    _, transform, _, _ = whole_brain_slr(
        static_data, moving_data, x0='affine', verbose=False, **kwargs)
    return ConformedAffineMapping(
        transform,
        codomain_grid_shape=reduce_shape(static_shape),
        codomain_grid2world=static_affine,
        domain_grid_shape=reduce_shape(moving_shape),
        domain_grid2world=moving_affine)
| |
# -*- coding: utf-8 -*-
# File: model.py
import tensorflow as tf
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils.argscope import argscope
from tensorpack.tfutils.scope_utils import under_name_scope
from tensorpack.models import (
Conv2D, FullyConnected, layer_register)
from tensorpack.utils.argtools import memoized
from basemodel import GroupNorm
from utils.box_ops import pairwise_iou
from model_box import encode_bbox_target, decode_bbox_target
from config import config as cfg
@under_name_scope()
def proposal_metrics(iou):
    """
    Add training summaries for RPN proposal quality.

    Args:
        iou: nxm, #proposal x #gt
    """
    # best proposal IoU for every GT box, for summary only
    best_iou = tf.reduce_max(iou, axis=0)
    summaries = [tf.reduce_mean(best_iou, name='best_iou_per_gt')]
    with tf.device('/cpu:0'):
        for th in [0.3, 0.5]:
            # fraction of GT boxes covered at this IoU threshold
            hits = tf.count_nonzero(best_iou >= th)
            total = tf.size(best_iou, out_type=tf.int64)
            summaries.append(
                tf.truediv(hits, total, name='recall_iou{}'.format(th)))
    add_moving_summary(*summaries)
@under_name_scope()
def sample_fast_rcnn_targets(boxes, gt_boxes, gt_labels):
    """
    Sample some ROIs from all proposals for training.
    #fg is guaranteed to be > 0, because ground truth boxes are added as RoIs.

    Args:
        boxes: nx4 region proposals, floatbox
        gt_boxes: mx4, floatbox
        gt_labels: m, int32

    Returns:
        A BoxProposals instance.
        sampled_boxes: tx4 floatbox, the rois
        sampled_labels: t int64 labels, in [0, #class). Positive means foreground.
        fg_inds_wrt_gt: #fg indices, each in range [0, m-1].
            It contains the matching GT of each foreground roi.
    """
    iou = pairwise_iou(boxes, gt_boxes)     # nxm
    proposal_metrics(iou)

    # add ground truth as proposals as well
    boxes = tf.concat([boxes, gt_boxes], axis=0)    # (n+m) x 4
    # each appended GT box has IoU 1 with itself: append an identity block
    iou = tf.concat([iou, tf.eye(tf.shape(gt_boxes)[0])], axis=0)   # (n+m) x m
    # #proposal=n+m from now on

    def sample_fg_bg(iou):
        # a proposal is fg when it overlaps any GT above FG_THRESH
        fg_mask = tf.reduce_max(iou, axis=1) >= cfg.FRCNN.FG_THRESH

        fg_inds = tf.reshape(tf.where(fg_mask), [-1])
        # cap fg count at BATCH_PER_IM * FG_RATIO
        num_fg = tf.minimum(int(
            cfg.FRCNN.BATCH_PER_IM * cfg.FRCNN.FG_RATIO),
            tf.size(fg_inds), name='num_fg')
        fg_inds = tf.random_shuffle(fg_inds)[:num_fg]

        bg_inds = tf.reshape(tf.where(tf.logical_not(fg_mask)), [-1])
        # fill the remainder of the batch with bg rois
        num_bg = tf.minimum(
            cfg.FRCNN.BATCH_PER_IM - num_fg,
            tf.size(bg_inds), name='num_bg')
        bg_inds = tf.random_shuffle(bg_inds)[:num_bg]

        add_moving_summary(num_fg, num_bg)
        return fg_inds, bg_inds

    fg_inds, bg_inds = sample_fg_bg(iou)
    # fg,bg indices w.r.t proposals

    best_iou_ind = tf.argmax(iou, axis=1)   # #proposal, each in 0~m-1
    # GT index matched to each sampled fg roi
    fg_inds_wrt_gt = tf.gather(best_iou_ind, fg_inds)   # num_fg

    all_indices = tf.concat([fg_inds, bg_inds], axis=0)   # indices w.r.t all n+m proposal boxes
    ret_boxes = tf.gather(boxes, all_indices)

    # bg rois get label 0 (background)
    ret_labels = tf.concat(
        [tf.gather(gt_labels, fg_inds_wrt_gt),
         tf.zeros_like(bg_inds, dtype=tf.int64)], axis=0)
    # stop the gradient -- they are meant to be training targets
    return BoxProposals(
        tf.stop_gradient(ret_boxes, name='sampled_proposal_boxes'),
        tf.stop_gradient(ret_labels, name='sampled_labels'),
        tf.stop_gradient(fg_inds_wrt_gt),
        gt_boxes, gt_labels)
@layer_register(log_shape=True)
def fastrcnn_outputs(feature, num_classes, class_agnostic_regression=False):
    """
    Build the classification and box-regression outputs of the FastRCNN head.

    Args:
        feature (any shape):
        num_classes(int): num_category + 1
        class_agnostic_regression (bool): if True, regress a single box per roi

    Returns:
        cls_logits: N x num_class classification logits
        reg_logits: N x num_classx4, or Nx1x4 if class agnostic
    """
    classification = FullyConnected(
        'class', feature, num_classes,
        kernel_initializer=tf.random_normal_initializer(stddev=0.01))
    # a single shared box per roi in the class-agnostic case
    if class_agnostic_regression:
        num_classes_for_box = 1
    else:
        num_classes_for_box = num_classes
    box_regression = FullyConnected(
        'box', feature, num_classes_for_box * 4,
        kernel_initializer=tf.random_normal_initializer(stddev=0.001))
    box_regression = tf.reshape(box_regression, (-1, num_classes_for_box, 4), name='output_box')
    return classification, box_regression
@under_name_scope()
def fastrcnn_losses(labels, label_logits, fg_boxes, fg_box_logits):
    """
    Compute classification and box-regression losses for the FastRCNN head.

    Args:
        labels: n,
        label_logits: nxC
        fg_boxes: nfgx4, encoded
        fg_box_logits: nfgxCx4 or nfgx1x4 if class agnostic

    Returns:
        label_loss, box_loss
    """
    # softmax cross-entropy over all sampled rois (fg + bg)
    label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=label_logits)
    label_loss = tf.reduce_mean(label_loss, name='label_loss')

    # positive labels mark foreground rois
    fg_inds = tf.where(labels > 0)[:, 0]
    fg_labels = tf.gather(labels, fg_inds)
    num_fg = tf.size(fg_inds, out_type=tf.int64)
    empty_fg = tf.equal(num_fg, 0)
    if int(fg_box_logits.shape[1]) > 1:
        # class-specific regression: select each fg roi's logits for its own class
        indices = tf.stack(
            [tf.range(num_fg), fg_labels], axis=1)  # #fgx2
        fg_box_logits = tf.gather_nd(fg_box_logits, indices)
    else:
        # class-agnostic regression: a single box per roi
        fg_box_logits = tf.reshape(fg_box_logits, [-1, 4])

    # metrics below are for summaries only, computed on cpu
    with tf.name_scope('label_metrics'), tf.device('/cpu:0'):
        prediction = tf.argmax(label_logits, axis=1, name='label_prediction')
        correct = tf.to_float(tf.equal(prediction, labels))  # boolean/integer gather is unavailable on GPU
        accuracy = tf.reduce_mean(correct, name='accuracy')
        fg_label_pred = tf.argmax(tf.gather(label_logits, fg_inds), axis=1)
        # fg rois wrongly predicted as background
        num_zero = tf.reduce_sum(tf.to_int64(tf.equal(fg_label_pred, 0)), name='num_zero')
        # guard against division by zero when there are no fg rois
        false_negative = tf.where(
            empty_fg, 0., tf.to_float(tf.truediv(num_zero, num_fg)), name='false_negative')
        fg_accuracy = tf.where(
            empty_fg, 0., tf.reduce_mean(tf.gather(correct, fg_inds)), name='fg_accuracy')

    # huber (smooth-L1) loss on fg boxes, normalized by the total roi count
    box_loss = tf.losses.huber_loss(
        fg_boxes, fg_box_logits, reduction=tf.losses.Reduction.SUM)
    box_loss = tf.truediv(
        box_loss, tf.to_float(tf.shape(labels)[0]), name='box_loss')

    add_moving_summary(label_loss, box_loss, accuracy,
                       fg_accuracy, false_negative, tf.to_float(num_fg, name='num_fg_label'))
    return label_loss, box_loss
@under_name_scope()
def fastrcnn_predictions(boxes, scores):
    """
    Generate final results from predictions of all proposals.

    Args:
        boxes: n#classx4 floatbox in float32
        scores: nx#class

    Returns:
        boxes: Kx4
        scores: K
        labels: K
    """
    assert boxes.shape[1] == cfg.DATA.NUM_CLASS
    assert scores.shape[1] == cfg.DATA.NUM_CLASS
    # drop the background class (index 0) and put the class axis first
    boxes = tf.transpose(boxes, [1, 0, 2])[1:, :, :]  # #catxnx4
    boxes.set_shape([None, cfg.DATA.NUM_CATEGORY, None])
    scores = tf.transpose(scores[:, 1:], [1, 0])  # #catxn

    def f(X):
        """
        Per-category filtering + NMS.

        prob: n probabilities
        box: nx4 boxes

        Returns: n boolean, the selection
        """
        prob, box = X
        output_shape = tf.shape(prob)
        # filter by score threshold
        ids = tf.reshape(tf.where(prob > cfg.TEST.RESULT_SCORE_THRESH), [-1])
        prob = tf.gather(prob, ids)
        box = tf.gather(box, ids)
        # NMS within each class
        selection = tf.image.non_max_suppression(
            box, prob, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH)
        selection = tf.to_int32(tf.gather(ids, selection))
        # sort available in TF>1.4.0
        # sorted_selection = tf.contrib.framework.sort(selection, direction='ASCENDING')
        sorted_selection = -tf.nn.top_k(-selection, k=tf.size(selection))[0]
        # scatter kept ids back into a boolean mask over all n boxes
        # (sparse_to_dense requires sorted indices)
        mask = tf.sparse_to_dense(
            sparse_indices=sorted_selection,
            output_shape=output_shape,
            sparse_values=True,
            default_value=False)
        return mask

    masks = tf.map_fn(f, (scores, boxes), dtype=tf.bool,
                      parallel_iterations=10)  # #cat x N
    selected_indices = tf.where(masks)  # #selection x 2, each is (cat_id, box_id)
    scores = tf.boolean_mask(scores, masks)

    # filter again by sorting scores
    topk_scores, topk_indices = tf.nn.top_k(
        scores,
        tf.minimum(cfg.TEST.RESULTS_PER_IM, tf.size(scores)),
        sorted=False)
    filtered_selection = tf.gather(selected_indices, topk_indices)
    cat_ids, box_ids = tf.unstack(filtered_selection, axis=1)

    final_scores = tf.identity(topk_scores, name='scores')
    # +1 restores class ids: class 0 (background) was dropped above
    final_labels = tf.add(cat_ids, 1, name='labels')
    final_ids = tf.stack([cat_ids, box_ids], axis=1, name='all_ids')
    final_boxes = tf.gather_nd(boxes, final_ids, name='boxes')
    return final_boxes, final_scores, final_labels
"""
FastRCNN heads for FPN:
"""
@layer_register(log_shape=True)
def fastrcnn_2fc_head(feature):
    """
    Two fully-connected layers (fc6, fc7) with ReLU.

    Args:
        feature (any shape):

    Returns:
        2D head feature
    """
    init = tf.variance_scaling_initializer()
    hidden = feature
    # two identical FC+ReLU stages
    for layer_name in ['fc6', 'fc7']:
        hidden = FullyConnected(layer_name, hidden, cfg.FPN.FRCNN_FC_HEAD_DIM,
                                kernel_initializer=init, activation=tf.nn.relu)
    return hidden
@layer_register(log_shape=True)
def fastrcnn_Xconv1fc_head(feature, num_convs, norm=None):
    """
    A head with `num_convs` conv layers followed by one fully-connected layer.

    Args:
        feature (NCHW):
        num_convs (int): number of conv layers
        norm (str or None): either None or 'GN'

    Returns:
        2D head feature
    """
    assert norm in [None, 'GN'], norm
    l = feature
    with argscope(Conv2D, data_format='channels_first',
                  kernel_initializer=tf.variance_scaling_initializer(
                      scale=2.0, mode='fan_out', distribution='normal')):
        for k in range(num_convs):
            l = Conv2D('conv{}'.format(k), l, cfg.FPN.FRCNN_CONV_HEAD_DIM, 3, activation=tf.nn.relu)
            if norm is not None:
                # group normalization after every conv
                l = GroupNorm('gn{}'.format(k), l)
        l = FullyConnected('fc', l, cfg.FPN.FRCNN_FC_HEAD_DIM,
                           kernel_initializer=tf.variance_scaling_initializer(), activation=tf.nn.relu)
    return l
def fastrcnn_4conv1fc_head(*args, **kwargs):
    """4 conv + 1 fc head without normalization; see :func:`fastrcnn_Xconv1fc_head`."""
    return fastrcnn_Xconv1fc_head(*args, num_convs=4, **kwargs)
def fastrcnn_4conv1fc_gn_head(*args, **kwargs):
    """4 conv + 1 fc head with GroupNorm; see :func:`fastrcnn_Xconv1fc_head`."""
    return fastrcnn_Xconv1fc_head(*args, num_convs=4, norm='GN', **kwargs)
class BoxProposals(object):
    """
    A structure to manage box proposals and their relations with ground truth.
    """
    def __init__(self, boxes,
                 labels=None, fg_inds_wrt_gt=None,
                 gt_boxes=None, gt_labels=None):
        """
        Args:
            boxes: Nx4
            labels: N, each in [0, #class), the true label for each input box
            fg_inds_wrt_gt: #fg, each in [0, M)
            gt_boxes: Mx4
            gt_labels: M

        The last four arguments could be None when not training.
        """
        # bind every non-None argument as an attribute of the same name;
        # None arguments are left unset so that wrong use fails fast
        for k, v in locals().items():
            if k != 'self' and v is not None:
                setattr(self, k, v)

    # @memoized: tensorpack decorator -- presumably caches the result so each
    # tensor below is only constructed once per instance; confirm semantics
    @memoized
    def fg_inds(self):
        """ Returns: #fg indices in [0, N-1] """
        return tf.reshape(tf.where(self.labels > 0), [-1], name='fg_inds')

    @memoized
    def fg_boxes(self):
        """ Returns: #fg x4"""
        return tf.gather(self.boxes, self.fg_inds(), name='fg_boxes')

    @memoized
    def fg_labels(self):
        """ Returns: #fg"""
        return tf.gather(self.labels, self.fg_inds(), name='fg_labels')

    @memoized
    def matched_gt_boxes(self):
        """ Returns: #fg x 4"""
        return tf.gather(self.gt_boxes, self.fg_inds_wrt_gt)
class FastRCNNHead(object):
    """
    A class to process & decode inputs/outputs of a fastrcnn classification+regression head.
    """
    def __init__(self, proposals, box_logits, label_logits, bbox_regression_weights):
        """
        Args:
            proposals: BoxProposals
            box_logits: Nx#classx4 or Nx1x4, the output of the head
            label_logits: Nx#class, the output of the head
            bbox_regression_weights: a 4 element tensor
        """
        # bind every non-None argument as an attribute of the same name
        for k, v in locals().items():
            if k != 'self' and v is not None:
                setattr(self, k, v)
        self._bbox_class_agnostic = int(box_logits.shape[1]) == 1

    @memoized
    def fg_box_logits(self):
        """ Returns: #fg x ? x 4 """
        return tf.gather(self.box_logits, self.proposals.fg_inds(), name='fg_box_logits')

    @memoized
    def losses(self):
        """ Returns: (label_loss, box_loss) from :func:`fastrcnn_losses`. """
        # regression targets: matched GT boxes encoded against the fg rois,
        # scaled by the regression weights
        encoded_fg_gt_boxes = encode_bbox_target(
            self.proposals.matched_gt_boxes(),
            self.proposals.fg_boxes()) * self.bbox_regression_weights
        return fastrcnn_losses(
            self.proposals.labels, self.label_logits,
            encoded_fg_gt_boxes, self.fg_box_logits()
        )

    @memoized
    def decoded_output_boxes(self):
        """ Returns: N x #class x 4 """
        anchors = tf.tile(tf.expand_dims(self.proposals.boxes, 1),
                          [1, cfg.DATA.NUM_CLASS, 1])  # N x #class x 4
        decoded_boxes = decode_bbox_target(
            self.box_logits / self.bbox_regression_weights,
            anchors
        )
        return decoded_boxes

    @memoized
    def decoded_output_boxes_for_true_label(self):
        """ Returns: Nx4 decoded boxes """
        return self._decoded_output_boxes_for_label(self.proposals.labels)

    @memoized
    def decoded_output_boxes_for_predicted_label(self):
        """ Returns: Nx4 decoded boxes """
        return self._decoded_output_boxes_for_label(self.predicted_labels())

    # BUGFIX: this method was defined as `decoded_output_boxes_for_label` while
    # both callers above invoke `self._decoded_output_boxes_for_label`, which
    # raised AttributeError -- renamed to match the call sites.
    @memoized
    def _decoded_output_boxes_for_label(self, labels):
        """ Returns: Nx4 decoded boxes, each roi's box for its given label. """
        assert not self._bbox_class_agnostic
        # BUGFIX: stack along axis=1 to build (i, labels[i]) index pairs,
        # consistent with the identical pattern in fastrcnn_losses; the default
        # axis=0 produced a 2xN tensor unsuitable for gather_nd here.
        indices = tf.stack([
            tf.range(tf.size(labels, out_type=tf.int64)),
            labels
        ], axis=1)
        needed_logits = tf.gather_nd(self.box_logits, indices)
        decoded = decode_bbox_target(
            needed_logits / self.bbox_regression_weights,
            self.proposals.boxes
        )
        return decoded

    @memoized
    def decoded_output_boxes_class_agnostic(self):
        """ Returns: Nx4 """
        assert self._bbox_class_agnostic
        box_logits = tf.reshape(self.box_logits, [-1, 4])
        decoded = decode_bbox_target(
            box_logits / self.bbox_regression_weights,
            self.proposals.boxes
        )
        return decoded

    @memoized
    def output_scores(self, name=None):
        """ Returns: N x #class scores, summed to one for each box."""
        return tf.nn.softmax(self.label_logits, name=name)

    @memoized
    def predicted_labels(self):
        """ Returns: N ints """
        return tf.argmax(self.label_logits, axis=1, name='predicted_labels')
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for reduce operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
def make_reduce_tests(reduce_op,
                      min_value=-10,
                      max_value=10,
                      boolean_tensor_only=False,
                      allow_fully_quantize=False):
  """Make a set of tests to do reduce operation.

  Args:
    reduce_op: TensorFlow reduce operation to test, i.e. `tf.reduce_mean`.
    min_value: min value for created tensor data.
    max_value: max value for created tensor data.
    boolean_tensor_only: If true, will only generate tensor with boolean value.
    allow_fully_quantize: bool, whether fully_quantize is allowed.

  Returns:
    a function representing the true generator with `reduce_op_in` curried.
  """

  def f(options):
    """Actual function that generates examples."""
    # Each dict is one parameter sweep; the cartesian product of its values
    # defines the generated test cases.
    test_parameters = [
        {
            "input_dtype": [tf.float32, tf.int32, tf.int64],
            "input_shape": [[3, 3, 2, 4]],
            "axis": [
                0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
                [2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1],
                [-1, 0], [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
            ],
            "const_axis": [True, False],
            "keepdims": [True, False],
            "fully_quantize": [False],
        },
        {
            "input_dtype": [tf.float32],
            "input_shape": [[1, 8, 8, 3]],
            "axis": [
                0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2,
                                                        3], [3, 2, 1, 0],
                [3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2, -3, -4,
                [0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],
                [2, 2, 3], [-3, -3, -4], [-3, 2, 1]
            ],
            "const_axis": [True, False],
            "keepdims": [True, False],
            "fully_quantize": [False],
        },
        {
            "input_dtype": [tf.float32],
            "input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
            "axis": [[]],  # shape is: [0]
            "const_axis": [False],
            "keepdims": [True, False],
            "fully_quantize": [False],
        },
        {
            "input_dtype": [tf.float32],
            "input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
            "axis": [None],  # shape is: []
            "const_axis": [True],
            "keepdims": [True, False],
            "fully_quantize": [False],
        },
        {
            "input_dtype": [tf.float32],
            "input_shape": [[3, 3, 2, 4]],
            "axis": [
                0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
                [2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1],
                [-1, 0], [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
            ],
            "const_axis": [True],
            "keepdims": [True, False],
            "fully_quantize": [True],
        },
        {
            "input_dtype": [tf.float32],
            "input_shape": [[1, 8, 8, 4], [1, 8, 8, 3]],
            "axis": [
                0, 1, 2, 3, [0], [1], [2], [3], [-1], [-2], [-3], [1, 2],
                [0, 3], [1, 2, 3], [1, 3], [2, 3]
            ],
            "const_axis": [True],
            "keepdims": [True, False],
            "fully_quantize": [True],
        },
    ]
    # test_parameters include fully_quantize option only when
    # allow_fully_quantize is True.
    if not allow_fully_quantize:
      test_parameters = [
          test_parameter for test_parameter in test_parameters
          if True not in test_parameter["fully_quantize"]
      ]

    def build_graph(parameters):
      """Build the mean op testing graph."""
      dtype = parameters["input_dtype"]
      if boolean_tensor_only:
        dtype = tf.bool
      input_tensor = tf.compat.v1.placeholder(
          dtype=dtype, name="input", shape=parameters["input_shape"])

      # Get axis as either a placeholder or constants.
      if parameters["const_axis"]:
        axis = parameters["axis"]
        input_tensors = [input_tensor]
      else:
        if isinstance(parameters["axis"], list):
          shape = [len(parameters["axis"])]
        else:
          shape = []  # shape for None or integers.
        axis = tf.compat.v1.placeholder(
            dtype=tf.int32, name="axis", shape=shape)
        input_tensors = [input_tensor, axis]
      out = reduce_op(input_tensor, axis=axis, keepdims=parameters["keepdims"])
      return input_tensors, [out]

    def build_inputs(parameters, sess, inputs, outputs):
      """Build the inputs for reduced operators."""
      dtype = parameters["input_dtype"]
      if boolean_tensor_only:
        dtype = tf.bool
      values = [
          create_tensor_data(
              dtype,
              parameters["input_shape"],
              min_value=min_value,
              max_value=max_value)
      ]
      if not parameters["const_axis"]:
        # a non-const axis is fed as an extra input tensor
        values.append(np.array(parameters["axis"]))
      return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

    make_zip_of_tests(options, test_parameters, build_graph, build_inputs)

  return f
@register_make_test_function()
def make_mean_tests(options):
  """Make a set of tests to do mean."""
  # narrow value range keeps quantized tests meaningful
  mean_test_fn = make_reduce_tests(
      tf.reduce_mean,
      min_value=-1,
      max_value=1,
      boolean_tensor_only=False,
      allow_fully_quantize=True)
  return mean_test_fn(options)
@register_make_test_function()
def make_sum_tests(options):
  """Make a set of tests to do sum."""
  # narrow value range keeps quantized tests meaningful
  sum_test_fn = make_reduce_tests(
      tf.reduce_sum,
      min_value=-1,
      max_value=1,
      boolean_tensor_only=False,
      allow_fully_quantize=True)
  return sum_test_fn(options)
@register_make_test_function()
def make_reduce_prod_tests(options):
  """Make a set of tests to do prod."""
  # set min max value to be -2, 2 to avoid overflow.
  # Keyword arguments for consistency with the sibling make_*_tests functions.
  return make_reduce_tests(tf.reduce_prod, min_value=-2, max_value=2)(options)
@register_make_test_function()
def make_reduce_max_tests(options):
  """Make a set of tests to do max."""
  max_test_fn = make_reduce_tests(tf.reduce_max)
  return max_test_fn(options)
@register_make_test_function()
def make_reduce_min_tests(options):
  """Make a set of tests to do min."""
  min_test_fn = make_reduce_tests(tf.reduce_min)
  return min_test_fn(options)
@register_make_test_function()
def make_reduce_any_tests(options):
  """Make a set of tests to do any."""
  # logical reduction only makes sense over boolean tensors
  any_test_fn = make_reduce_tests(tf.reduce_any, boolean_tensor_only=True)
  return any_test_fn(options)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Array operations for RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_ragged_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
#===============================================================================
# Masking
#===============================================================================
@tf_export('ragged.boolean_mask')
@dispatch.add_dispatch_support
def boolean_mask(data, mask, name=None):
  """Applies a boolean mask to `data` without flattening the mask dimensions.
  Returns a potentially ragged tensor that is formed by retaining the elements
  in `data` where the corresponding value in `mask` is `True`.
  * `output[a1...aA, i, b1...bB] = data[a1...aA, j, b1...bB]`
  Where `j` is the `i`th `True` entry of `mask[a1...aA]`.
  Note that `output` preserves the mask dimensions `a1...aA`; this differs
  from `tf.boolean_mask`, which flattens those dimensions.
  Args:
    data: A potentially ragged tensor.
    mask: A potentially ragged boolean tensor. `mask`'s shape must be a prefix
      of `data`'s shape. `rank(mask)` must be known statically.
    name: A name prefix for the returned tensor (optional).
  Returns:
    A potentially ragged tensor that is formed by retaining the elements in
    `data` where the corresponding value in `mask` is `True`.
    * `rank(output) = rank(data)`.
    * `output.ragged_rank = max(data.ragged_rank, rank(mask) - 1)`.
  Raises:
    ValueError: if `rank(mask)` is not known statically; or if `mask.shape` is
      not a prefix of `data.shape`.
  #### Examples:
  >>> # Aliases for True & False so data and mask line up.
  >>> T, F = (True, False)
  >>> tf.ragged.boolean_mask(  # Mask a 2D Tensor.
  ...     data=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
  ...     mask=[[T, F, T], [F, F, F], [T, F, F]]).to_list()
  [[1, 3], [], [7]]
  >>> tf.ragged.boolean_mask(  # Mask a 2D RaggedTensor.
  ...     tf.ragged.constant([[1, 2, 3], [4], [5, 6]]),
  ...     tf.ragged.constant([[F, F, T], [F], [T, T]])).to_list()
  [[3], [], [5, 6]]
  >>> tf.ragged.boolean_mask(  # Mask rows of a 2D RaggedTensor.
  ...     tf.ragged.constant([[1, 2, 3], [4], [5, 6]]),
  ...     tf.ragged.constant([True, False, True])).to_list()
  [[1, 2, 3], [5, 6]]
  """
  with ops.name_scope(name, 'RaggedMask', [data, mask]):
    # Convert inputs to tensors.
    data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
    mask = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        mask, dtypes.bool, name='mask')
    # Unify the row-splits dtype (int32 vs int64) of `data` and `mask`.
    row_splits_dtype, (data, mask) = ragged_tensor.match_row_splits_dtypes(
        data, mask, return_dtype=True)
    # Get static rank of mask.
    if mask.shape.ndims is None:
      raise ValueError('mask.shape.ndims must be known statically.')
    elif mask.shape.ndims == 0:
      raise ValueError('mask cannot be scalar.')
    # If mask is ragged, then recurse with a non-ragged mask.
    if ragged_tensor.is_ragged(mask):
      if not ragged_tensor.is_ragged(data):
        # Give `data` a ragged structure compatible with `mask`, so their
        # nested row splits can be compared below.
        data = ragged_tensor.RaggedTensor.from_tensor(
            data,
            ragged_rank=mask.ragged_rank,
            row_splits_dtype=mask.row_splits.dtype)
      # Check that mask.nested_row_splits is a prefix of
      # data.nested_row_splits.
      splits_list = [
          mask.nested_row_splits, data.nested_row_splits[:mask.ragged_rank]
      ]
      with ops.control_dependencies(
          ragged_util.assert_splits_match(splits_list)):
        # Strip off ragged `splits` until `mask` is non-ragged. Keep the splits
        # that we strip off in `splits`, so we can add them back on after
        # we recursively mask the non-ragged data.
        splits = []
        while ragged_tensor.is_ragged(mask):
          if mask.shape.ndims > 2:
            # This dimension is untouched by the mask: keep its splits as-is.
            splits.append(mask.row_splits)
          else:
            # Count the number of True mask values in each row to find the
            # lengths of the filtered rows; then convert to splits.
            int_mask = ragged_functional_ops.map_flat_values(
                math_ops.cast, mask, dtype=row_splits_dtype)
            masked_row_lengths = ragged_math_ops.reduce_sum(int_mask, axis=1)
            splits.append(ragged_util.lengths_to_splits(masked_row_lengths))
          mask = mask.values
          data = data.values
        # Recursively apply the nested non-ragged mask to the nested data.
        masked_values = boolean_mask(data, mask)
        # Add the ragged `splits` back to the result.
        masked_values = ragged_tensor.RaggedTensor.from_nested_row_splits(
            masked_values, splits, validate=False)
        return masked_values
    # If mask is non-ragged and has rank 1, and data is ragged, then build a
    # ragged tensor with the indicated rows.
    elif ragged_tensor.is_ragged(data) and mask.shape.ndims == 1:
      # Get the masked splits: first get the length of each row, then filter
      # out the rows that we are deleting, and convert that filtered set of
      # masks back to a splits tensor.
      lengths = data.row_lengths()
      masked_lengths = array_ops.boolean_mask(lengths, mask)
      masked_splits = ragged_util.lengths_to_splits(masked_lengths)
      # Get the masked values: first get row ids corresponding to each
      # value, then use tf.gather to build a boolean mask that's false for
      # values that come from rows that we are deleting, and use that mask to
      # construct the masked values tensor.
      segment_ids = segment_id_ops.row_splits_to_segment_ids(data.row_splits)
      segment_mask = array_ops.gather(mask, segment_ids)
      masked_values = boolean_mask(data.values, segment_mask)
      return ragged_tensor.RaggedTensor.from_row_splits(
          masked_values, masked_splits, validate=False)
    # If mask is non-ragged and has rank>1, then convert it to be ragged,
    # with a ragged rank matching data.
    if ragged_tensor.is_ragged(data):
      mask = ragged_tensor.RaggedTensor.from_tensor(
          mask,
          ragged_rank=min(data.ragged_rank, mask.shape.ndims - 1),
          row_splits_dtype=data.row_splits.dtype)
      return boolean_mask(data, mask)
    # Otherwise, data and mask are both `Tensor`s.
    else:
      # Apply `boolean_mask` to get the masked values.
      masked_values = array_ops.boolean_mask(data, mask)
      if mask.shape.ndims >= 2:
        # Add the innermost ragged dimension. For each innermost cell, get the
        # number of values it contains. Then flatten that to get a list of
        # cell lengths, and convert it to splits. Finally, combine the splits
        # and values to get the innermost ragged tensor.
        masked_lengths = math_ops.count_nonzero(
            mask, axis=-1, dtype=row_splits_dtype)
        flattened_masked_lengths = array_ops.reshape(masked_lengths, [-1])
        masked_values = ragged_tensor.RaggedTensor.from_row_lengths(
            masked_values, flattened_masked_lengths, validate=False)
        # Wrap remaining ragged dimensions.
        if mask.shape.ndims > 2:
          mask_shape = array_ops.shape(mask, out_type=row_splits_dtype)
          # Number of splits needed at dimension `dim` is the number of cells
          # in dimensions 0..dim (cumulative product of sizes) plus one.
          split_size = math_ops.cumprod(mask_shape) + 1
          for dim in range(mask.shape.ndims - 3, -1, -1):
            elt_size = mask_shape[dim + 1]
            # These dimensions are uniform, so the splits are an even stride.
            masked_splits = math_ops.range(split_size[dim]) * elt_size
            masked_values = ragged_tensor.RaggedTensor.from_row_splits(
                masked_values, masked_splits, validate=False)
      return masked_values
#===============================================================================
# Tiling
#===============================================================================
def tile(input, multiples, name=None):  # pylint: disable=redefined-builtin
  """Constructs a `RaggedTensor` by tiling a given `RaggedTensor`.

  Each value of `input` is replicated `multiples[i]` times along dimension
  `i`; for every dimension `axis`, the length of each output element is
  therefore the length of the corresponding input element multiplied by
  `multiples[axis]`.

  Args:
    input: A `RaggedTensor`.
    multiples: A 1-D integer `Tensor`. Length must be the same as the number of
      dimensions in `input`.
    name: A name for the operation (optional).

  Returns:
    A `RaggedTensor` with the same type, rank, and ragged_rank as `input`.

  #### Example:
  >>> rt = tf.ragged.constant([[1, 2], [3]])
  >>> tf.tile(rt, [3, 2]).to_list()
  [[1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3], [1, 2, 1, 2], [3, 3]]
  """
  with ops.name_scope(name, 'RaggedTile', [input, multiples]):
    input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        input, name='input')
    if not ragged_tensor.is_ragged(input):
      # Dense inputs go straight to the standard tile op.
      return array_ops.tile(input, multiples, name)
    multiples = ragged_util.convert_to_int_tensor(
        multiples, name='multiples', dtype=input.row_splits.dtype)
    multiples.shape.assert_has_rank(1)
    # When `multiples` is statically known, the helpers below can skip
    # dimensions whose multiple is 1.
    const_multiples = tensor_util.constant_value(multiples)
    tiled_values = _tile_ragged_values(input, multiples, const_multiples)
    tiled_splits = _tile_ragged_splits(input, multiples, const_multiples)
    return ragged_tensor.RaggedTensor.from_nested_row_splits(
        tiled_values, tiled_splits, validate=False)
def _tile_ragged_values(rt_input, multiples, const_multiples=None):
  """Builds flat_values tensor for a tiled `RaggedTensor`.
  Returns a tensor that repeats the values in `rt_input.flat_values` in the
  appropriate pattern to construct a `RaggedTensor` that tiles `rt_input` as
  specified by `multiples`.
  Args:
    rt_input: The `RaggedTensor` whose values should be repeated.
    multiples: A 1-D integer `tensor`, indicating how many times each dimension
      should be repeated.
    const_multiples: Optional constant value for multiples. Used to skip tiling
      dimensions where `multiples=1`.
  Returns:
    A `Tensor` with the same type and rank as `rt_input.flat_values`.
  #### Example:
  >>> rt = tf.ragged.constant([[1, 2], [3]])
  >>> _tile_ragged_values(rt, tf.constant([3, 2])).numpy()
  array([1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3, 1, 2, 1, 2, 3, 3], dtype=int32)
  """
  ragged_rank = rt_input.ragged_rank
  nested_splits = rt_input.nested_row_splits
  # Pointers to the values in `rt_input.flat_values`.
  inner_value_ids = math_ops.range(nested_splits[-1][-1])
  # For each ragged dimension (working from the innermost to outermost),
  # expand `inner_value_ids` as necessary to tile that dimension.
  prev_splits = None
  for axis in range(ragged_rank, 0, -1):
    # Ragged splits for this dimension.
    splits = nested_splits[axis - 1]
    # Adjust splits so they point into `inner_value_ids` (instead of just
    # pointing into the next dimension's values).  The inner dimension was
    # expanded by a factor of `multiples[axis + 1]` on the previous pass.
    if prev_splits is not None:  # Not the first pass through the loop.
      splits = array_ops.gather(prev_splits * multiples[axis + 1], splits)
    # Repeat each element in this ragged dimension `multiples[axis]` times.
    if const_multiples is None or const_multiples[axis] != 1:
      inner_value_ids = ragged_util.repeat_ranges(inner_value_ids, splits,
                                                  multiples[axis])
    prev_splits = splits
  # Gather the tiled inner values.
  ragged_tiled_values = array_ops.gather(rt_input.flat_values, inner_value_ids)
  # Tile the flat_values for the uniform dimensions (i.e., for `axis=0` plus
  # `axis=range(ragged_rank, rank)`).
  inner_repeats = array_ops.concat([multiples[:1], multiples[ragged_rank + 1:]],
                                   axis=0)
  return array_ops.tile(ragged_tiled_values, inner_repeats)
def _tile_ragged_splits(rt_input, multiples, const_multiples=None):
  """Builds nested_split tensors for a tiled `RaggedTensor`.
  Returns a list of split tensors that can be used to construct the
  `RaggedTensor` that tiles `rt_input` as specified by `multiples`.
  Args:
    rt_input: The `RaggedTensor` that is being tiled.
    multiples: A 1-D integer `tensor`, indicating how many times each dimension
      should be repeated.
    const_multiples: Optional constant value for multiples. Used to skip tiling
      dimensions where `multiples=1`.
  Returns:
    A list of 1-D integer `Tensor`s (one for each ragged dimension in
    `rt_input`).
  #### Example:
  >>> rt = tf.ragged.constant([[1, 2], [3]])
  >>> _tile_ragged_splits(rt, [3, 2])
  [<tf.Tensor: shape=(7,), dtype=int64,
  numpy=array([ 0,  4,  6, 10, 12, 16, 18])>]
  """
  ragged_rank = rt_input.ragged_rank
  nested_splits = rt_input.nested_row_splits
  # projected_splits[src_axis, dst_axis] contains the split points that divide
  # the rows from src_axis in the list of dst_axis values. E.g.,
  # projected_splits[i, i] = nested_splits[i], and
  # projected_splits[i, i+1] = gather(nested_splits[i+1], nested_splits[i]).
  projected_splits = [{i: nested_splits[i]} for i in range(ragged_rank)]
  for src_axis in range(ragged_rank):
    for dst_axis in range(src_axis + 1, ragged_rank - 1):
      # Compose the projection one level at a time: project src_axis's rows
      # through the previous destination's split points.
      projected_splits[src_axis][dst_axis] = array_ops.gather(
          nested_splits[dst_axis], projected_splits[src_axis][dst_axis - 1])
  # For each ragged dimension: nested_splits[axis] -> result_splits[axis].
  result_splits = []
  for axis in range(ragged_rank):
    # Get the length of each row for the input tensor for this dimension.
    input_lengths = nested_splits[axis][1:] - nested_splits[axis][:-1]
    # Multiply those lengths by the `multiples` of dimension axis+1, since
    # each value will be repeated that number of times.
    output_lengths = input_lengths * multiples[axis + 1]
    # Repeat ranges of the row lengths as necessary for them to be tiled in
    # each ragged dimension `d < axis`. (Start with dimension d=axis-1, and
    # work our way up to dimension d=0.)
    repeats = 1
    for d in range(axis - 1, -1, -1):
      if const_multiples is None or const_multiples[d + 1] != 1:
        # `repeats` accounts for the expansion already applied by the
        # dimensions between `d` and `axis`.
        splits = projected_splits[d][axis - 1] * repeats
        output_lengths = ragged_util.repeat_ranges(output_lengths, splits,
                                                   multiples[d + 1])
      repeats *= multiples[d + 1]
    # Tile splits for the outermost (uniform) dimension.
    output_lengths = array_ops.tile(output_lengths, multiples[:1])
    # Convert to splits.
    result_splits.append(ragged_util.lengths_to_splits(output_lengths))
  return result_splits
#===============================================================================
# Reshaping
#===============================================================================
def expand_dims(input, axis, name=None):  # pylint: disable=redefined-builtin
  """Inserts a dimension with shape 1 into a potentially ragged tensor's shape.

  Given a potentially ragged tensor `input`, inserts a size-1 dimension at
  position `axis` of its shape. Ragged dimensions (shown in parentheses
  below) are preserved:

  input.shape             | axis | result.shape
  ----------------------- | ---- | -----------------------------
  `[D1, D2]`              | `0`  | `[1, D1, D2]`
  `[D1, D2]`              | `1`  | `[D1, 1, D2]`
  `[D1, D2]`              | `2`  | `[D1, D2, 1]`
  `[D1, (D2), (D3), D4]`  | `0`  | `[1, D1, (D2), (D3), D4]`
  `[D1, (D2), (D3), D4]`  | `1`  | `[D1, 1, (D2), (D3), D4]`
  `[D1, (D2), (D3), D4]`  | `2`  | `[D1, (D2), 1, (D3), D4]`
  `[D1, (D2), (D3), D4]`  | `3`  | `[D1, (D2), (D3), 1, D4]`
  `[D1, (D2), (D3), D4]`  | `4`  | `[D1, (D2), (D3), D4, 1]`

  Args:
    input: The potentially ragged tensor to expand with a new dimension.
    axis: An integer constant indicating where the new dimension should be
      inserted.
    name: A name for the operation (optional).

  Returns:
    A tensor with the same values as `input`, with an added dimension of
    size 1 at `axis`.

  #### Examples:
  >>> rt = tf.ragged.constant([[1, 2], [3]])
  >>> print(rt.shape)
  (2, None)
  >>> expanded = tf.expand_dims(rt, axis=0)
  >>> print(expanded.shape, expanded)
  (1, 2, None) <tf.RaggedTensor [[[1, 2], [3]]]>
  >>> expanded = tf.expand_dims(rt, axis=1)
  >>> print(expanded.shape, expanded)
  (2, 1, None) <tf.RaggedTensor [[[1, 2]], [[3]]]>
  >>> expanded = tf.expand_dims(rt, axis=2)
  >>> print(expanded.shape, expanded)
  (2, None, 1) <tf.RaggedTensor [[[1], [2]], [[3]]]>
  """
  with ops.name_scope(name, 'RaggedExpandDims', [input]):
    input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        input, name='input')
    if not ragged_tensor.is_ragged(input):
      # Dense inputs are handled by the standard op.
      return array_ops.expand_dims(input, axis)
    # The result has one more dimension than the input.
    if input.shape.ndims is None:
      result_ndims = None
    else:
      result_ndims = input.shape.ndims + 1
    axis = array_ops.get_positive_axis(axis, result_ndims,
                                       ndims_name='rank(input)')
    if axis == 0:
      # New outermost dimension: a single row holding every existing row.
      return ragged_tensor.RaggedTensor.from_uniform_row_length(
          input, uniform_row_length=input.nrows(), nrows=1, validate=False)
    if axis == 1:
      # Wrap each existing row in its own length-1 row.
      return ragged_tensor.RaggedTensor.from_uniform_row_length(
          input, uniform_row_length=1, nrows=input.nrows(), validate=False)
    # axis >= 2: recurse into the row values with the axis shifted inward.
    return input.with_values(expand_dims(input.values, axis - 1))
#===============================================================================
# RaggedTensor Size
#===============================================================================
def size(input, out_type=dtypes.int32, name=None):  # pylint: disable=redefined-builtin
  """Returns the size of a potentially ragged tensor.

  The size of a ragged tensor is the number of elements in its inner
  (`flat_values`) tensor.

  #### Example:
  >>> tf.size(tf.ragged.constant([[1, 2], [3]])).numpy()
  3

  Args:
    input: A potentially ragged `Tensor`.
    out_type: The numeric output type for the operation.
    name: A name for the operation (optional).

  Returns:
    A Tensor of type `out_type`.
  """
  if not ragged_tensor.is_ragged(input):
    # Dense input: defer directly to the standard size op.
    return array_ops.size(input, out_type=out_type, name=name)
  # Ragged input: the element count is the size of the flat values.
  return array_ops.size(input.flat_values, out_type=out_type, name=name)
#===============================================================================
# ragged.rank
#===============================================================================
def rank(input, name=None):  # pylint: disable=redefined-builtin
  """Returns the rank of a RaggedTensor.

  Returns a 0-D `int32` `Tensor` representing the rank of `input`.

  #### Example:
  >>> # shape of tensor 't' is [2, None, None]
  >>> t = tf.ragged.constant([[[1], [2, 2]], [[3, 3, 3], [4, 4, 4, 4]]])
  >>> tf.rank(t).numpy()
  3

  Args:
    input: A `RaggedTensor`
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, 'RaggedRank', [input]) as name:
    if ragged_tensor.is_ragged(input):
      # Total rank = number of ragged dimensions + rank of the inner values.
      return input.ragged_rank + array_ops.rank(input.flat_values)
    return array_ops.rank(input, name)
#===============================================================================
# ragged.one_hot
#===============================================================================
def ragged_one_hot(indices,
                   depth,
                   on_value=None,
                   off_value=None,
                   axis=None,
                   dtype=None,
                   name=None):
  """Applies tf.one_hot along the values of a RaggedTensor.

  The encoding is computed over `indices.flat_values`, so the new one-hot
  axis always lands in the dense (inner) part of the tensor. `on_value`,
  `off_value`, `dtype`, and `name` are forwarded to `array_ops.one_hot`
  unchanged.

  Args:
    indices: A `RaggedTensor` of indices.
    depth: Scalar depth of the one-hot dimension.
    on_value: Forwarded to `array_ops.one_hot`.
    off_value: Forwarded to `array_ops.one_hot`.
    axis: Position for the new dimension. A non-negative value must be
      greater than `indices.ragged_rank`, and is translated to be relative
      to the flat values; -1 is the only supported negative value.
    dtype: Forwarded to `array_ops.one_hot`.
    name: A name for the operation (optional).

  Returns:
    A `RaggedTensor` with the one-hot encoding applied to its flat values.

  Raises:
    ValueError: If `axis` is non-negative and `axis <= indices.ragged_rank`.
  """
  # array_ops.one_hot operates on the flat values, so a non-negative `axis`
  # must be re-expressed relative to those values; the only negative `axis`
  # value supported by array_ops.one_hot is -1, which passes through as-is.
  if isinstance(axis, int) and axis >= 0:
    if axis <= indices.ragged_rank:
      raise ValueError('axis (%d) must be greater than indices.ragged_rank '
                       '(%d).' % (axis, indices.ragged_rank))
    axis -= indices.ragged_rank
  with ops.name_scope(name, 'RaggedOneHot',
                      [indices, depth, on_value, off_value, axis]):
    indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        indices, name='indices')
    encoded = array_ops.one_hot(indices.flat_values, depth, on_value,
                                off_value, axis, dtype, name)
    return indices.with_flat_values(encoded)
#===============================================================================
# ragged.stack_dynamic_partitions
#===============================================================================
@tf_export('ragged.stack_dynamic_partitions')
@dispatch.add_dispatch_support
def stack_dynamic_partitions(data, partitions, num_partitions, name=None):
  """Stacks dynamic partitions of a Tensor or RaggedTensor.
  Returns a RaggedTensor `output` with `num_partitions` rows, where the row
  `output[i]` is formed by stacking all slices `data[j1...jN]` such that
  `partitions[j1...jN] = i`. Slices of `data` are stacked in row-major
  order.
  If `num_partitions` is an `int` (not a `Tensor`), then this is equivalent to
  `tf.ragged.stack(tf.dynamic_partition(data, partitions, num_partitions))`.
  #### Example:
  >>> data           = ['a', 'b', 'c', 'd', 'e']
  >>> partitions     = [  3,   0,   2,   2,   3]
  >>> num_partitions = 5
  >>> tf.ragged.stack_dynamic_partitions(data, partitions, num_partitions)
  <tf.RaggedTensor [[b'b'], [], [b'c', b'd'], [b'a', b'e'], []]>
  Args:
    data: A `Tensor` or `RaggedTensor` containing the values to stack.
    partitions: An `int32` or `int64` `Tensor` or `RaggedTensor` specifying the
      partition that each slice of `data` should be added to. `partitions.shape`
      must be a prefix of `data.shape`. Values must be greater than or equal to
      zero, and less than `num_partitions`. `partitions` is not required to be
      sorted.
    num_partitions: An `int32` or `int64` scalar specifying the number of
      partitions to output. This determines the number of rows in `output`.
    name: A name prefix for the returned tensor (optional).
  Returns:
    A `RaggedTensor` containing the stacked partitions. The returned tensor
    has the same dtype as `data`, and its shape is
    `[num_partitions, (D)] + data.shape[partitions.rank:]`, where `(D)` is a
    ragged dimension whose length is the number of data slices stacked for
    each `partition`.
  """
  with ops.name_scope(name, 'SegmentStack', [data, partitions, num_partitions]):
    # Convert inputs to tensors.
    data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
    row_splits_dtype = (
        data.row_splits.dtype
        if isinstance(data, ragged_tensor.RaggedTensor) else None)
    partitions = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        partitions, name='partitions', preferred_dtype=row_splits_dtype)
    num_partitions = ops.convert_to_tensor(
        num_partitions, name='num_partitions', preferred_dtype=partitions.dtype)
    # Keep `partitions` and `num_partitions` in the same integer dtype as
    # `data`'s row splits, so the factory calls below don't mix dtypes.
    if row_splits_dtype is not None:
      partitions = math_ops.cast(partitions, row_splits_dtype)
    num_partitions = math_ops.cast(num_partitions, partitions.dtype)
    # Sanity-checks for shapes.
    partitions_rank = partitions.shape.ndims
    if partitions_rank is None:
      raise ValueError('partitions must have known rank.')
    num_partitions.shape.assert_has_rank(0)
    partitions.shape.assert_is_compatible_with(data.shape[:partitions_rank])
    if partitions_rank == 0:
      # If partitions is a scalar, then just create a RaggedTensor containing
      # the complete `data` value as a single slice in the specified row.
      return ragged_tensor.RaggedTensor.from_value_rowids(
          values=array_ops.stack([data]),
          value_rowids=array_ops.stack([partitions]),
          nrows=num_partitions,
          validate=False)
    elif partitions_rank == 1:
      # If partitions is a vector (the typical case): we can just use data and
      # partitions as the `values` and `value_rowids` for `from_value_rowids`,
      # as long as we sort them first.  A stable sort preserves the row-major
      # order of slices within each partition.
      permutation = sort_ops.argsort(partitions, stable=True)
      value_rowids = array_ops.gather(partitions, permutation)
      values = array_ops.gather(data, permutation)
      # After sorting, the last rowid is the maximum; checking just that one
      # value validates the whole vector.
      check = check_ops.assert_less(
          value_rowids[-1:],
          num_partitions,
          message='partitions must be less than num_partitions')
      with ops.control_dependencies([check]):
        return ragged_tensor.RaggedTensor.from_value_rowids(
            values, value_rowids, nrows=num_partitions, validate=False)
    else:
      # Handle higher-dimensional partitions via recursion.
      if not isinstance(data, ragged_tensor.RaggedTensor):
        data = ragged_tensor.RaggedTensor.from_tensor(
            data, row_splits_dtype=partitions.dtype, ragged_rank=1)
      if not isinstance(partitions, ragged_tensor.RaggedTensor):
        partitions = ragged_tensor.RaggedTensor.from_tensor(
            partitions,
            row_splits_dtype=partitions.dtype,
            ragged_rank=max(data.ragged_rank, partitions_rank - 1))
      check = check_ops.assert_equal(
          data.row_splits,
          partitions.row_splits,
          message='data and partitions have incompatible ragged shapes')
      with ops.control_dependencies([check]):
        return stack_dynamic_partitions(data.values, partitions.values,
                                        num_partitions)
#===============================================================================
# Reverse
#===============================================================================
def reverse(tensor, axis, name=None):
  """Reverses a RaggedTensor along the specified axes.
  #### Example:
  >>> data = tf.ragged.constant([
  ...     [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10], [11, 12]]])
  >>> tf.reverse(data, axis=[0, 2])
  <tf.RaggedTensor [[[8, 7], [10, 9], [12, 11]], [[6, 5]], [[2, 1], [4, 3]]]>
  Args:
    tensor: A 'RaggedTensor' to reverse.
    axis: A list or tuple of 'int' or a constant 1D 'tf.Tensor'. The indices of
      the axes to reverse.
    name: A name prefix for the returned tensor (optional).
  Returns:
    A 'RaggedTensor'.
  Raises:
    TypeError: If `axis` is neither a list/tuple of `int` nor a tensor whose
      value is statically known.
  """
  # Fix: the two literal fragments previously concatenated without a space,
  # producing '...a constant tensorwhen reversing...' in the raised message.
  type_error_msg = ('`axis` must be a list of int or a constant tensor '
                    'when reversing axes in a ragged tensor')
  with ops.name_scope(name, 'Reverse', [tensor, axis]):
    if isinstance(axis, ops.Tensor):
      # Only statically-known axis values can be handled, since they are
      # needed below to build the Python-level slice pattern.
      axis = tensor_util.constant_value(axis)
      if axis is None:
        raise TypeError(type_error_msg)
    elif not (isinstance(axis, (list, tuple)) and
              all(isinstance(dim, int) for dim in axis)):
      raise TypeError(type_error_msg)
    tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        tensor, name='tensor')
    # Allow usage of negative values to specify innermost axes.
    axis = [
        array_ops.get_positive_axis(dim, tensor.shape.rank, 'axis[%d]' % i,
                                    'rank(tensor)')
        for i, dim in enumerate(axis)
    ]
    # We only need to slice up to the max axis. If the axis list
    # is empty, it should be 0.
    slices = [slice(None)] * (max(axis) + 1 if axis else 0)
    # Reversal is expressed as a step of -1 on each requested axis.
    for dim in axis:
      slices[dim] = slice(None, None, -1)
    return tensor[tuple(slices)]
#===============================================================================
# Cross
#===============================================================================
@tf_export('ragged.cross')
@dispatch.add_dispatch_support
def cross(inputs, name=None):
  """Generates feature cross from a list of tensors.

  Every input must have `rank=2` and all inputs must share the same number
  of rows. Row `i` of the result contains every combination formed by
  picking one value from row `i` of each input; the picked values are
  joined with '_X_'. E.g.:

  >>> tf.ragged.cross([tf.ragged.constant([['a'], ['b', 'c']]),
  ...                  tf.ragged.constant([['d'], ['e']]),
  ...                  tf.ragged.constant([['f'], ['g']])])
  <tf.RaggedTensor [[b'a_X_d_X_f'], [b'b_X_e_X_g', b'c_X_e_X_g']]>

  Args:
    inputs: A list of `RaggedTensor` or `Tensor` or `SparseTensor`.
    name: Optional name for the op.

  Returns:
    A 2D `RaggedTensor` of type `string`.
  """
  return _cross_internal(inputs=inputs, name=name, hashed_output=False)
@tf_export('ragged.cross_hashed')
@dispatch.add_dispatch_support
def cross_hashed(inputs, num_buckets=0, hash_key=None, name=None):
  """Generates hashed feature cross from a list of tensors.

  Every input must have `rank=2` and all inputs must share the same number
  of rows. Row `i` of the result contains every combination formed by
  picking one value from row `i` of each input; each combination's value
  is produced by hashing the fingerprints of the picked values together.
  E.g.:

  >>> tf.ragged.cross_hashed([tf.ragged.constant([['a'], ['b', 'c']]),
  ...                         tf.ragged.constant([['d'], ['e']]),
  ...                         tf.ragged.constant([['f'], ['g']])],
  ...                        num_buckets=100)
  <tf.RaggedTensor [[78], [66, 74]]>

  Args:
    inputs: A list of `RaggedTensor` or `Tensor` or `SparseTensor`.
    num_buckets: A non-negative `int` that used to bucket the hashed values. If
      `num_buckets != 0`, then `output = hashed_value % num_buckets`.
    hash_key: Integer hash_key that will be used by the `FingerprintCat64`
      function. If not given, a default key is used.
    name: Optional name for the op.

  Returns:
    A 2D `RaggedTensor` of type `int64`.
  """
  return _cross_internal(
      hashed_output=True,
      inputs=inputs,
      num_buckets=num_buckets,
      hash_key=hash_key,
      name=name)
# Hash key used by `cross_hashed` / `_cross_internal` when the caller does
# not supply one.
_DEFAULT_CROSS_HASH_KEY = 0xDECAFCAFFE
def _cross_internal(inputs,
                    hashed_output=False,
                    num_buckets=0,
                    hash_key=None,
                    name=None):
  """Generates feature cross from a list of ragged and dense tensors.

  Shared implementation for `cross` and `cross_hashed`.

  Args:
    inputs: A list (or tuple) of `RaggedTensor`, `Tensor`, or `SparseTensor`
      values. Integer inputs are cast to int64; all other inputs must be
      strings. Ragged inputs must have `ragged_rank=1`.
    hashed_output: If true, combined values are fingerprint-hashed and the
      result is `int64`; otherwise values are joined as strings.
    num_buckets: When hashing, the hashed value is bucketized modulo this
      count (0 disables bucketing).
    hash_key: Optional uint64 key for `FingerprintCat64`; defaults to
      `_DEFAULT_CROSS_HASH_KEY`.
    name: Optional name for the op.

  Returns:
    A rank-2 `RaggedTensor` of type `string` (or `int64` when
    `hashed_output` is true).

  Raises:
    TypeError: If `inputs` is not a list or tuple.
    ValueError: If an input has an unsupported dtype, or a ragged input has
      `ragged_rank != 1`.
  """
  if not isinstance(inputs, (tuple, list)):
    raise TypeError('Inputs must be a list')
  if hash_key is None:
    hash_key = _DEFAULT_CROSS_HASH_KEY
  # The op takes the ragged/sparse/dense inputs as three separate lists;
  # `input_order` records how to interleave them back into original order.
  ragged_inputs = []
  sparse_inputs = []
  dense_inputs = []
  input_order = []
  with ops.name_scope(name, 'RaggedCross', inputs):
    for i, t in enumerate(inputs):
      if sparse_tensor.is_sparse(t):
        t = sparse_tensor.SparseTensor.from_value(t)
      else:
        t = ragged_tensor.convert_to_tensor_or_ragged_tensor(t)
      if t.dtype.is_integer:
        t = math_ops.cast(t, dtypes.int64)
      elif t.dtype != dtypes.string:
        raise ValueError('Unexpected dtype for inputs[%d]: %s' % (i, t.dtype))
      if isinstance(t, ragged_tensor.RaggedTensor):
        if t.ragged_rank != 1:
          raise ValueError('tf.ragged.cross only supports inputs with rank=2')
        ragged_inputs.append(t)
        input_order.append('R')
      elif isinstance(t, sparse_tensor.SparseTensor):
        sparse_inputs.append(t)
        input_order.append('S')
      else:
        dense_inputs.append(t)
        input_order.append('D')
    out_values_type = dtypes.int64 if hashed_output else dtypes.string
    # Use int32 row splits only when every ragged input already uses int32.
    if ragged_inputs and all(
        t.row_splits.dtype == dtypes.int32 for t in ragged_inputs):
      out_row_splits_type = dtypes.int32
    else:
      out_row_splits_type = dtypes.int64
    # Convert hash_key from uint64 -> int64, since we need to pass it via
    # an int64 attr.  Note: int64 max is 2**63 - 1, so any value at or above
    # 2**63 (including 2**63 itself, which the previous `>` test missed)
    # must wrap to its two's-complement representation.
    if hash_key >= 2**63:
      hash_key -= 2**64
    values_out, splits_out = gen_ragged_array_ops.ragged_cross(
        ragged_values=[rt.values for rt in ragged_inputs],
        ragged_row_splits=[rt.row_splits for rt in ragged_inputs],
        sparse_indices=[st.indices for st in sparse_inputs],
        sparse_values=[st.values for st in sparse_inputs],
        sparse_shape=[st.dense_shape for st in sparse_inputs],
        dense_inputs=dense_inputs,
        input_order=''.join(input_order),
        hashed_output=hashed_output,
        num_buckets=num_buckets,
        hash_key=hash_key,
        out_values_type=out_values_type.as_datatype_enum,
        out_row_splits_type=out_row_splits_type.as_datatype_enum,
        name=name)
    return ragged_tensor.RaggedTensor.from_row_splits(
        values_out, splits_out, validate=False)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_messaging.rpc import dispatcher
import six
import webob
from heat.common import exception
from heat.common.i18n import _
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine.hot import template as hot_tmpl
from heat.engine import resources
from heat.engine import service
from heat.engine import stack as parser
from heat.engine import template as tmpl
from heat.tests import common
from heat.tests.openstack.nova import fakes as fakes_nova
from heat.tests import utils
test_template_volumeattach = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Delete",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": "test_KeyName"
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/%s"
}
}
}
}
'''
test_template_ref = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "%s" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
test_template_findinmap_valid = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
test_template_findinmap_invalid = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
}
},
"Mappings" : {
"AWSInstanceType2Arch" : {
"t1.micro" : { "Arch" : "64" },
"m1.small" : { "Arch" : "64" },
"m1.medium" : { "Arch" : "64" },
"m1.large" : { "Arch" : "64" },
"m1.xlarge" : { "Arch" : "64" },
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"cc1.4xlarge" : { "Arch" : "64HVM" },
"cc2.8xlarge" : { "Arch" : "64HVM" },
"cg1.4xlarge" : { "Arch" : "64HVM" }
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : {
"Fn::FindInMap" : [
"DistroArch2AMI", { "Ref" : "LinuxDistribution" },
{ "Fn::FindInMap" : [
"AWSInstanceType2Arch",
{ "Ref" : "InstanceType" }, "Arch" ] } ]
},
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName"}
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
test_template_invalid_resources = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "AWS CloudFormation Sample Template for xyz.",
"Parameters" : {
"InstanceType" : {
"Description" : "Defined instance type",
"Type" : "String",
"Default" : "node.ee",
"AllowedValues" : ["node.ee", "node.apache", "node.api"],
"ConstraintDescription" : "must be a valid instance type."
}
},
"Resources" : {
"Type" : "AWS::EC2::Instance"
}
}
'''
test_template_invalid_property = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2 KeyPai",
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"UnknownProperty": "unknown"
}
}
}
}
'''
test_template_unimplemented_property = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SourceDestCheck": "false"
}
}
}
}
'''
test_template_invalid_deletion_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Destroy",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_snapshot_deletion_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Snapshot",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_volume_snapshot = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"DeletionPolicy": "Snapshot",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
}
}
}
'''
test_unregistered_key = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_image = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
test_template_invalid_secgroups = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SecurityGroups": [ "default" ],
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
test_template_invalid_secgroupids = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SecurityGroupIds": [ "default" ],
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
test_template_glance_client_exception = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Delete",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large"
}
}
}
}
'''
test_template_unique_logical_name = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String"
},
"AName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String",
}
},
"Resources" : {
"AName": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
test_template_cfn_parameter_label = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
"Description" : "Name of an existing EC2KeyPair",
"Type" : "String",
"Label" : "Nova KeyPair Name"
}
},
"Resources" : {
"AName": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
test_template_hot_parameter_label = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameters:
KeyName:
type: string
description: Name of an existing key pair to use for the instance
label: Nova KeyPair Name
resources:
my_instance:
type: AWS::EC2::Instance
properties:
KeyName: { get_param: KeyName }
ImageId: { get_param: ImageId }
InstanceType: { get_param: InstanceType }
outputs:
instance_ip:
description: The IP address of the deployed instance
value: { get_attr: [my_instance, PublicIp] }
'''
test_template_duplicate_parameters = '''
# This is a hello world HOT template just defining a single compute instance
heat_template_version: 2013-05-23
parameter_groups:
- label: Server Group
description: A group of parameters for the server
parameters:
- InstanceType
- KeyName
- ImageId
- label: Database Group
description: A group of parameters for the database
parameters:
- db_password
- db_port
- InstanceType
parameters:
KeyName:
type: string
description: Name of an existing key pair to use for the instance
InstanceType:
type: string
description: Instance type for the instance to be created
default: m1.small
constraints:
- allowed_values: [m1.tiny, m1.small, m1.large]
description: Value must be one of 'm1.tiny', 'm1.small' or 'm1.large'
ImageId:
type: string
description: ID of the image to use for the instance
# parameters below are not used in template, but are for verifying parameter
# validation support in HOT
db_password:
type: string
description: Database password
hidden: true
constraints:
- length: { min: 6, max: 8 }
description: Password length must be between 6 and 8 characters
- allowed_pattern: "[a-zA-Z0-9]+"
description: Password must consist of characters and numbers only
- allowed_pattern: "[A-Z]+[a-zA-Z0-9]*"
description: Password must start with an uppercase character
db_port:
type: number
description: Database port number
default: 50000
constraints:
- range: { min: 40000, max: 60000 }
description: Port number must be between 40000 and 60000
resources:
my_instance:
# Use an AWS resource type since this exists; so why use other name here?
type: AWS::EC2::Instance
properties:
KeyName: { get_param: KeyName }
ImageId: { get_param: ImageId }
InstanceType: { get_param: InstanceType }
outputs:
instance_ip:
description: The IP address of the deployed instance
value: { get_attr: [my_instance, PublicIp] }
'''
test_template_invalid_parameter_name = '''
# This is a hello world HOT template just defining a single compute instance
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameter_groups:
- label: Server Group
description: A group of parameters for the server
parameters:
- InstanceType
- KeyName
- ImageId
- label: Database Group
description: A group of parameters for the database
parameters:
- db_password
- db_port
- SomethingNotHere
parameters:
KeyName:
type: string
description: Name of an existing key pair to use for the instance
InstanceType:
type: string
description: Instance type for the instance to be created
default: m1.small
constraints:
- allowed_values: [m1.tiny, m1.small, m1.large]
description: Value must be one of 'm1.tiny', 'm1.small' or 'm1.large'
ImageId:
type: string
description: ID of the image to use for the instance
# parameters below are not used in template, but are for verifying parameter
# validation support in HOT
db_password:
type: string
description: Database password
hidden: true
constraints:
- length: { min: 6, max: 8 }
description: Password length must be between 6 and 8 characters
- allowed_pattern: "[a-zA-Z0-9]+"
description: Password must consist of characters and numbers only
- allowed_pattern: "[A-Z]+[a-zA-Z0-9]*"
description: Password must start with an uppercase character
db_port:
type: number
description: Database port number
default: 50000
constraints:
- range: { min: 40000, max: 60000 }
description: Port number must be between 40000 and 60000
resources:
my_instance:
# Use an AWS resource type since this exists; so why use other name here?
type: AWS::EC2::Instance
properties:
KeyName: { get_param: KeyName }
ImageId: { get_param: ImageId }
InstanceType: { get_param: InstanceType }
outputs:
instance_ip:
description: The IP address of the deployed instance
value: { get_attr: [my_instance, PublicIp] }
'''
test_template_hot_no_parameter_label = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameters:
KeyName:
type: string
description: Name of an existing key pair to use for the instance
resources:
my_instance:
type: AWS::EC2::Instance
properties:
KeyName: { get_param: KeyName }
ImageId: { get_param: ImageId }
InstanceType: { get_param: InstanceType }
'''
test_template_no_parameters = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameter_groups:
- label: Server Group
description: A group of parameters for the server
- label: Database Group
description: A group of parameters for the database
resources:
server:
type: OS::Nova::Server
'''
test_template_parameter_groups_not_list = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameter_groups:
label: Server Group
description: A group of parameters for the server
parameters:
key_name: heat_key
label: Database Group
description: A group of parameters for the database
parameters:
public_net: public
resources:
server:
type: OS::Nova::Server
'''
test_template_parameters_not_list = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameter_groups:
- label: Server Group
description: A group of parameters for the server
parameters:
key_name: heat_key
public_net: public
resources:
server:
type: OS::Nova::Server
'''
test_template_parameters_error_no_label = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameter_groups:
- parameters:
key_name: heat_key
resources:
server:
type: OS::Nova::Server
'''
test_template_parameters_duplicate_no_label = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameters:
key_name:
type: string
description: Name of an existing key pair to use for the instance
default: heat_key
parameter_groups:
- parameters:
- key_name
- parameters:
- key_name
resources:
server:
type: OS::Nova::Server
'''
test_template_invalid_parameter_no_label = '''
heat_template_version: 2013-05-23
description: >
Hello world HOT template that just defines a single compute instance.
Contains just base features to verify base HOT support.
parameter_groups:
- parameters:
- key_name
resources:
server:
type: OS::Nova::Server
'''
test_template_allowed_integers = '''
heat_template_version: 2013-05-23
parameters:
size:
type: number
constraints:
- allowed_values: [1, 4, 8]
resources:
my_volume:
type: OS::Cinder::Volume
properties:
size: { get_param: size }
'''
test_template_allowed_integers_str = '''
heat_template_version: 2013-05-23
parameters:
size:
type: number
constraints:
- allowed_values: ['1', '4', '8']
resources:
my_volume:
type: OS::Cinder::Volume
properties:
size: { get_param: size }
'''
test_template_default_override = '''
heat_template_version: 2013-05-23
description: create a network
parameters:
net_name:
type: string
default: defaultnet
description: Name of private network to be created
resources:
private_net:
type: OS::Neutron::Net
properties:
name: { get_param: net_name }
'''
test_template_no_default = '''
heat_template_version: 2013-05-23
description: create a network
parameters:
net_name:
type: string
description: Name of private network to be created
resources:
private_net:
type: OS::Neutron::Net
properties:
name: { get_param: net_name }
'''
test_template_invalid_outputs = '''
heat_template_version: 2013-05-23
resources:
random_str:
type: OS::Heat::RandomString
outputs:
string:
value: {get_attr: [[random_str, value]]}
'''
class ValidateTest(common.HeatTestCase):
def setUp(self):
super(ValidateTest, self).setUp()
resources.initialise()
self.fc = fakes_nova.FakeClient()
self.gc = fakes_nova.FakeClient()
resources.initialise()
self.ctx = utils.dummy_context()
self.mock_isa = mock.patch(
'heat.engine.resource.Resource.is_service_available',
return_value=True)
self.mock_is_service_available = self.mock_isa.start()
self.addCleanup(self.mock_isa.stop)
self.engine = service.EngineService('a', 't')
    def _mock_get_image_id_success(self, imageId_input, imageId):
        """Stub glance lookups so any number of calls resolving
        imageId_input return imageId (mox-style stub on self.m)."""
        self.m.StubOutWithMock(glance.GlanceClientPlugin,
                               'find_image_by_name_or_id')
        glance.GlanceClientPlugin.find_image_by_name_or_id(
            imageId_input).MultipleTimes().AndReturn(imageId)

    def _mock_get_image_id_fail(self, image_id, exp):
        """Stub glance lookups so one call resolving image_id raises exp."""
        self.m.StubOutWithMock(glance.GlanceClientPlugin,
                               'find_image_by_name_or_id')
        glance.GlanceClientPlugin.find_image_by_name_or_id(
            image_id).AndRaise(exp)
def test_validate_volumeattach_valid(self):
t = template_format.parse(test_template_volumeattach % 'vdq')
stack = parser.Stack(self.ctx, 'test_stack', tmpl.Template(t))
volumeattach = stack['MountPoint']
self.assertIsNone(volumeattach.validate())
def test_validate_volumeattach_invalid(self):
t = template_format.parse(test_template_volumeattach % 'sda')
stack = parser.Stack(self.ctx, 'test_stack', tmpl.Template(t))
volumeattach = stack['MountPoint']
self.assertRaises(exception.StackValidationFailed,
volumeattach.validate)
def test_validate_ref_valid(self):
t = template_format.parse(test_template_ref % 'WikiDatabase')
res = dict(self.engine.validate_template(self.ctx, t, {}))
self.assertEqual('test.', res['Description'])
def test_validate_with_environment(self):
test_template = test_template_ref % 'WikiDatabase'
test_template = test_template.replace('AWS::EC2::Instance',
'My::Instance')
t = template_format.parse(test_template)
params = {'resource_registry': {'My::Instance': 'AWS::EC2::Instance'}}
res = dict(self.engine.validate_template(self.ctx, t, params))
self.assertEqual('test.', res['Description'])
    def test_validate_hot_valid(self):
        # A minimal well-formed HOT template must validate and echo its
        # description back in the result.
        t = template_format.parse(
            """
            heat_template_version: 2013-05-23
            description: test.
            resources:
              my_instance:
                type: AWS::EC2::Instance
            """)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        self.assertEqual('test.', res['Description'])
def test_validate_ref_invalid(self):
t = template_format.parse(test_template_ref % 'WikiDatabasez')
res = dict(self.engine.validate_template(self.ctx, t, {}))
self.assertNotEqual(res['Description'], 'Successfully validated')
def test_validate_findinmap_valid(self):
t = template_format.parse(test_template_findinmap_valid)
res = dict(self.engine.validate_template(self.ctx, t, {}))
self.assertEqual('test.', res['Description'])
def test_validate_findinmap_invalid(self):
t = template_format.parse(test_template_findinmap_invalid)
res = dict(self.engine.validate_template(self.ctx, t, {}))
self.assertNotEqual(res['Description'], 'Successfully validated')
    def test_validate_parameters(self):
        """Parameter schemas report Type/Description/NoEcho/Label keys."""
        t = template_format.parse(test_template_ref % 'WikiDatabase')
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        # Note: the assertion below does not expect a CFN dict of the parameter
        # but a dict of the parameters.Schema object.
        # For API CFN backward compatibility, formating to CFN is done in the
        # API layer in heat.engine.api.format_validate_parameter.
        expected = {'KeyName': {
            'Type': 'String',
            'Description': 'Name of an existing EC2KeyPair',
            'NoEcho': 'false',
            'Label': 'KeyName'}}
        self.assertEqual(expected, res['Parameters'])
def test_validate_parameters_env_override(self):
t = template_format.parse(test_template_default_override)
env_params = {'net_name': 'betternetname'}
res = dict(self.engine.validate_template(self.ctx, t, env_params))
self.assertEqual('defaultnet',
res['Parameters']['net_name']['Default'])
self.assertEqual('betternetname',
res['Parameters']['net_name']['Value'])
def test_validate_parameters_env_provided(self):
t = template_format.parse(test_template_no_default)
env_params = {'net_name': 'betternetname'}
res = dict(self.engine.validate_template(self.ctx, t, env_params))
self.assertEqual('betternetname',
res['Parameters']['net_name']['Value'])
self.assertNotIn('Default', res['Parameters']['net_name'])
    def test_validate_hot_empty_parameters_valid(self):
        # A bare (null) "parameters:" section must validate and report an
        # empty parameter schema.
        t = template_format.parse(
            """
            heat_template_version: 2013-05-23
            description: test.
            parameters:
            resources:
              my_instance:
                type: AWS::EC2::Instance
            """)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        self.assertEqual({}, res['Parameters'])
    def test_validate_hot_parameter_label(self):
        """A HOT 'label' keyword is surfaced as the parameter's Label."""
        t = template_format.parse(test_template_hot_parameter_label)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        parameters = res['Parameters']
        expected = {'KeyName': {
            'Type': 'String',
            'Description': 'Name of an existing key pair to use for the '
                           'instance',
            'NoEcho': 'false',
            'Label': 'Nova KeyPair Name'}}
        self.assertEqual(expected, parameters)

    def test_validate_hot_no_parameter_label(self):
        """Without a label the parameter name itself is used as Label."""
        t = template_format.parse(test_template_hot_no_parameter_label)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        parameters = res['Parameters']
        expected = {'KeyName': {
            'Type': 'String',
            'Description': 'Name of an existing key pair to use for the '
                           'instance',
            'NoEcho': 'false',
            'Label': 'KeyName'}}
        self.assertEqual(expected, parameters)

    def test_validate_cfn_parameter_label(self):
        """A CFN 'Label' keyword is surfaced as the parameter's Label."""
        t = template_format.parse(test_template_cfn_parameter_label)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        parameters = res['Parameters']
        expected = {'KeyName': {
            'Type': 'String',
            'Description': 'Name of an existing EC2KeyPair',
            'NoEcho': 'false',
            'Label': 'Nova KeyPair Name'}}
        self.assertEqual(expected, parameters)
    def test_validate_hot_parameter_type(self):
        # Each HOT parameter type maps to its CFN-style reported name
        # (string -> String, json -> Json, etc.).
        t = template_format.parse(
            """
            heat_template_version: 2013-05-23
            parameters:
              param1:
                type: string
              param2:
                type: number
              param3:
                type: json
              param4:
                type: comma_delimited_list
              param5:
                type: boolean
            """)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        parameters = res['Parameters']
        # make sure all the types are reported correctly
        self.assertEqual('String', parameters["param1"]["Type"])
        self.assertEqual('Number', parameters["param2"]["Type"])
        self.assertEqual('Json', parameters["param3"]["Type"])
        self.assertEqual('CommaDelimitedList', parameters["param4"]["Type"])
        self.assertEqual('Boolean', parameters["param5"]["Type"])
    def test_validate_hot_empty_resources_valid(self):
        """An empty (null) resources section is tolerated."""
        t = template_format.parse(
            """
            heat_template_version: 2013-05-23
            description: test.
            resources:
            """)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        expected = {"Description": "test.",
                    "Parameters": {}}
        self.assertEqual(expected, res)

    def test_validate_hot_empty_outputs_valid(self):
        """An empty (null) outputs section is tolerated."""
        t = template_format.parse(
            """
            heat_template_version: 2013-05-23
            description: test.
            outputs:
            """)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        expected = {"Description": "test.",
                    "Parameters": {}}
        self.assertEqual(expected, res)
def test_validate_properties(self):
t = template_format.parse(test_template_invalid_property)
res = dict(self.engine.validate_template(self.ctx, t, {}))
self.assertEqual(
{'Error': 'Property error: Resources.WikiDatabase.Properties: '
'Unknown Property UnknownProperty'}, res)
    def test_invalid_resources(self):
        # The engine interpolates the python *type* of the offending key
        # into the message; six.text_type is used here because parsed
        # template keys are text.
        # NOTE(review): presumably relies on the json/yaml parser yielding
        # text keys -- confirm against heat.engine.service.
        t = template_format.parse(test_template_invalid_resources)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        self.assertEqual({'Error': 'Resources must contain Resource. '
                          'Found a [%s] instead' % six.text_type},
                         res)
    def test_invalid_section_cfn(self):
        """An unknown top-level section ('Output') fails CFN validation."""
        t = template_format.parse(
            """
            {
                'AWSTemplateFormatVersion': '2010-09-09',
                'Resources': {
                    'server': {
                        'Type': 'OS::Nova::Server'
                    }
                },
                'Output': {}
            }
            """)
        res = dict(self.engine.validate_template(self.ctx, t))
        self.assertEqual({'Error': 'The template section is invalid: Output'},
                         res)

    def test_invalid_section_hot(self):
        """An unknown top-level section ('output') fails HOT validation."""
        t = template_format.parse(
            """
            heat_template_version: 2013-05-23
            resources:
              server:
                type: OS::Nova::Server
            output:
            """)
        res = dict(self.engine.validate_template(self.ctx, t))
        self.assertEqual({'Error': 'The template section is invalid: output'},
                         res)
def test_unimplemented_property(self):
t = template_format.parse(test_template_unimplemented_property)
res = dict(self.engine.validate_template(self.ctx, t, {}))
self.assertEqual(
{'Error': 'Property error: Resources.WikiDatabase.Properties: '
'Property SourceDestCheck not implemented yet'},
res)
    def test_invalid_deletion_policy(self):
        """An unknown DeletionPolicy value is rejected."""
        t = template_format.parse(test_template_invalid_deletion_policy)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        self.assertEqual({'Error': 'Invalid deletion policy "Destroy"'}, res)

    def test_snapshot_deletion_policy(self):
        """Snapshot deletion policy is unsupported on EC2 instances."""
        t = template_format.parse(test_template_snapshot_deletion_policy)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        self.assertEqual(
            {'Error': '"Snapshot" deletion policy not supported'}, res)

    def test_volume_snapshot_deletion_policy(self):
        """Snapshot deletion policy is accepted on volumes."""
        t = template_format.parse(test_template_volume_snapshot)
        res = dict(self.engine.validate_template(self.ctx, t, {}))
        self.assertEqual({'Description': u'test.', 'Parameters': {}}, res)
def test_validate_template_without_resources(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
''')
res = dict(self.engine.validate_template(self.ctx, hot_tpl, {}))
expected = {'Description': 'No description', 'Parameters': {}}
self.assertEqual(expected, res)
def test_validate_template_with_invalid_resource_type(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
Type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
res = dict(self.engine.validate_template(self.ctx, hot_tpl, {}))
self.assertEqual({'Error': '"Type" is not a valid keyword '
'inside a resource definition'}, res)
def test_validate_template_with_invalid_resource_properties(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
Properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
res = dict(self.engine.validate_template(self.ctx, hot_tpl, {}))
self.assertEqual({'Error': '"Properties" is not a valid keyword '
'inside a resource definition'}, res)
def test_validate_template_with_invalid_resource_matadata(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
Metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
res = dict(self.engine.validate_template(self.ctx, hot_tpl, {}))
self.assertEqual({'Error': '"Metadata" is not a valid keyword '
'inside a resource definition'}, res)
def test_validate_template_with_invalid_resource_depends_on(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
DependsOn: dummy
deletion_policy: dummy
update_policy:
foo: bar
''')
res = dict(self.engine.validate_template(self.ctx, hot_tpl, {}))
self.assertEqual({'Error': '"DependsOn" is not a valid keyword '
'inside a resource definition'}, res)
def test_validate_template_with_invalid_resource_deletion_policy(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
DeletionPolicy: dummy
update_policy:
foo: bar
''')
res = dict(self.engine.validate_template(self.ctx, hot_tpl, {}))
self.assertEqual({'Error': '"DeletionPolicy" is not a valid '
'keyword inside a resource definition'},
res)
def test_validate_template_with_invalid_resource_update_policy(self):
hot_tpl = template_format.parse('''
heat_template_version: 2013-05-23
resources:
resource1:
type: AWS::EC2::Instance
properties:
property1: value1
metadata:
foo: bar
depends_on: dummy
deletion_policy: dummy
UpdatePolicy:
foo: bar
''')
res = dict(self.engine.validate_template(self.ctx, hot_tpl, {}))
self.assertEqual({'Error': '"UpdatePolicy" is not a valid '
'keyword inside a resource definition'},
res)
    def test_unregistered_key(self):
        """Validation fails for a keypair name nova does not know."""
        t = template_format.parse(test_unregistered_key)
        params = {'KeyName': 'not_registered'}
        template = tmpl.Template(t, env=environment.Environment(params))
        stack = parser.Stack(self.ctx, 'test_stack', template)
        self.stub_FlavorConstraint_validate()
        self.stub_ImageConstraint_validate()
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.StackValidationFailed, resource.validate)
        self.m.VerifyAll()
    def test_unregistered_image(self):
        """Validation fails when the referenced image does not exist."""
        t = template_format.parse(test_template_image)
        template = tmpl.Template(t,
                                 env=environment.Environment(
                                     {'KeyName': 'test'}))
        stack = parser.Stack(self.ctx, 'test_stack', template)
        self._mock_get_image_id_fail('image_name',
                                     exception.EntityNotFound(
                                         entity='Image',
                                         name='image_name'))
        self.stub_KeypairConstraint_validate()
        self.stub_FlavorConstraint_validate()
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.StackValidationFailed, resource.validate)
        self.m.VerifyAll()

    def test_duplicated_image(self):
        """Validation fails when the image name matches several images."""
        t = template_format.parse(test_template_image)
        template = tmpl.Template(t,
                                 env=environment.Environment(
                                     {'KeyName': 'test'}))
        stack = parser.Stack(self.ctx, 'test_stack', template)
        self._mock_get_image_id_fail('image_name',
                                     exception.PhysicalResourceNameAmbiguity(
                                         name='image_name'))
        self.stub_KeypairConstraint_validate()
        self.stub_FlavorConstraint_validate()
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.StackValidationFailed,
                          resource.validate)
        self.m.VerifyAll()
    def test_invalid_security_groups_with_nics(self):
        """SecurityGroups and NetworkInterfaces may not both be set."""
        t = template_format.parse(test_template_invalid_secgroups)
        template = tmpl.Template(t,
                                 env=environment.Environment(
                                     {'KeyName': 'test'}))
        stack = parser.Stack(self.ctx, 'test_stack', template)
        self._mock_get_image_id_success('image_name', 'image_id')
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.ResourcePropertyConflict,
                          resource.validate)
        self.m.VerifyAll()

    def test_invalid_security_group_ids_with_nics(self):
        """SecurityGroupIds and NetworkInterfaces may not both be set."""
        t = template_format.parse(test_template_invalid_secgroupids)
        template = tmpl.Template(
            t, env=environment.Environment({'KeyName': 'test'}))
        stack = parser.Stack(self.ctx, 'test_stack', template)
        self._mock_get_image_id_success('image_name', 'image_id')
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.ResourcePropertyConflict,
                          resource.validate)
        self.m.VerifyAll()
    def test_client_exception_from_glance_client(self):
        """A glance HTTPNotFound during image lookup surfaces as StackValidationFailed."""
        t = template_format.parse(test_template_glance_client_exception)
        template = tmpl.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template)
        # Record: glance image lookup raises 404.
        self.m.StubOutWithMock(self.gc.images, 'get')
        self.gc.images.get('image_name').AndRaise(glance.exc.HTTPNotFound())
        self.m.StubOutWithMock(glance.GlanceClientPlugin, 'client')
        glance.GlanceClientPlugin.client().AndReturn(self.gc)
        self.stub_FlavorConstraint_validate()
        self.m.ReplayAll()
        self.assertRaises(exception.StackValidationFailed, stack.validate)
        self.m.VerifyAll()
def test_validate_unique_logical_name(self):
t = template_format.parse(test_template_unique_logical_name)
template = tmpl.Template(
t, env=environment.Environment(
{'AName': 'test', 'KeyName': 'test'}))
stack = parser.Stack(self.ctx, 'test_stack', template)
self.assertRaises(exception.StackValidationFailed, stack.validate)
def test_validate_duplicate_parameters_in_group(self):
t = template_format.parse(test_template_duplicate_parameters)
template = hot_tmpl.HOTemplate20130523(
t, env=environment.Environment({
'KeyName': 'test',
'ImageId': 'sometestid',
'db_password': 'Pass123'
}))
stack = parser.Stack(self.ctx, 'test_stack', template)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('Parameter Groups error: '
'parameter_groups.Database '
'Group: The InstanceType parameter must be '
'assigned to one parameter group only.'),
six.text_type(exc))
def test_validate_duplicate_parameters_no_label(self):
t = template_format.parse(test_template_parameters_duplicate_no_label)
template = hot_tmpl.HOTemplate20130523(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('Parameter Groups error: '
'parameter_groups.: '
'The key_name parameter must be '
'assigned to one parameter group only.'),
six.text_type(exc))
def test_validate_invalid_parameter_in_group(self):
t = template_format.parse(test_template_invalid_parameter_name)
template = hot_tmpl.HOTemplate20130523(t,
env=environment.Environment({
'KeyName': 'test',
'ImageId': 'sometestid',
'db_password': 'Pass123'}))
stack = parser.Stack(self.ctx, 'test_stack', template)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('Parameter Groups error: '
'parameter_groups.Database Group: The grouped '
'parameter SomethingNotHere does not '
'reference a valid parameter.'),
six.text_type(exc))
def test_validate_invalid_parameter_no_label(self):
t = template_format.parse(test_template_invalid_parameter_no_label)
template = hot_tmpl.HOTemplate20130523(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('Parameter Groups error: '
'parameter_groups.: The grouped '
'parameter key_name does not '
'reference a valid parameter.'),
six.text_type(exc))
def test_validate_no_parameters_in_group(self):
t = template_format.parse(test_template_no_parameters)
template = hot_tmpl.HOTemplate20130523(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('Parameter Groups error: parameter_groups.Server '
'Group: The parameters must be provided for each '
'parameter group.'), six.text_type(exc))
def test_validate_parameter_groups_not_list(self):
t = template_format.parse(test_template_parameter_groups_not_list)
template = hot_tmpl.HOTemplate20130523(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('Parameter Groups error: parameter_groups: '
'The parameter_groups should be '
'a list.'), six.text_type(exc))
def test_validate_parameters_not_list(self):
t = template_format.parse(test_template_parameters_not_list)
template = hot_tmpl.HOTemplate20130523(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('Parameter Groups error: '
'parameter_groups.Server Group: '
'The parameters of parameter group should be '
'a list.'), six.text_type(exc))
def test_validate_parameters_error_no_label(self):
t = template_format.parse(test_template_parameters_error_no_label)
template = hot_tmpl.HOTemplate20130523(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
exc = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertEqual(_('Parameter Groups error: parameter_groups.: '
'The parameters of parameter group should be '
'a list.'), six.text_type(exc))
def test_validate_allowed_values_integer(self):
t = template_format.parse(test_template_allowed_integers)
template = tmpl.Template(t,
env=environment.Environment({'size': '4'}))
# test with size parameter provided as string
stack = parser.Stack(self.ctx, 'test_stack', template)
self.assertIsNone(stack.validate())
# test with size parameter provided as number
template.env = environment.Environment({'size': 4})
stack = parser.Stack(self.ctx, 'test_stack', template)
self.assertIsNone(stack.validate())
def test_validate_allowed_values_integer_str(self):
t = template_format.parse(test_template_allowed_integers_str)
template = tmpl.Template(t,
env=environment.Environment({'size': '4'}))
# test with size parameter provided as string
stack = parser.Stack(self.ctx, 'test_stack', template)
self.assertIsNone(stack.validate())
# test with size parameter provided as number
template.env = environment.Environment({'size': 4})
stack = parser.Stack(self.ctx, 'test_stack', template)
self.assertIsNone(stack.validate())
def test_validate_not_allowed_values_integer(self):
t = template_format.parse(test_template_allowed_integers)
template = tmpl.Template(t,
env=environment.Environment({'size': '3'}))
# test with size parameter provided as string
stack = parser.Stack(self.ctx, 'test_stack', template)
err = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('"3" is not an allowed value [1, 4, 8]',
six.text_type(err))
# test with size parameter provided as number
template.env = environment.Environment({'size': 3})
stack = parser.Stack(self.ctx, 'test_stack', template)
err = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('"3" is not an allowed value [1, 4, 8]',
six.text_type(err))
def test_validate_not_allowed_values_integer_str(self):
t = template_format.parse(test_template_allowed_integers_str)
template = tmpl.Template(t,
env=environment.Environment({'size': '3'}))
# test with size parameter provided as string
stack = parser.Stack(self.ctx, 'test_stack', template)
err = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('"3" is not an allowed value [1, 4, 8]',
six.text_type(err))
# test with size parameter provided as number
template.env = environment.Environment({'size': 3})
stack = parser.Stack(self.ctx, 'test_stack', template)
err = self.assertRaises(exception.StackValidationFailed,
stack.validate)
self.assertIn('"3" is not an allowed value [1, 4, 8]',
six.text_type(err))
def test_validate_invalid_outputs(self):
t = template_format.parse(test_template_invalid_outputs)
template = tmpl.Template(t)
err = self.assertRaises(exception.StackValidationFailed,
parser.Stack, self.ctx, 'test_stack', template)
error_message = ('Arguments to "get_attr" must be of the form '
'[resource_name, attribute, (path), ...]')
self.assertEqual(error_message, six.text_type(err))
def test_validate_resource_attr_invalid_type(self):
t = template_format.parse("""
heat_template_version: 2013-05-23
resources:
resource:
type: 123
""")
template = tmpl.Template(t)
stack = parser.Stack(self.ctx, 'test_stack', template)
ex = self.assertRaises(exception.StackValidationFailed, stack.validate)
self.assertEqual('Resource resource type type must be string',
six.text_type(ex))
def test_validate_resource_attr_invalid_type_cfn(self):
t = template_format.parse("""
HeatTemplateFormatVersion: '2012-12-12'
Resources:
Resource:
Type: [Wrong, Type]
""")
stack = parser.Stack(self.ctx, 'test_stack', tmpl.Template(t))
ex = self.assertRaises(exception.StackValidationFailed, stack.validate)
self.assertEqual('Resource Resource Type type must be string',
six.text_type(ex))
def test_validate_is_service_available(self):
t = template_format.parse(
"""
heat_template_version: 2015-10-15
resources:
my_instance:
type: AWS::EC2::Instance
""")
self.mock_is_service_available.return_value = False
ex = self.assertRaises(dispatcher.ExpectedException,
self.engine.validate_template,
self.ctx,
t,
{})
self.assertEqual(exception.ResourceTypeUnavailable, ex.exc_info[0])
def test_validate_with_ignorable_errors(self):
t = template_format.parse(
"""
heat_template_version: 2015-10-15
resources:
my_instance:
type: AWS::EC2::Instance
""")
engine = service.EngineService('a', 't')
self.mock_is_service_available.return_value = False
res = dict(engine.validate_template(
self.ctx,
t,
{},
ignorable_errors=[exception.ResourceTypeUnavailable.error_code]))
expected = {'Description': 'No description', 'Parameters': {}}
self.assertEqual(expected, res)
def test_validate_with_ignorable_errors_invalid_error_code(self):
engine = service.EngineService('a', 't')
invalide_error_code = '123456'
invalid_codes = ['99001', invalide_error_code]
res = engine.validate_template(
self.ctx,
mock.MagicMock(),
{},
ignorable_errors=invalid_codes)
msg = _("Invalid codes in ignore_errors : %s") % [invalide_error_code]
ex = webob.exc.HTTPBadRequest(explanation=msg)
self.assertIsInstance(res, webob.exc.HTTPBadRequest)
self.assertEqual(ex.explanation, res.explanation)
| |
from genomics_test_generator import fhir_genomics_test_gene
from request_sender import *
from services.create_resource import *
import random
import json
import traceback
import requests
from django.db import transaction
from home.models import task, task_steps, step_detail
# Resource types exercised by the level tests (see level5Test).
resource_list = ['DiagnosticReport', 'FamilyMemberHistory', 'Sequence', 'DiagnosticRequest', 'Observation']
# Locations of the CSV element specs and generated JSON resources.
spec_basepath = 'resources/spec/'
resource_basepath = 'resources/json/'
# External GA4GH endpoint queried when verifying Sequence repositories (read_repo).
ga4gh_server = 'http://ideaworld.org:6060/'
# FHIR genetics extension attached to Observations in level2Test:
# DNA sequence variant plus the gene (EGFR) it occurs in.
gene_variant_extension = [
    {
        "url": "http://hl7.org/fhir/StructureDefinition/observation-geneticsDNASequenceVariant",
        "valueString": "NG_007726.3:g.146252T>G"
    },
    {
        "url": "http://hl7.org/fhir/StructureDefinition/observation-geneticsGene",
        "valueCodeableConcept": {
            "coding": [
                {
                    "system": "http://www.genenames.org",
                    "code": "3236",
                    "display": "EGFR"
                }
            ]
        }
    }
]
# Fuller genetics-profile extension used by level1Test: adds DNA region
# name and genomic source class on top of the variant + gene entries.
genetic_observation_extension = [
    {
        "url": "http://hl7.org/fhir/StructureDefinition/observation-geneticsDNASequenceVariant",
        "valueString": "NG_007726.3:g.146252T>G"
    },
    {
        "url": "http://hl7.org/fhir/StructureDefinition/observation-geneticsGene",
        "valueCodeableConcept": {
            "coding": [
                {
                    "system": "http://www.genenames.org",
                    "code": "3236",
                    "display": "EGFR"
                }
            ]
        }
    },
    {
        "url": "http://hl7.org/fhir/StructureDefinition/observation-geneticsDNARegionName",
        "valueString": "Exon 21"
    },
    {
        "url": "http://hl7.org/fhir/StructureDefinition/observation-geneticsGenomicSourceClass",
        "valueCodeableConcept": {
            "coding": [
                {
                    "system": "http://hl7.org/fhir/LOINC-48002-0-answerlist",
                    "code": "LA6684-0",
                    "display": "somatic"
                }
            ]
        }
    }
]
# In-memory log of steps/details, dumped to disk by save_fake2file().
base_fake_path = 'resources/fake_data/'
fake_info = []
def save_step2fake(info):
    """Append a step entry to the in-memory fake-run log."""
    entry = {'type': 'step', 'info': info}
    fake_info.append(entry)
def save_detail2fake(info):
    """JSON-serialize the header/resource/response fields of *info* in place
    (empty values become None), then append it to the in-memory fake log."""
    for field in ('req_header', 'res_header', 'resource', 'response'):
        value = info[field]
        info[field] = json.dumps(dict(value)) if value else None
    fake_info.append({'type': 'detail', 'info': info})
def save_fake2file():
    """Dump the accumulated fake-run log to <base_fake_path>fake_fhir.json."""
    # "with" guarantees the handle is closed even if the write raises
    # (the original leaked the descriptor in that case).
    with open('%sfake_fhir.json' % base_fake_path, 'w') as file_obj:
        file_obj.write(json.dumps(fake_info))
def get_right_cases(resource_type):
    """Load the canned valid JSON cases for *resource_type*.

    Scans resources/resource_file for '*json' files whose name (the part
    before the first '_', or '.') matches the resource type, and returns
    the parsed JSON objects. Returns [] when nothing matches or the
    directory does not exist.
    """
    basepath = 'resources/resource_file'
    filepath_list = []
    for parent_dir, _dirnames, filenames in os.walk(basepath):
        prefix = parent_dir if parent_dir.endswith('/') else parent_dir + '/'
        for filename in filenames:
            if not (filename.endswith('json')
                    and resource_type.lower() in filename.lower()):
                continue
            # Resource name is the filename up to the first '_' (or '.').
            sep = '_' if '_' in filename else '.'
            resource_name = filename[:filename.find(sep)]
            if resource_name.lower() == resource_type.lower():
                filepath_list.append(prefix + filename)
    cases = []
    for full_filename in filepath_list:
        # "with" closes the handle even when json.loads raises
        # (the original leaked the descriptor on malformed JSON).
        with open(full_filename, 'r') as f:
            cases.append(json.loads(f.read()))
    return cases
def create_all_test_case4type(resource_spec_filename, resource_type):
    """Build the {'right': [...], 'wrong': [...]} case set for a resource type.

    Right cases come from the canned files (get_right_cases); wrong cases
    are generated from the element spec CSV and tagged with the type.
    """
    # Parse the spec CSV. "with" closes the handle; the original passed
    # a bare open() into csv.reader and never closed the file.
    with open(resource_spec_filename, 'r') as spec_file:
        detail_dict = trans_csv_to_dict(csv.reader(spec_file))
    # Generate candidate cases from the element spec.
    test_cases = create_element_test_cases(detail_dict)
    right_cases, wrong_cases = create_orthogonal_test_cases(test_cases)
    # Right cases are loaded from files instead of the generated set.
    all_cases = {'right': get_right_cases(resource_type), 'wrong': []}
    for case in wrong_cases:
        case['case']['resourceType'] = resource_type
        all_cases['wrong'].append(case)
    return all_cases
def iter_all_cases(resource_type,step_obj, all_cases, url,id_dict, access_token=None):
    """POST every 'right' and 'wrong' case for *resource_type* to *url*.

    Right cases must be accepted (HTTP 200/201); wrong cases must be
    rejected. Every outcome is persisted via save_step_detail().
    Returns (isSuccessful, hint_infos); isSuccessful reflects only the
    right-case run -- wrong-case results are reported via hint_infos.
    """
    #test right cases
    isSuccessful = True
    hint_infos = []
    for case in all_cases['right']:
        hint = ''
        flag = True
        # Fill in references to the resources created during setup.
        case = set_reference(case,id_dict)
        if 'id' in case:
            del case['id']
        # case = remove_none(case)
        status_code, response, req_header, res_header = send_create_resource_request(json.dumps(case), url, access_token)
        if status_code == 201 or status_code == 200 :
            isSuccessful = isSuccessful and True
            flag = True
        else:
            # Collect the server's issue list (dict body) or raw text body.
            if isinstance(response, str):
                hint += response
            elif isinstance(response, dict):
                hint += json.dumps(response['issue'])
            isSuccessful = isSuccessful and False
            flag = False
            hint_infos.append({
                'status': 0,
                'desc': hint,
            })
            save_step_detail(step_obj, {
                'status': 0,
                'desc': 'Resource %s can not be processed. %s' %(resource_type, hint),
                'req_header':req_header,
                'res_header': res_header,
                'response':response,
                'resource':case,
                'resource_name':resource_type
            })
    if isSuccessful:
        hint_infos.append({
            'desc': '%s in correct format can be processed properly' % resource_type,
            'status':2
        })
        save_step_detail(step_obj, {
            'desc': '%s in correct format can be processed properly' % resource_type,
            'status':2,
            'req_header':None,
            'res_header': None,
            'response':None,
            'resource':None,
            'resource_name':resource_type
        })
    # Wrong cases: the server must reject each malformed resource.
    isSuccessfulFalse = True
    for case_with_info in all_cases['wrong']:
        case = case_with_info['case']
        hint = ''
        status_code, response, req_header, res_header = send_create_resource_request(json.dumps(case), url, access_token)
        if status_code == 201 or status_code == 200:
            # Accepted a malformed case: record why it should have failed.
            hint += case_with_info['info']
            isSuccessfulFalse = isSuccessfulFalse and False
        else:
            isSuccessfulFalse = isSuccessfulFalse and True
        # NOTE(review): after the first rejection failure this appends a
        # detail on every remaining iteration too -- confirm intended.
        if not isSuccessfulFalse:
            hint_infos.append({
                'status': 1,
                'desc': hint
            })
            save_step_detail(step_obj, {
                'status': 1,
                'desc': hint,
                'req_header':req_header,
                'res_header': res_header,
                'response':response,
                'resource':case,
                'resource_name':resource_type
            })
    if isSuccessfulFalse:
        hint_infos.append({
            'desc': '%s with error can be handled' % resource_type,
            'status':2
        })
        save_step_detail(step_obj, {
            'desc': '%s in incorrect format can be processed properly' % resource_type,
            'status':2,
            'req_header':None,
            'res_header': None,
            'response':None,
            'resource':None,
            'resource_name':resource_type
        })
    return isSuccessful, hint_infos
def ana_pre_creation_result(raw_info):
    """Map each pre-created resource key to True iff its first issue is
    'information' severity; keys without an 'issue' list are skipped."""
    processed_info = {}
    for key, outcome in raw_info.items():
        if outcome and 'issue' in outcome:
            severity = outcome['issue'][0]['severity']
            processed_info[key] = severity == 'information'
    return processed_info
def level0Test(url, id_dict, step_obj, access_token=None):
    """Level 0: create plain Observations -- right cases must pass,
    wrong cases must be rejected. Returns (isSuccessful, hint_infos)."""
    all_cases = create_all_test_case4type('%sObservation.csv' % spec_basepath,
                                          'Observation')
    if not url.endswith('/'):
        url += '/'
    return iter_all_cases('Observation', step_obj, all_cases,
                          '%s%s' % (url, 'Observation'), id_dict, access_token)
def level1Test(url,id_dict,step_obj, access_token):
    """Level 1: Observations carrying the full genetics-profile extension.

    Each canned right case is decorated with genetic_observation_extension
    and must be accepted (200/201). Returns (isSuccessful, hint_infos).
    """
    spec_filename = '%sObservation.csv' % spec_basepath
    all_cases = create_all_test_case4type(spec_filename, 'Observation')
    right_cases = all_cases['right']
    isSuccessful = True
    hint_infos = []
    if not url.endswith('/'):
        url += '/'
    url += 'Observation'
    for case in right_cases:
        flag = True
        case = set_reference(case,id_dict)
        # case = remove_none(case)
        # Attach the genetics-profile extension under test.
        case['extension'] = genetic_observation_extension
        # print json.dumps(case)
        hint = ''
        status_code, response, req_header, res_header = send_create_resource_request(json.dumps(case), url, access_token)
        print status_code
        print isSuccessful
        # Some servers return the status as a string; accept both forms.
        if status_code == 201 or status_code == 200 or status_code == '201' or status_code == '200':
            isSuccessful = isSuccessful and True
            flag = True
        else:
            if isinstance(response, str):
                hint += response
            elif isinstance(response, dict):
                hint += json.dumps(response['issue'])
            isSuccessful = isSuccessful and False
            flag = False
            hint_infos.append({
                'status': 0,
                'desc': hint
            })
            save_step_detail(step_obj, {
                'status': 0,
                'desc': hint,
                'req_header':req_header,
                'res_header': res_header,
                'response':response,
                'resource':case,
                'resource_name':'Observation'
            })
    if isSuccessful:
        hint_infos.append({
            'status': 2,
            'desc': 'Observation for genetic profile can be processed properly'
        })
        save_step_detail(step_obj, {
            'desc': 'Observation for genetic profile can be processed properly',
            'status':2,
            'req_header':None,
            'res_header': None,
            'response':None,
            'resource':None,
            'resource_name':'Observation'
        })
    #TODO extension generate
    return isSuccessful,hint_infos
def level2Test(url, id_dict,step_obj, access_token):
    """Level 2: Observations carrying the gene + variant extension only.

    Each canned right case is decorated with gene_variant_extension and
    must be accepted (200/201). Returns (isSuccessful, hint_infos).
    """
    spec_filename = '%sObservation.csv' % spec_basepath
    all_cases = create_all_test_case4type(spec_filename, 'Observation')
    right_cases = all_cases['right']
    isSuccessful = True
    hint_infos = []
    if not url.endswith('/'):
        url += '/'
    url += 'Observation'
    for case in right_cases:
        case = set_reference(case,id_dict)
        # case = remove_none(case)
        #add gene and Variant extension
        case['extension'] = gene_variant_extension
        hint = ''
        # print json.dumps(case)
        status_code, response, req_header, res_header = send_create_resource_request(json.dumps(case), url, access_token)
        if status_code == 200 or status_code == 201:
            isSuccessful = isSuccessful and True
        else:
            if isinstance(response, str):
                hint += response
            elif isinstance(response, dict):
                hint += json.dumps(response['issue'])
            isSuccessful = isSuccessful and False
            hint_infos.append({
                'status': 0,
                'desc': hint
            })
            save_step_detail(step_obj, {
                'status': 0,
                'desc': hint,
                'req_header':req_header,
                'res_header': res_header,
                'response':response,
                'resource':case,
                'resource_name':'Observation'
            })
    if isSuccessful:
        hint_infos.append({
            'status':2,
            'desc':'Observation with Gene extension can be processed properly'
        })
        save_step_detail(step_obj, {
            'desc': 'Observation with Gene extension can be processed properly',
            'status':2,
            'req_header':None,
            'res_header': None,
            'response':None,
            'resource':None,
            'resource_name':'Observation'
        })
    return isSuccessful,hint_infos
def level3Test(url, id_dict, step_obj, access_token):
    """Level 3: create Sequence resources -- right cases must pass,
    wrong cases must be rejected. Returns (isSuccessful, hint_infos)."""
    all_cases = create_all_test_case4type('%sSequence.csv' % spec_basepath,
                                          'Sequence')
    if not url.endswith('/'):
        url += '/'
    return iter_all_cases('Sequence', step_obj, all_cases,
                          '%s%s' % (url, 'Sequence'), id_dict, access_token)
def level4Test(url, id_dict, step_obj, access_token):
    """Level 4: create a Sequence with a GA4GH repository, then read it back.

    Posts Sequence_repo.json, extracts the created resource id from the
    server response, and verifies the repository via read_repo().
    Returns (isSuccessful, hint_infos).
    """
    if not url.endswith('/'):
        url += '/'
    isSuccessful = True
    hint = ''
    hint_infos = []
    #get sequence resource file with repo
    resource_filepath = '%sSequence_repo.json' % 'resources/resource_file/'
    # NOTE(review): resource_file is never closed.
    resource_file = open(resource_filepath,'r')
    resource_obj = json.loads(resource_file.read())
    print id_dict
    resource_obj = set_reference(resource_obj,id_dict)
    status_code, response, req_header, res_header = send_create_resource_request(json.dumps(resource_obj),'%sSequence'% url, access_token)
    print 'sequence create %d' % status_code
    # NOTE(review): the failure branch additionally requires
    # 'resourceType' in the response; an error body without that key
    # falls into the success branch -- confirm this is intended.
    if status_code != 200 and status_code != 201 and 'resourceType' in response:
        isSuccessful = False
        if isinstance(response, str):
            hint += response
        elif isinstance(response, dict):
            hint += json.dumps(response['issue'])
    else:
        resource_id = None
        # The new id comes either from an OperationOutcome diagnostics
        # string ("Sequence/<id>") or from the echoed Sequence itself.
        if response['resourceType'] == 'OperationOutcome':
            try:
                issue_desc = response['issue'][0]
                if issue_desc['severity'] == 'information':
                    resource_id = issue_desc['diagnostics'].split('/')[1]
            except:
                pass
        elif response['resourceType'] == 'Sequence':
            try:
                resource_id = response['id']
            except:
                pass
        #send read resource and then read ga4gh repo
        if resource_id and len(resource_id) != 0:
            isRepoSuccessful, req_header, res_heander = read_repo(resource_id, url, access_token)
            isSuccessful = isSuccessful & isRepoSuccessful
        else:
            isSuccessful = False
    hint_infos.append({
        'status':2 if isSuccessful else 0,
        'desc':'Repository %s be read' % ('can' if isSuccessful else 'cannot')
    })
    save_step_detail(step_obj, {
        'desc':'Repository %s be read' % ('can' if isSuccessful else 'cannot'),
        'status':2 if isSuccessful else 0,
        'req_header':req_header,
        'res_header': res_header,
        'response':None if isSuccessful else response,
        'resource':None,
        'resource_name':'Sequence'
    })
    return isSuccessful, hint_infos
def read_repo(resource_id, url, access_token):
    """Read back Sequence/<resource_id> and probe its GA4GH variant.

    Returns (isSuccessful, request_headers, response_headers); headers
    are None when no GA4GH request was made.
    """
    print resource_id
    print 'reading repo'
    status_code, response, req_header, res_header = send_read_resource_request("%sSequence/%s" %(url,resource_id), access_token)
    isSuccessful = True
    print 'sequence read %d' % status_code
    if status_code >= 300:
        isSuccessful = False
        return isSuccessful,req_header,res_header
    #read ga4gh
    else:
        variantId = None
        print 'response'
        print response['repository']
        #get variant id
        try:
            variantId = response['repository'][0]['variantId']
        except:
            pass
        print variantId
        if variantId and len(variantId) != 0:
            # Query the external GA4GH server for the referenced variant.
            r = requests.get('%svariants/%s' % (ga4gh_server, variantId))
            print r.status_code
            if r.status_code > 300:
                isSuccessful = False
            return isSuccessful,r.request.headers, r.headers
    # No variant id in the repository entry: treated as success.
    return True, None, None
def random_picker(pick_list):
    '''
    pick a element from a list randomly
    @param pick_list: list to pick element
    @type pick_list: list
    @return picked item
    @rtype: obj
    '''
    # random.choice is the stdlib idiom for
    # pick_list[random.randint(0, len(pick_list) - 1)].
    return random.choice(pick_list)
def level5Test(url, id_dict,step_obj, access_token):
    """Level 5: read one random existing instance of every resource type
    in resource_list. Returns (isSuccessful, hint_infos)."""
    if not url.endswith('/'):
        url += '/'
    isSuccessful = True
    hint_infos = []
    for resource_name in resource_list:
        id_list = get_resource_id_list(url, resource_name, access_token)
        hint = ''
        flag = True
        req_header = None
        res_header = None
        response = None
        if id_list and len(id_list) > 0:
            # Pick an arbitrary instance id and try to read it back.
            random_id = random_picker(id_list)
            status_code, response, req_header, res_header = send_read_resource_request("%s%s/%s" %(url,resource_name,random_id), access_token)
            if status_code == 201 or status_code == 200 or status_code == 302:
                isSuccessful = isSuccessful and True
                flag = True
            else:
                flag = False
                isSuccessful = isSuccessful and False
                # hint += response
        # NOTE(review): once any read fails, a failure hint is appended on
        # every remaining iteration as well -- confirm this is intended.
        if not isSuccessful:
            hint_infos.append({
                'status':0,
                'desc': '%s reading failed, %s' % (resource_name, hint)
            })
        save_step_detail(step_obj, {
            'status': 2 if flag else 0,
            'resource_name':resource_name,
            'desc': '%s reading %s, %s' % (resource_name, ('Success' if flag else 'Fail') ,hint),
            'req_header':req_header,
            'res_header': res_header,
            'response':response,
            'resource':None
        })
    if isSuccessful:
        hint_infos.append({
            'status':2,
            'desc':'FHIR Genomics Resources can be retrived'
        })
        # save_step_detail(step_obj, {
        #     'desc': 'FHIR Genomics Resources can be retrived',
        #     'status':True,
        #     'req_header':None,
        #     'res_header': None,
        #     'response':None,
        #     'resource':None,
        #     'resource_name':None
        # })
    return isSuccessful, hint_infos
def save_step_detail(step_obj, detail_info):
    """Persist one step_detail row for *step_obj* inside a transaction.

    Headers, resource and response are stored as JSON strings, or None
    when empty. A failed save is only logged, never raised.
    """
    print detail_info['response']
    with transaction.atomic():
        new_step_detail = step_detail(step=step_obj)
        new_step_detail.detail_desc = detail_info['desc']
        new_step_detail.detail_status = detail_info['status']
        new_step_detail.http_request = json.dumps(dict(detail_info['req_header'])) if detail_info['req_header'] else None
        new_step_detail.http_response = json.dumps(dict(detail_info['res_header'])) if detail_info['res_header'] else None
        new_step_detail.request_resource = json.dumps(dict(detail_info['resource'])) if detail_info['resource'] else None
        new_step_detail.response_message = json.dumps(dict(detail_info['response'])) if detail_info['response'] else None
        new_step_detail.resource_name = detail_info['resource_name']
        try:
            new_step_detail.save()
        except:
            # Best effort: log and continue.
            print 'live create failed'
def create_one_step(task_id, step_info, step_obj=None):
    """Create a task_steps row, or update *step_obj*'s description in place.

    The step is also mirrored into the in-memory fake log. Returns the
    step object, or None when the database write failed.
    """
    save_step2fake(step_info)
    if step_obj:
        # Update path: refresh the description of an existing step.
        with transaction.atomic():
            try:
                step_obj.step_desc = step_info['desc']
                step_obj.save()
                return step_obj
            except:
                return None
            pass
    else:
        # Create path: new step row for this task.
        with transaction.atomic():
            new_task_step = task_steps(task_id=task_id, step_desc = step_info['desc'], name=step_info['name'])
            try:
                new_task_step.save()
            except:
                traceback.print_exc()
                print 'step can not be created'
                return None
            return new_task_step
def form_new_step_info(status, base_desc, details, name):
    """Bundle step attributes into the dict shape the step writers expect."""
    return {
        'status': status,
        'desc': base_desc,
        'details': details,
        'name': name,
    }
def perform_a_test(test_method, step_obj, url, id_dict, base_desc, name=None, access_token=None):
    """Run one level-test callable and wrap its result as a step-info dict."""
    ok, hints = test_method(url, id_dict, step_obj, access_token)
    outcome = 'successfully' if ok else 'failed'
    return form_new_step_info(ok, '%s %s' % (base_desc, outcome), hints, name)
def do_standard_test(task_id, url, access_token=None, resources=None):
    """Run the standard FHIR genomics test suite against *url*.

    Creates the prerequisite resources, then runs every requested level
    test (levels "0".."5", all by default). Each level is recorded as a
    task step; a passing level is added to the returned list.

    Returns (passed_levels, id_dict) where id_dict maps the pre-created
    resources to their server ids.
    """
    # Avoid the original mutable default argument.
    if resources is None:
        resources = ["0", "1", "2", "3", "4", "5"]
    test_result = {
        'level': [],
        'steps': []
    }
    # --- setup: create prerequisite resources ---
    step_info = form_new_step_info(True, 'Setting up standard test......', [], 'Setup')
    step_obj = create_one_step(task_id, step_info)
    create_res, id_dict = create_pre_resources(url, 'resources', access_token)
    pre_resource_result = ana_pre_creation_result(create_res)
    status = True
    details = []
    for key in pre_resource_result:
        ok = pre_resource_result[key]
        status = status and ok
        if ok:
            desc = '%s created successfully' % key
        else:
            desc = '%s can not be created, test terminated' % key
        details.append({'status': ok, 'desc': desc})
    step_info = form_new_step_info(
        status, '%s %s' % ('Setup', 'Successfully' if status else 'Failed'),
        details, 'Setup')
    create_one_step(task_id, step_info, step_obj)
    # --- levels 0-5, data-driven instead of five copy-pasted blocks ---
    level_tests = (('0', level0Test), ('1', level1Test), ('2', level2Test),
                   ('3', level3Test), ('4', level4Test), ('5', level5Test))
    for level, test_method in level_tests:
        if level not in resources:
            continue
        name = 'Level %s' % level
        step_info = form_new_step_info(True, '%s test performing' % name, [], name)
        step_obj = create_one_step(task_id, step_info)
        step_info = perform_a_test(test_method, step_obj, url, id_dict,
                                   '%s test' % name, name, access_token)
        create_one_step(task_id, step_info, step_obj)
        if step_info['status']:
            test_result['level'].append(level)
    return test_result['level'], id_dict
| |
#
# Copyright (c) 2014 by Christian E. Hopps.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, nested_scopes, print_function, unicode_literals
from ctypes import BigEndianStructure, create_string_buffer
from ctypes import c_uint8, c_uint16, c_uint32, sizeof
from pyisis.lib.util import cast_as, tlvrdb
import pyisis.clns as clns
import sys
VERYVERBOSE = False  # when True, __str__ output also includes the nested lower-layer headers
class EtherHeader (BigEndianStructure):
    """Ethernet MAC header: destination, source and type/length field."""
    _pack_ = 1
    _fields_ = [ ("ether_dst", c_uint8 * 6),
                 ("ether_src", c_uint8 * 6),
                 ("ether_type", c_uint16), ]
    def __str__ (self):
        return "Ether(dst={},src={},typelen={})".format(clns.iso_decode(self.ether_dst),
                                                        clns.iso_decode(self.ether_src),
                                                        self.ether_type)
class LLCHeader (BigEndianStructure):
    """802.2 LLC header: DSAP, SSAP and control octets."""
    _pack_ = 1
    _fields_ = [ ("llc_dsap", c_uint8),
                 ("llc_ssap", c_uint8),
                 ("llc_control", c_uint8) ]
    def __str__ (self):
        return "LLC(dsap={:#02x},ssap={:#02x},ctrl={:#02x})".format(self.llc_dsap,
                                                                    self.llc_ssap,
                                                                    self.llc_control)
class LLCFrame (BigEndianStructure):
    """Ethernet MAC header followed by the 802.2 LLC header."""
    _pack_ = 1
    # anonymous: the nested headers' fields are reachable directly on the frame
    _anonymous_ = ("mac_header", "llc_header")
    _fields_ = [ ("mac_header", EtherHeader),
                 ("llc_header", LLCHeader), ]
class CLNSHeader (BigEndianStructure):
    """Fixed CLNS header common to the IS-IS PDUs in this module."""
    _pack_ = 1
    _fields_ = [
        ("clns_idrp", c_uint8),
        ("clns_len", c_uint8),
        ("clns_version", c_uint8),
        ("clns_sysid_len", c_uint8),
        # reserved1/pdu_type share one octet (3 + 5 bits).
        ("clns_reserved1", c_uint8, 3),
        ("clns_pdu_type", c_uint8, 5),
        ("clns_version2", c_uint8),
        ("clns_reserved2", c_uint8),
        ("clns_max_area", c_uint8), ]
    def __str__ (self):
        fmtstr = ("CLNS(idrp={:#02x},len={},v={},idlen={}," +
                  "rsv1={},pdutype={},v2={},rsv2={},maxarea={})")
        return fmtstr.format(self.clns_idrp,
                             self.clns_len,
                             self.clns_version,
                             self.clns_sysid_len,
                             self.clns_reserved1,
                             self.clns_pdu_type,
                             self.clns_version2,
                             self.clns_reserved2,
                             self.clns_max_area)
class CLNSEtherFrame (BigEndianStructure):
    """MAC + LLC + CLNS headers overlaid as a single frame."""
    _pack_ = 1
    # anonymous: nested headers' fields are reachable directly on the frame
    _anonymous_ = ("mac_header", "llc_header", "clns_header")
    _fields_ = [
        ("mac_header", EtherHeader),
        ("llc_header", LLCHeader),
        ("clns_header", CLNSHeader), ]
class IIHLANHeader (BigEndianStructure):
    """LAN IIH (IS-IS hello) fixed header that follows the CLNS header."""
    _pack_ = 1
    _fields_ = [
        ("circuit_type", c_uint8),
        ("source_id", c_uint8 * clns.CLNS_SYSID_LEN),
        ("hold_time", c_uint16),
        ("pdu_len", c_uint16),
        # reserved/priority share one octet (1 + 7 bits).
        ("reserved", c_uint8, 1),
        ("priority", c_uint8, 7),
        ("lan_id", c_uint8 * clns.CLNS_LANID_LEN), ]
    def __str__ (self):
        fmtstr = "IIHLAN(ctype={},srcid={},holdtime={},len={},rsv={},pri={},lanid={})"
        args = [self.circuit_type,
                clns.iso_decode(self.source_id),
                self.hold_time,
                self.pdu_len,
                self.reserved,
                self.priority,
                clns.iso_decode(self.lan_id)]
        return fmtstr.format(*args)  # pylint: disable=W0142
class IIHLANPDU (BigEndianStructure):
    """LAN Hello PDU: CLNS header + IIH LAN header (no link-layer headers)."""
    _pack_ = 1
    _anonymous_ = ("clns_header", "iih_header")
    _fields_ = [
        ("clns_header", CLNSHeader),
        ("iih_header", IIHLANHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [self.iih_header]
        # Include the nested CLNS header only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}]"
            args += [self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
class IIHLANFrame (BigEndianStructure):
    """Full LAN Hello ethernet frame: MAC + LLC + CLNS + IIH LAN headers."""
    _pack_ = 1
    _anonymous_ = ("mac_header", "llc_header", "clns_header", "iih_header")
    _fields_ = [
        ("mac_header", EtherHeader),
        ("llc_header", LLCHeader),
        ("clns_header", CLNSHeader),
        ("iih_header", IIHLANHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [self.iih_header]
        # Include the nested lower-layer headers only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}\n           {}\n           {}]"
            args += [self.mac_header,
                     self.llc_header,
                     self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
class IIHP2PHeader (BigEndianStructure):
    """IS-IS point-to-point Hello (IIH) header following the CLNS header."""
    # NOTE: wire-format layout; order and widths must not be changed.
    _pack_ = 1
    _fields_ = [
        ("circuit_type", c_uint8),
        ("source_id", c_uint8 * clns.CLNS_SYSID_LEN),
        ("hold_time", c_uint16),
        ("pdu_len", c_uint16),
        ("local_circuit_id", c_uint8), ]
    def __str__ (self):
        fmtstr = "IIHP2P(ctype={:#02x},srcid={},holdtime={},len={},lcircid={})"
        args = [self.circuit_type,
                clns.iso_decode(self.source_id),
                self.hold_time,
                self.pdu_len,
                self.local_circuit_id]
        return fmtstr.format(*args)  # pylint: disable=W0142
class IIHP2PPDU (BigEndianStructure):
    """Point-to-point Hello PDU: CLNS header + IIH P2P header."""
    _pack_ = 1
    _anonymous_ = ("clns_header", "iih_header")
    _fields_ = [
        ("clns_header", CLNSHeader),
        ("iih_header", IIHP2PHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [self.iih_header]
        # Include the nested CLNS header only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}]"
            args += [self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
class IIHP2PFrame (BigEndianStructure):
    """Point-to-point Hello frame: LLC + CLNS + IIH P2P headers."""
    # NOTE(review): unlike the other *Frame classes this one carries no
    # EtherHeader — presumably p2p link encapsulation differs; confirm.
    _pack_ = 1
    _anonymous_ = ("llc_header", "clns_header", "iih_header")
    _fields_ = [
        ("llc_header", LLCHeader),
        ("clns_header", CLNSHeader),
        ("iih_header", IIHP2PHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [self.iih_header]
        # Include the nested lower-layer headers only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}\n           {}]"
            args += [self.llc_header,
                     self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
class LSPHeader (BigEndianStructure):
    """IS-IS Link State PDU header (length, lifetime, LSP ID, sequence
    number, checksum and the attribute/overload/IS-type flag byte)."""
    # NOTE: wire-format layout; the bit-field declaration order within the
    # flag byte is significant and must not be changed.
    _pack_ = 1
    _fields_ = [
        ("pdu_len", c_uint16),
        ("lifetime", c_uint16),
        ("lspid", c_uint8 * clns.CLNS_LSPID_LEN),
        ("seqno", c_uint32),
        ("checksum", c_uint16),
        ("p_bit", c_uint8, 1),
        ("att_error", c_uint8, 1),
        ("att_expense", c_uint8, 1),
        ("att_delay", c_uint8, 1),
        ("att_default", c_uint8, 1),
        ("overload", c_uint8, 1),
        ("is_type", c_uint8, 2), ]
    def __str__ (self):
        fmtstr = ("LSP(len={},lifetime={},lspid={},seqno={:#010x},cksum={:#04x}," +
                  "pbit={},atterr={},attexp={},attdel={},attdef={},oload={},istype={})")
        args = [self.pdu_len,
                self.lifetime,
                clns.iso_decode(self.lspid),
                self.seqno,
                self.checksum,
                self.p_bit,
                self.att_error,
                self.att_expense,
                self.att_delay,
                self.att_default,
                self.overload,
                self.is_type]
        return fmtstr.format(*args)  # pylint: disable=W0142
class LSPZeroSegFrame (BigEndianStructure):
    """CLNS header + LSP header with no link-layer headers.

    NOTE(review): structurally identical to LSPPDU below — presumably kept
    as a distinct type for segment-zero LSP handling; confirm with callers.
    """
    _pack_ = 1
    _anonymous_ = ("clns_header", "lsp_header")
    _fields_ = [
        ("clns_header", CLNSHeader),
        ("lsp_header", LSPHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [ self.lsp_header ]
        # Include the nested CLNS header only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}]"
            args += [self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
class LSPPDU (BigEndianStructure):
    """Link State PDU: CLNS header + LSP header (no link-layer headers)."""
    _pack_ = 1
    _anonymous_ = ("clns_header", "lsp_header")
    _fields_ = [
        ("clns_header", CLNSHeader),
        ("lsp_header", LSPHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [ self.lsp_header ]
        # Include the nested CLNS header only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}]"
            args += [self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
class LSPFrame (BigEndianStructure):
    """Full Link State PDU ethernet frame: MAC + LLC + CLNS + LSP headers."""
    _pack_ = 1
    _anonymous_ = ("mac_header", "llc_header", "clns_header", "lsp_header")
    _fields_ = [
        ("mac_header", EtherHeader),
        ("llc_header", LLCHeader),
        ("clns_header", CLNSHeader),
        ("lsp_header", LSPHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [self.lsp_header]
        # Include the nested lower-layer headers only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}\n           {}\n           {}]"
            args += [self.mac_header,
                     self.llc_header,
                     self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
class CSNPHeader (BigEndianStructure):
    """IS-IS Complete Sequence Number PDU header: source node and the
    inclusive LSP-ID range the CSNP covers."""
    # NOTE: wire-format layout; order and widths must not be changed.
    _pack_ = 1
    _fields_ = [
        ("pdu_len", c_uint16),
        ("source_id", c_uint8 * clns.CLNS_NODEID_LEN),
        ("start_lspid", c_uint8 * clns.CLNS_LSPID_LEN),
        ("end_lspid", c_uint8 * clns.CLNS_LSPID_LEN), ]
    def __str__ (self):
        fmtstr = "CSNP(len={},srcid={},start={},end={})"
        args = [self.pdu_len,
                clns.iso_decode(self.source_id),
                clns.iso_decode(self.start_lspid),
                clns.iso_decode(self.end_lspid)]
        return fmtstr.format(*args)  # pylint: disable=W0142
class CSNPPDU (BigEndianStructure):
    """CSNP: CLNS header + CSNP header (no link-layer headers)."""
    _pack_ = 1
    _anonymous_ = ("clns_header", "csnp_header")
    _fields_ = [
        ("clns_header", CLNSHeader),
        ("csnp_header", CSNPHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [self.csnp_header]
        # Include the nested CLNS header only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}]"
            args += [self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
class CSNPFrame (BigEndianStructure):
    """Full CSNP ethernet frame: MAC + LLC + CLNS + CSNP headers."""
    _pack_ = 1
    _anonymous_ = ("mac_header", "llc_header", "clns_header", "csnp_header")
    _fields_ = [
        ("mac_header", EtherHeader),
        ("llc_header", LLCHeader),
        ("clns_header", CLNSHeader),
        ("csnp_header", CSNPHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [self.csnp_header]
        # Include the nested lower-layer headers only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}\n           {}\n           {}]"
            args += [self.mac_header,
                     self.llc_header,
                     self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
class PSNPHeader (BigEndianStructure):
    """IS-IS Partial Sequence Number PDU header (length + source node)."""
    # NOTE: wire-format layout; order and widths must not be changed.
    _pack_ = 1
    _fields_ = [
        ("pdu_len", c_uint16),
        ("source_id", c_uint8 * clns.CLNS_NODEID_LEN), ]
    def __str__ (self):
        fmtstr = "PSNP(len={},srcid={})"
        args = [self.pdu_len,
                clns.iso_decode(self.source_id)]
        return fmtstr.format(*args)  # pylint: disable=W0142
class PSNPPDU (BigEndianStructure):
    """PSNP: CLNS header + PSNP header (no link-layer headers)."""
    _pack_ = 1
    _anonymous_ = ("clns_header", "psnp_header")
    _fields_ = [
        ("clns_header", CLNSHeader),
        ("psnp_header", PSNPHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [self.psnp_header]
        # Include the nested CLNS header only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}]"
            args += [self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
class PSNPFrame (BigEndianStructure):
    """Full PSNP ethernet frame: MAC + LLC + CLNS + PSNP headers."""
    _pack_ = 1
    _anonymous_ = ("mac_header", "llc_header", "clns_header", "psnp_header")
    _fields_ = [
        ("mac_header", EtherHeader),
        ("llc_header", LLCHeader),
        ("clns_header", CLNSHeader),
        ("psnp_header", PSNPHeader), ]
    def __str__ (self):
        fmtstr = "{}"
        args = [self.psnp_header]
        # Include the nested lower-layer headers only in very verbose mode.
        if VERYVERBOSE:
            fmtstr += "\n          [{}\n           {}\n           {}]"
            args += [self.mac_header,
                     self.llc_header,
                     self.clns_header]
        return fmtstr.format(*args)  # pylint: disable=W0142
# Dispatch table: PDU type -> full ethernet frame structure class.
PDU_FRAME_TYPES = {
    clns.PDU_TYPE_IIH_LAN_L1: IIHLANFrame,
    clns.PDU_TYPE_IIH_LAN_L2: IIHLANFrame,
    clns.PDU_TYPE_IIH_P2P: IIHP2PFrame,
    clns.PDU_TYPE_LSP_L1: LSPFrame,
    clns.PDU_TYPE_LSP_L2: LSPFrame,
    clns.PDU_TYPE_CSNP_L1: CSNPFrame,
    clns.PDU_TYPE_CSNP_L2: CSNPFrame,
    clns.PDU_TYPE_PSNP_L1: PSNPFrame,
    clns.PDU_TYPE_PSNP_L2: PSNPFrame,
}
# Dispatch table: PDU type -> bare PDU structure (CLNS header onward,
# without link-layer headers).
PDU_PDU_TYPES = {
    clns.PDU_TYPE_IIH_LAN_L1: IIHLANPDU,
    clns.PDU_TYPE_IIH_LAN_L2: IIHLANPDU,
    clns.PDU_TYPE_IIH_P2P: IIHP2PPDU,
    clns.PDU_TYPE_LSP_L1: LSPPDU,
    clns.PDU_TYPE_LSP_L2: LSPPDU,
    clns.PDU_TYPE_CSNP_L1: CSNPPDU,
    clns.PDU_TYPE_CSNP_L2: CSNPPDU,
    clns.PDU_TYPE_PSNP_L1: PSNPPDU,
    clns.PDU_TYPE_PSNP_L2: PSNPPDU,
}
# PDU type -> IS-IS level (1 or 2). NOTE: IIH_P2P is deliberately absent
# (a p2p hello is not level-specific), so lookups must handle a miss.
PDU_FRAME_TYPE_LEVEL = {
    clns.PDU_TYPE_IIH_LAN_L1: 1,
    clns.PDU_TYPE_IIH_LAN_L2: 2,
    clns.PDU_TYPE_LSP_L1: 1,
    clns.PDU_TYPE_LSP_L2: 2,
    clns.PDU_TYPE_CSNP_L1: 1,
    clns.PDU_TYPE_CSNP_L2: 2,
    clns.PDU_TYPE_PSNP_L1: 1,
    clns.PDU_TYPE_PSNP_L2: 2,
}
# PDU type -> zero-based level index (level - 1), used to index per-level
# arrays elsewhere.
PDU_FRAME_TYPE_LINDEX = {
    clns.PDU_TYPE_IIH_LAN_L1: 0,
    clns.PDU_TYPE_IIH_LAN_L2: 1,
    clns.PDU_TYPE_LSP_L1: 0,
    clns.PDU_TYPE_LSP_L2: 1,
    clns.PDU_TYPE_CSNP_L1: 0,
    clns.PDU_TYPE_CSNP_L2: 1,
    clns.PDU_TYPE_PSNP_L1: 0,
    clns.PDU_TYPE_PSNP_L2: 1,
}
# Link-layer bytes (MAC + LLC) that precede the CLNS header in a frame.
OVERHEAD_LEN = sizeof(EtherHeader) + sizeof(LLCHeader)
# PDU type -> length of the IS-IS headers (CLNS header + PDU-specific
# header), i.e. the frame size minus the link-layer overhead.
PDU_HEADER_LEN = {
    clns.PDU_TYPE_IIH_LAN_L1: sizeof(IIHLANFrame) - OVERHEAD_LEN,
    clns.PDU_TYPE_IIH_LAN_L2: sizeof(IIHLANFrame) - OVERHEAD_LEN,
    clns.PDU_TYPE_IIH_P2P: sizeof(IIHP2PFrame) - OVERHEAD_LEN,
    clns.PDU_TYPE_LSP_L1: sizeof(LSPFrame) - OVERHEAD_LEN,
    clns.PDU_TYPE_LSP_L2: sizeof(LSPFrame) - OVERHEAD_LEN,
    clns.PDU_TYPE_CSNP_L1: sizeof(CSNPFrame) - OVERHEAD_LEN,
    clns.PDU_TYPE_CSNP_L2: sizeof(CSNPFrame) - OVERHEAD_LEN,
    clns.PDU_TYPE_PSNP_L1: sizeof(PSNPFrame) - OVERHEAD_LEN,
    clns.PDU_TYPE_PSNP_L2: sizeof(PSNPFrame) - OVERHEAD_LEN,
}
def get_frame_level (pkt):
    """Return the IS-IS level (1 or 2) of a raw frame, or 0 if the packet
    is empty or its PDU type has no level (e.g. p2p hello / unknown)."""
    if not pkt:
        return 0
    # Read the PDU type byte directly at its structure offset without
    # casting the whole packet.
    offset = CLNSEtherFrame.clns_pdu_type.offset  # pylint: disable=E1101
    pdu_type = tlvrdb(memoryview(pkt)[offset])
    return PDU_FRAME_TYPE_LEVEL.get(pdu_type, 0)
def get_frame (pkt):
    """Cast a raw packet to its PDU-type-specific frame class.

    Returns None for an empty packet or an unrecognized PDU type.
    """
    if not pkt:
        return None
    # Peek at the PDU type via the generic frame prefix, then re-cast to
    # the specific frame class for that type.
    pdu_type = cast_as(pkt, CLNSEtherFrame).clns_pdu_type
    frame_class = PDU_FRAME_TYPES.get(pdu_type)
    if frame_class is None:
        return None
    return cast_as(pkt, frame_class)
def get_raw_lsp_pdu (lindex):
    """Allocate and initialize an LSP PDU buffer for a level index.

    Args:
        lindex: Zero-based level index (0 = level-1, 1 = level-2) selecting
            the PDU type and the originating LSP buffer size.

    Returns:
        (lsp, buf, tlvview): the LSP PDU header mapped onto ``buf``, the
        backing buffer, and a memoryview of the TLV space after the header.
    """
    pdu_type = clns.PDU_TYPE_LSP_LX[lindex]
    # get_pdu_buffer already returns the memoryview of the TLV space
    # immediately following the header; the previous code recomputed the
    # identical view a second time and discarded the returned one.
    return get_pdu_buffer(clns.originatingLxLSPBufferSize(lindex), pdu_type)
def get_pdu_buffer (size, pdu_type):
    """Get a PDU buffer of the given size cast to the correct type.

    Allocates a zeroed buffer of ``size`` bytes, maps the PDU structure for
    ``pdu_type`` onto its start, and fills in the common CLNS header.

    Args:
        size: Total buffer size in bytes.
        pdu_type: One of the ``clns.PDU_TYPE_*`` values.

    Returns:
        (hdr, buf, tlvview): the PDU header struct, the backing buffer, and
        a memoryview of the TLV space that follows the header.
    """
    if sys.version_info >= (3, 0):
        buf = bytearray(size)
        hdr = PDU_PDU_TYPES[pdu_type].from_buffer(buf)
    else:
        # Python 2 has no from_buffer on a bytearray; use a ctypes string
        # buffer and cast it instead.
        buf = create_string_buffer(size)
        hdr = cast_as(buf, PDU_PDU_TYPES[pdu_type])
    hdr.clns_idrp = clns.CLNS_IDRP_ISIS
    hdr.clns_len = PDU_HEADER_LEN[pdu_type]
    hdr.clns_version = clns.CLNS_VERSION
    # Use the named constant (6) rather than a magic number, matching the
    # source_id field declarations above.
    hdr.clns_sysid_len = clns.CLNS_SYSID_LEN
    hdr.clns_reserved1 = 0
    hdr.clns_pdu_type = pdu_type
    hdr.clns_version2 = clns.CLNS_VERSION2
    hdr.clns_reserved2 = 0
    hdr.clns_max_area = 3  # default maximum area addresses per ISO 10589
    tlvview = memoryview(buf)[sizeof(hdr):]
    return hdr, buf, tlvview
# Module metadata.
__author__ = 'Christian Hopps'
__date__ = 'November 7 2014'
__version__ = '1.0'
__docformat__ = "restructuredtext en"
| |
from datetime import datetime
from decimal import Decimal
from django import forms
from django.conf import settings
from django.contrib.admin import helpers
from django.contrib.admin.utils import (
NestedObjects, display_for_field, display_for_value, flatten,
flatten_fieldsets, label_for_field, lookup_field, quote,
)
from django.db import DEFAULT_DB_ALIAS, models
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils.formats import localize
from django.utils.safestring import mark_safe
from .models import (
Article, Car, Count, Event, EventGuide, Location, Site, Vehicle,
)
class NestedObjectsTests(TestCase):
    """
    Tests for ``NestedObject`` utility collection.
    """
    @classmethod
    def setUpTestData(cls):
        # A shared collector and five Count rows (num = 0..4) that the
        # individual tests connect into parent/child graphs.
        cls.n = NestedObjects(using=DEFAULT_DB_ALIAS)
        cls.objs = [Count.objects.create(num=i) for i in range(5)]
    def _check(self, target):
        # Compare the collector's nested structure (rendered as each
        # object's ``num``) against the expected nested list.
        self.assertEqual(self.n.nested(lambda obj: obj.num), target)
    def _connect(self, i, j):
        # Make objs[i] a child of objs[j].
        self.objs[i].parent = self.objs[j]
        self.objs[i].save()
    def _collect(self, *indices):
        # Collect the listed objects (by index) into the shared collector.
        self.n.collect([self.objs[i] for i in indices])
    def test_unrelated_roots(self):
        self._connect(2, 1)
        self._collect(0)
        self._collect(1)
        self._check([0, 1, [2]])
    def test_siblings(self):
        self._connect(1, 0)
        self._connect(2, 0)
        self._collect(0)
        self._check([0, [1, 2]])
    def test_non_added_parent(self):
        self._connect(0, 1)
        self._collect(0)
        self._check([0])
    def test_cyclic(self):
        self._connect(0, 2)
        self._connect(1, 0)
        self._connect(2, 1)
        self._collect(0)
        self._check([0, [1, [2]]])
    def test_queries(self):
        self._connect(1, 0)
        self._connect(2, 0)
        # 1 query to fetch all children of 0 (1 and 2)
        # 1 query to fetch all children of 1 and 2 (none)
        # Should not require additional queries to populate the nested graph.
        self.assertNumQueries(2, self._collect, 0)
    def test_on_delete_do_nothing(self):
        """
        The nested collector doesn't query for DO_NOTHING objects.
        """
        n = NestedObjects(using=DEFAULT_DB_ALIAS)
        objs = [Event.objects.create()]
        EventGuide.objects.create(event=objs[0])
        with self.assertNumQueries(2):
            # One for Location, one for Guest, and no query for EventGuide
            n.collect(objs)
    def test_relation_on_abstract(self):
        """
        NestedObjects.collect() doesn't trip (AttributeError) on the special
        notation for relations on abstract models (related_name that contains
        %(app_label)s and/or %(class)s) (#21846).
        """
        n = NestedObjects(using=DEFAULT_DB_ALIAS)
        Car.objects.create()
        n.collect([Vehicle.objects.first()])
class UtilsTests(SimpleTestCase):
    """Tests for the django.contrib.admin.utils display/label helpers."""
    # Placeholder string the admin shows for empty values in these tests.
    empty_value = '-empty-'
    def test_values_from_lookup_field(self):
        """
        Regression test for #12654: lookup_field
        """
        SITE_NAME = 'example.com'
        TITLE_TEXT = 'Some title'
        CREATED_DATE = datetime.min
        ADMIN_METHOD = 'admin method'
        SIMPLE_FUNCTION = 'function'
        INSTANCE_ATTRIBUTE = 'attr'
        class MockModelAdmin:
            def get_admin_value(self, obj):
                return ADMIN_METHOD
        def simple_function(obj):
            return SIMPLE_FUNCTION
        site_obj = Site(domain=SITE_NAME)
        article = Article(
            site=site_obj,
            title=TITLE_TEXT,
            created=CREATED_DATE,
        )
        article.non_field = INSTANCE_ATTRIBUTE
        # Each entry: lookup name (or callable) and the value lookup_field
        # is expected to resolve it to.
        verifications = (
            ('site', SITE_NAME),
            ('created', localize(CREATED_DATE)),
            ('title', TITLE_TEXT),
            ('get_admin_value', ADMIN_METHOD),
            (simple_function, SIMPLE_FUNCTION),
            ('test_from_model', article.test_from_model()),
            ('non_field', INSTANCE_ATTRIBUTE)
        )
        mock_admin = MockModelAdmin()
        for name, value in verifications:
            field, attr, resolved_value = lookup_field(name, article, mock_admin)
            if field is not None:
                resolved_value = display_for_field(resolved_value, field, self.empty_value)
            self.assertEqual(value, resolved_value)
    def test_null_display_for_field(self):
        """
        Regression test for #12550: display_for_field should handle None
        value.
        """
        display_value = display_for_field(None, models.CharField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        # A None choice with a label displays the label, not the empty value.
        display_value = display_for_field(None, models.CharField(
            choices=(
                (None, "test_none"),
            )
        ), self.empty_value)
        self.assertEqual(display_value, "test_none")
        display_value = display_for_field(None, models.DateField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        display_value = display_for_field(None, models.TimeField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        # Regression test for #13071: NullBooleanField has special
        # handling.
        display_value = display_for_field(None, models.NullBooleanField(), self.empty_value)
        expected = '<img src="%sadmin/img/icon-unknown.svg" alt="None">' % settings.STATIC_URL
        self.assertHTMLEqual(display_value, expected)
        display_value = display_for_field(None, models.BooleanField(null=True), self.empty_value)
        expected = '<img src="%sadmin/img/icon-unknown.svg" alt="None" />' % settings.STATIC_URL
        self.assertHTMLEqual(display_value, expected)
        display_value = display_for_field(None, models.DecimalField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        display_value = display_for_field(None, models.FloatField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        display_value = display_for_field(None, models.JSONField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
    def test_json_display_for_field(self):
        tests = [
            ({'a': {'b': 'c'}}, '{"a": {"b": "c"}}'),
            (['a', 'b'], '["a", "b"]'),
            ('a', '"a"'),
            ({('a', 'b'): 'c'}, "{('a', 'b'): 'c'}"),  # Invalid JSON.
        ]
        for value, display_value in tests:
            with self.subTest(value=value):
                self.assertEqual(
                    display_for_field(value, models.JSONField(), self.empty_value),
                    display_value,
                )
    def test_number_formats_display_for_field(self):
        display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
        self.assertEqual(display_value, '12345.6789')
        display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
        self.assertEqual(display_value, '12345.6789')
        display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
        self.assertEqual(display_value, '12345')
    @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
    def test_number_formats_with_thousand_separator_display_for_field(self):
        display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
        self.assertEqual(display_value, '12,345.6789')
        display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
        self.assertEqual(display_value, '12,345.6789')
        display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
        self.assertEqual(display_value, '12,345')
    def test_list_display_for_value(self):
        display_value = display_for_value([1, 2, 3], self.empty_value)
        self.assertEqual(display_value, '1, 2, 3')
        display_value = display_for_value([1, 2, 'buckle', 'my', 'shoe'], self.empty_value)
        self.assertEqual(display_value, '1, 2, buckle, my, shoe')
    @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
    def test_list_display_for_value_boolean(self):
        self.assertEqual(
            display_for_value(True, '', boolean=True),
            '<img src="/static/admin/img/icon-yes.svg" alt="True">'
        )
        self.assertEqual(
            display_for_value(False, '', boolean=True),
            '<img src="/static/admin/img/icon-no.svg" alt="False">'
        )
        self.assertEqual(display_for_value(True, ''), 'True')
        self.assertEqual(display_for_value(False, ''), 'False')
    def test_label_for_field(self):
        """
        Tests for label_for_field
        """
        self.assertEqual(
            label_for_field("title", Article),
            "title"
        )
        self.assertEqual(
            label_for_field("hist", Article),
            "History"
        )
        self.assertEqual(
            label_for_field("hist", Article, return_attr=True),
            ("History", None)
        )
        self.assertEqual(
            label_for_field("__str__", Article),
            "article"
        )
        with self.assertRaisesMessage(AttributeError, "Unable to lookup 'unknown' on Article"):
            label_for_field("unknown", Article)
        def test_callable(obj):
            return "nothing"
        self.assertEqual(
            label_for_field(test_callable, Article),
            "Test callable"
        )
        self.assertEqual(
            label_for_field(test_callable, Article, return_attr=True),
            ("Test callable", test_callable)
        )
        self.assertEqual(
            label_for_field("test_from_model", Article),
            "Test from model"
        )
        self.assertEqual(
            label_for_field("test_from_model", Article, return_attr=True),
            ("Test from model", Article.test_from_model)
        )
        self.assertEqual(
            label_for_field("test_from_model_with_override", Article),
            "not What you Expect"
        )
        # An anonymous lambda has no usable name; the label falls back.
        self.assertEqual(
            label_for_field(lambda x: "nothing", Article),
            "--"
        )
        self.assertEqual(label_for_field('site_id', Article), 'Site id')
        class MockModelAdmin:
            def test_from_model(self, obj):
                return "nothing"
            test_from_model.short_description = "not Really the Model"
        self.assertEqual(
            label_for_field("test_from_model", Article, model_admin=MockModelAdmin),
            "not Really the Model"
        )
        self.assertEqual(
            label_for_field("test_from_model", Article, model_admin=MockModelAdmin, return_attr=True),
            ("not Really the Model", MockModelAdmin.test_from_model)
        )
    def test_label_for_field_form_argument(self):
        class ArticleForm(forms.ModelForm):
            extra_form_field = forms.BooleanField()
            class Meta:
                fields = '__all__'
                model = Article
        self.assertEqual(
            label_for_field('extra_form_field', Article, form=ArticleForm()),
            'Extra form field'
        )
        msg = "Unable to lookup 'nonexistent' on Article or ArticleForm"
        with self.assertRaisesMessage(AttributeError, msg):
            label_for_field('nonexistent', Article, form=ArticleForm()),
    def test_label_for_property(self):
        # NOTE: cannot use @property decorator, because of
        # AttributeError: 'property' object has no attribute 'short_description'
        class MockModelAdmin:
            def my_property(self):
                return "this if from property"
            my_property.short_description = 'property short description'
            test_from_property = property(my_property)
        self.assertEqual(
            label_for_field("test_from_property", Article, model_admin=MockModelAdmin),
            'property short description'
        )
    def test_related_name(self):
        """
        Regression test for #13963
        """
        self.assertEqual(
            label_for_field('location', Event, return_attr=True),
            ('location', None),
        )
        self.assertEqual(
            label_for_field('event', Location, return_attr=True),
            ('awesome event', None),
        )
        self.assertEqual(
            label_for_field('guest', Event, return_attr=True),
            ('awesome guest', None),
        )
    def test_safestring_in_field_label(self):
        # safestring should not be escaped
        class MyForm(forms.Form):
            text = forms.CharField(label=mark_safe('<i>text</i>'))
            cb = forms.BooleanField(label=mark_safe('<i>cb</i>'))
        form = MyForm()
        self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
                             '<label for="id_text" class="required inline"><i>text</i>:</label>')
        self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
                             '<label for="id_cb" class="vCheckboxLabel required inline"><i>cb</i></label>')
        # normal strings needs to be escaped
        class MyForm(forms.Form):
            text = forms.CharField(label='&text')
            cb = forms.BooleanField(label='&cb')
        form = MyForm()
        self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
                             '<label for="id_text" class="required inline">&amp;text:</label>')
        self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
                             '<label for="id_cb" class="vCheckboxLabel required inline">&amp;cb</label>')
    def test_flatten(self):
        flat_all = ['url', 'title', 'content', 'sites']
        inputs = (
            ((), []),
            (('url', 'title', ('content', 'sites')), flat_all),
            (('url', 'title', 'content', 'sites'), flat_all),
            ((('url', 'title'), ('content', 'sites')), flat_all)
        )
        for orig, expected in inputs:
            self.assertEqual(flatten(orig), expected)
    def test_flatten_fieldsets(self):
        """
        Regression test for #18051
        """
        fieldsets = (
            (None, {
                'fields': ('url', 'title', ('content', 'sites'))
            }),
        )
        self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
        fieldsets = (
            (None, {
                'fields': ('url', 'title', ['content', 'sites'])
            }),
        )
        self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
    def test_quote(self):
        self.assertEqual(quote('something\nor\nother'), 'something_0Aor_0Aother')
| |
from django.db.models.query import Q
from djblets.webapi.decorators import webapi_request_fields
from djblets.webapi.fields import (BooleanFieldType,
IntFieldType,
StringFieldType)
from djblets.webapi.resources.user import UserResource as DjbletsUserResource
from reviewboard.reviews.models import Group, ReviewRequest
from reviewboard.search import search_backend_registry
from reviewboard.search.forms import RBSearchForm
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import (webapi_check_login_required,
webapi_check_local_site)
class SearchResource(WebAPIResource, DjbletsUserResource):
    """
    Provides information on users, groups and review requests.
    This is the resource for the autocomplete widget for quick search. This
    resource helps filter for users, groups and review requests.
    """
    added_in = '1.6'
    name = 'search'
    singleton = True
    # A search text shorter than this is not matched against review request
    # summaries (see _search_review_requests).
    MIN_SUMMARY_LEN = 4
    def has_access_permissions(self, *args, **kwargs):
        """Return whether or not users have access to this resource.
        This resource is accessible to any users that have access to the API.
        Args:
            *args (tuple):
                Ignored positional arguments.
            **kwargs (dict):
                Ignored keyword arguments.
        Returns:
            bool:
                Always ``True``.
        """
        return True
    @webapi_request_fields(
        optional={
            'q': {
                'type': StringFieldType,
                'description': 'The text to search for.',
            },
            'displayname': {
                'type': BooleanFieldType,
                'description': 'This field is deprecated and ignored. It '
                               'will be removed in a future release of '
                               'Review Board.',
            },
            'fullname': {
                'type': BooleanFieldType,
                'description': 'Whether or not to include users whose full '
                               'name includes the search text.',
            },
            'id': {
                'type': IntFieldType,
                'description': 'A specific review request ID to search for.',
            },
            'max_results': {
                'type': IntFieldType,
                'description': 'The maximum number of results to return '
                               'for each type of matching object. By '
                               'default, this is 25. There is a hard limit '
                               'of 200.',
            },
        },
        allow_unknown=True,
    )
    @webapi_check_local_site
    @webapi_check_login_required
    def get(self, request, max_results=None, *args, **kwargs):
        """Returns information on users, groups and review requests.
        This is used by the autocomplete widget for quick search to get
        information on users, groups and review requests. This function returns
        users' first name, last name and username, groups' name and display
        name, and review requests' ID and summary.
        """
        # Cap the per-category result count at the hard limit of 200.
        max_results = min((max_results or 25), 200)
        try:
            # We have to keep the parameter named id for backwards
            # compatibility, but it would override the builtin of the same
            # name.
            kwargs['id_q'] = kwargs.pop('id')
        except KeyError:
            pass
        return 200, {
            self.name: {
                'users': self._search_users(
                    request=request,
                    max_results=max_results,
                    *args,
                    **kwargs
                ),
                'groups': self._search_groups(
                    request=request,
                    max_results=max_results,
                    *args,
                    **kwargs
                ),
                'review_requests': self._search_review_requests(
                    request=request,
                    max_results=max_results,
                    *args,
                    **kwargs
                )
            },
        }
    def _search_users(self, request, max_results, local_site=None,
                      fullname=None, q=None, id_q=None, *args, **kwargs):
        """Search for users and return the results.
        Args:
            request (django.http.HttpRequest):
                The current request.
            max_results (int):
                The maximum number of results to return.
            local_site (reviewboard.site.models.LocalSite, optional):
                The current local site.
            fullname (bool, optional):
                Whether or not to perform a search against the users' full
                names.
            q (unicode, optional):
                The search text.
            id_q (int, optional):
                An optional ID to search against user IDs.
            *args (tuple):
                Ignored positional arguments.
            **kwargs (dict):
                Ignored keyword arguments.
        Returns:
            django.db.models.query.QuerySet or list:
                A query set for users matching the given arguments.
        """
        if (search_backend_registry.search_enabled and
            search_backend_registry.on_the_fly_indexing_enabled):
            # If search is enabled, we will use the index to perform the query.
            # NOTE(review): this passes the ID under the 'id_q' key while
            # _search_review_requests passes it under 'id' — verify which
            # key RBSearchForm actually expects.
            form = RBSearchForm(
                user=request.user,
                local_site=local_site,
                data={
                    'q': q,
                    'id_q': id_q,
                    'model_filter': [RBSearchForm.FILTER_USERS],
                }
            )
            results = []
            for result in form.search()[:max_results]:
                raw_user = {
                    'id': result.pk,
                    'username': result.username,
                    'url': result.url,
                }
                # Only expose the full name when the profile is public.
                if not result.is_profile_private:
                    raw_user['fullname'] = result.full_name
                results.append(raw_user)
            return results
        # If search is disabled, we will fall back to using database queries.
        if local_site:
            users = local_site.users.filter(is_active=True)
        else:
            users = self.model.objects.filter(is_active=True)
        if q:
            parts = q.split(' ', 1)
            if len(parts) > 1:
                # Two words: try them as "first last" and "last first".
                query = (
                    (Q(first_name__istartswith=parts[0]) &
                     Q(last_name__istartswith=parts[1])) |
                    (Q(first_name__istartswith=parts[1]) &
                     Q(last_name__istartswith=parts[0]))
                )
                if fullname:
                    query |= (Q(first_name__istartswith=q) |
                              Q(last_name__istartswith=q))
                # Name-based matches are restricted to public profiles.
                query &= Q(profile__is_private=False)
            else:
                query = (Q(username__istartswith=q) |
                         (Q(profile__is_private=False) &
                          (Q(first_name__istartswith=q) |
                           Q(last_name__istartswith=q))))
            users = users.filter(query)
        return users[:max_results]
    def _search_groups(self, request, max_results, local_site=None, q=None,
                       *args, **kwargs):
        """Search for review groups and return the results.
        Args:
            request (django.http.HttpRequest):
                The current HTTP request.
            max_results (int):
                The maximum number of results to return.
            local_site (reviewboard.site.models.LocalSite, optional):
                The current local site.
            q (unicode, optional):
                The search text.
            *args (tuple):
                Ignored positional arguments.
            **kwargs (dict):
                Ignored keyword arguments.
        Returns:
            django.db.models.query.QuerySet:
                A query set for review groups matching the given arguments.
        """
        groups = Group.objects.accessible(request.user, local_site=local_site)
        if q:
            groups = groups.filter(
                Q(name__istartswith=q) |
                Q(display_name__istartswith=q)
            )
        # Group.objects.accessible only respects visible_only for
        # non-superusers. We add this here to make the behavior consistent.
        return groups.filter(visible=True)[:max_results]
    def _search_review_requests(self, request, max_results, local_site=None,
                                q=None, id_q=None, *args, **kwargs):
        """Search for a review request and return the results.
        If indexed search is enabled, this will use the search index. Otherwise
        it will query against the database.
        Args:
            local_site (reviewboard.site.models.LocalSite, optional):
                The current local site.
            max_results (int):
                The maximum number of results to return.
            q (unicode, optional):
                The search text.
            id_q (int, optional):
                An optional ID to search against review request IDs.
            *args (tuple):
                Ignored positional arguments.
            **kwargs (dict):
                Ignored keyword arguments.
        Returns:
            django.db.models.query.QuerySet or haystack.query.SearchQuerySet:
                A query for review requests matching the given arguments.
        """
        if (search_backend_registry.search_enabled and
            search_backend_registry.on_the_fly_indexing_enabled):
            # If search is enabled, we will use the index to perform the query.
            # NOTE(review): the ID is passed under the 'id' key here but
            # under 'id_q' in _search_users — verify which key RBSearchForm
            # actually expects.
            form = RBSearchForm(
                user=request.user,
                local_site=local_site,
                data={
                    'q': q,
                    'id': id_q,
                    'model_filter': [RBSearchForm.FILTER_REVIEW_REQUESTS],
                }
            )
            return [
                {
                    'id': result.review_request_id,
                    'public': True,  # Drafts are not indexed.
                    'summary': result.summary,
                }
                for result in form.search()[:max_results]
            ]
        # If search is disabled, we will fall back to using database queries.
        review_requests = ReviewRequest.objects.public(
            filter_private=True,
            user=request.user,
            local_site=local_site,
            status=None,
        )
        query = Q()
        if q:
            if local_site:
                query |= Q(local_id__istartswith=q)
            else:
                query |= Q(id__startswith=q)
            # Only match summaries for reasonably long search strings.
            if len(q) >= self.MIN_SUMMARY_LEN:
                query |= Q(summary__istartswith=q)
        if id_q:
            if local_site:
                query |= Q(local_id__startswith=id_q)
            else:
                query |= Q(id__startswith=id_q)
        return review_requests.filter(query)[:max_results]
# Module-level singleton instance of the resource.
search_resource = SearchResource()
| |
import numpy
import six
from chainer import cuda
from chainer.functions.array import permutate
from chainer.functions.array import transpose_sequence
from chainer.functions.connection import n_step_gru as rnn
from chainer.initializers import normal
from chainer import link
from chainer.links.connection.n_step_rnn import argsort_list_descent
from chainer.links.connection.n_step_rnn import permutate_list
from chainer.utils import argument
from chainer import variable
class NStepGRUBase(link.ChainList):
"""__init__(self, n_layers, in_size, out_size, dropout, use_bi_direction)
Base link class for Stacked GRU/BiGRU links.
This link is base link class for :func:`chainer.links.NStepRNN` and
:func:`chainer.links.NStepBiRNN`.
This link's behavior depends on argument, ``use_bi_direction``.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
use_bi_direction (bool): if ``True``, use Bi-directional GRU.
if ``False``, use Uni-directional GRU.
.. seealso::
:func:`chainer.links.NStepGRU`
:func:`chainer.links.NStepBiGRU`
"""
def __init__(self, n_layers, in_size, out_size, dropout, use_bi_direction,
**kwargs):
argument.check_unexpected_kwargs(
kwargs, use_cudnn='use_cudnn argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
weights = []
direction = 2 if use_bi_direction else 1
for i in six.moves.range(n_layers):
for di in six.moves.range(direction):
weight = link.Link()
with weight.init_scope():
for j in six.moves.range(6):
if i == 0 and j < 3:
w_in = in_size
elif i > 0 and j < 3:
w_in = out_size * direction
else:
w_in = out_size
w = variable.Parameter(
normal.Normal(numpy.sqrt(1. / w_in)),
(out_size, w_in))
b = variable.Parameter(0, (out_size,))
setattr(weight, 'w%d' % j, w)
setattr(weight, 'b%d' % j, b)
weights.append(weight)
super(NStepGRUBase, self).__init__(*weights)
self.n_layers = n_layers
self.dropout = dropout
self.out_size = out_size
self.direction = direction
self.rnn = rnn.n_step_bigru if use_bi_direction else rnn.n_step_gru
def init_hx(self, xs):
shape = (self.n_layers * self.direction, len(xs), self.out_size)
with cuda.get_device_from_id(self._device_id):
hx = variable.Variable(self.xp.zeros(shape, dtype=xs[0].dtype))
return hx
def __call__(self, hx, xs, **kwargs):
    """__call__(self, hx, xs)

    Calculate all hidden states and cell states.

    .. warning::
       ``train`` argument is not supported anymore since v2.
       Instead, use ``chainer.using_config('train', train)``.
       See :func:`chainer.using_config`.

    Args:
        hx (~chainer.Variable or None): Initial hidden states. If ``None``
            is specified zero-vector is used.
        xs (list of ~chainer.Variable): List of input sequences.
            Each element ``xs[i]`` is a :class:`chainer.Variable` holding
            a sequence.

    Returns:
        tuple: ``(hy, ys)`` -- the final hidden states and the per-input
        output sequences, restored to the caller's original ordering.
    """
    # Reject the removed ``train`` keyword explicitly.
    argument.check_unexpected_kwargs(
        kwargs, train='train argument is not supported anymore. '
        'Use chainer.using_config')
    argument.assert_kwargs_empty(kwargs)
    assert isinstance(xs, (list, tuple))
    # Sort sequences into descending-length order (required by the
    # underlying n-step routine); keep the permutation so results can be
    # restored to the caller's order below.
    indices = argsort_list_descent(xs)
    xs = permutate_list(xs, indices, inv=False)
    if hx is None:
        hx = self.init_hx(xs)
    else:
        # Reorder initial states along the batch axis to match ``xs``.
        hx = permutate.permutate(hx, indices, axis=1, inv=False)
    # Convert the list of sequences into time-major mini-batches.
    trans_x = transpose_sequence.transpose_sequence(xs)
    # Gather the six weights/biases of every (layer, direction) link.
    ws = [[w.w0, w.w1, w.w2, w.w3, w.w4, w.w5] for w in self]
    bs = [[w.b0, w.b1, w.b2, w.b3, w.b4, w.b5] for w in self]
    hy, trans_y = self.rnn(
        self.n_layers, self.dropout, hx, ws, bs, trans_x)
    # Undo the batch permutation and the time-major transposition.
    hy = permutate.permutate(hy, indices, axis=1, inv=True)
    ys = transpose_sequence.transpose_sequence(trans_y)
    ys = permutate_list(ys, indices, inv=True)
    return hy, ys
class NStepGRU(NStepGRUBase):
    """__init__(self, n_layers, in_size, out_size, dropout)

    Stacked uni-directional GRU for sequences.

    A stacked version of the uni-directional GRU. It computes the hidden
    states of every layer at end-of-string and all hidden states of the
    last layer for each time step. Unlike
    :func:`chainer.functions.n_step_gru`, this link sorts its inputs in
    descending order by length and transposes the sequences itself, so
    callers simply pass a list of :class:`chainer.Variable` objects, each
    holding a sequence.

    .. warning::
       ``use_cudnn`` argument is not supported anymore since v2.
       Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
       See :func:`chainer.using_config`.

    Args:
        n_layers (int): Number of layers.
        in_size (int): Dimensionality of input vectors.
        out_size (int): Dimensionality of hidden states and output vectors.
        dropout (float): Dropout ratio.

    .. seealso:: :func:`chainer.functions.n_step_gru`
    """

    def __init__(self, n_layers, in_size, out_size, dropout, **kwargs):
        super(NStepGRU, self).__init__(
            n_layers, in_size, out_size, dropout,
            use_bi_direction=False, **kwargs)
class NStepBiGRU(NStepGRUBase):
    """__init__(self, n_layers, in_size, out_size, dropout)

    Stacked bi-directional GRU for sequences.

    A stacked version of the bi-directional GRU. It computes the hidden
    states of every layer at end-of-string and all hidden states of the
    last layer for each time step. Unlike
    :func:`chainer.functions.n_step_bigru`, this link sorts its inputs in
    descending order by length and transposes the sequences itself, so
    callers simply pass a list of :class:`chainer.Variable` objects, each
    holding a sequence.

    .. warning::
       ``use_cudnn`` argument is not supported anymore since v2.
       Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
       See :func:`chainer.using_config`.

    Args:
        n_layers (int): Number of layers.
        in_size (int): Dimensionality of input vectors.
        out_size (int): Dimensionality of hidden states and output vectors.
        dropout (float): Dropout ratio.

    .. seealso:: :func:`chainer.functions.n_step_bigru`
    """

    def __init__(self, n_layers, in_size, out_size, dropout, **kwargs):
        super(NStepBiGRU, self).__init__(
            n_layers, in_size, out_size, dropout,
            use_bi_direction=True, **kwargs)
| |
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import utils
LOG = logging.getLogger(__name__)
WRITE_DISABLED = "Write Disabled"
UNLINK_INTERVAL = 15
UNLINK_RETRIES = 30
class VMAXProvision(object):
"""Provisioning Class for Dell EMC VMAX volume drivers.
It supports VMAX arrays.
"""
def __init__(self, rest):
    """Set up the provisioning helper.

    :param rest: the REST client used for every array interaction
    """
    self.rest = rest
    self.utils = utils.VMAXUtils()
def create_storage_group(
        self, array, storagegroup_name, srp, slo, workload,
        extra_specs, do_disable_compression=False):
    """Create a new storage group.

    :param array: the array serial number
    :param storagegroup_name: the group name (String)
    :param srp: the SRP (String)
    :param slo: the SLO (String)
    :param workload: the workload (String)
    :param extra_specs: additional info
    :param do_disable_compression: disable compression flag
    :returns: storagegroup - storage group object
    """
    # Taken before the lock below, so the logged duration also includes
    # any time spent waiting for the lock.
    start_time = time.time()

    # The lock name is templated from the inner function's
    # ``storage_group`` argument, serializing concurrent operations on
    # the same group.
    @coordination.synchronized("emc-sg-{storage_group}")
    def do_create_storage_group(storage_group):
        storagegroup = self.rest.create_storage_group(
            array, storage_group, srp, slo, workload, extra_specs,
            do_disable_compression)
        LOG.debug("Create storage group took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(start_time,
                                                      time.time())})
        LOG.info("Storage group %(sg)s created successfully.",
                 {'sg': storagegroup_name})
        return storagegroup
    return do_create_storage_group(storagegroup_name)
def create_volume_from_sg(self, array, volume_name, storagegroup_name,
                          volume_size, extra_specs):
    """Create a new volume in the given storage group.

    :param array: the array serial number
    :param volume_name: the volume name (String)
    :param storagegroup_name: the storage group name
    :param volume_size: volume size (String)
    :param extra_specs: the extra specifications
    :returns: dict -- volume_dict - the volume dict
    """
    # Serialize volume creation per storage group; the lock name is
    # templated from the inner function's ``storage_group`` argument.
    @coordination.synchronized("emc-sg-{storage_group}")
    def do_create_volume_from_sg(storage_group):
        # Timed inside the lock, so the duration excludes lock wait.
        start_time = time.time()
        volume_dict = self.rest.create_volume_from_sg(
            array, volume_name, storage_group,
            volume_size, extra_specs)
        LOG.debug("Create volume from storage group "
                  "took: %(delta)s H:MM:SS.",
                  {'delta': self.utils.get_time_delta(start_time,
                                                      time.time())})
        return volume_dict
    return do_create_volume_from_sg(storagegroup_name)
def delete_volume_from_srp(self, array, device_id, volume_name):
    """Delete a volume from the srp.

    :param array: the array serial number
    :param device_id: the volume device id
    :param volume_name: the volume name
    """
    begin = time.time()
    LOG.debug("Delete volume %(volume_name)s from srp.",
              {'volume_name': volume_name})
    self.rest.delete_volume(array, device_id)
    elapsed = self.utils.get_time_delta(begin, time.time())
    LOG.debug("Delete volume took: %(delta)s H:MM:SS.",
              {'delta': elapsed})
def create_volume_snapvx(self, array, source_device_id,
                         snap_name, extra_specs):
    """Take a snapVx snapshot of a single volume.

    :param array: the array serial number
    :param source_device_id: source volume device id
    :param snap_name: the snapshot name
    :param extra_specs: the extra specifications
    """
    begin = time.time()
    LOG.debug("Create Snap Vx snapshot of: %(source)s.",
              {'source': source_device_id})
    self.rest.create_volume_snap(
        array, snap_name, source_device_id, extra_specs)
    elapsed = self.utils.get_time_delta(begin, time.time())
    LOG.debug("Create volume snapVx took: %(delta)s H:MM:SS.",
              {'delta': elapsed})
def create_volume_replica(
        self, array, source_device_id, target_device_id,
        snap_name, extra_specs, create_snap=False):
    """Create a snap vx of a source and copy to a target.

    :param array: the array serial number
    :param source_device_id: source volume device id
    :param target_device_id: target volume device id
    :param snap_name: the name for the snap shot
    :param extra_specs: extra specifications
    :param create_snap: when True, take the snapVx first
    """
    begin = time.time()
    if create_snap:
        self.create_volume_snapvx(array, source_device_id,
                                  snap_name, extra_specs)
    # Link the snapshot of the source onto the target device.
    self.rest.modify_volume_snap(
        array, source_device_id, target_device_id, snap_name,
        extra_specs, link=True)
    elapsed = self.utils.get_time_delta(begin, time.time())
    LOG.debug("Create element replica took: %(delta)s H:MM:SS.",
              {'delta': elapsed})
def break_replication_relationship(
        self, array, target_device_id, source_device_id, snap_name,
        extra_specs):
    """Unlink a snapshot from its target volume.

    :param array: the array serial number
    :param target_device_id: target volume device id
    :param source_device_id: source volume device id
    :param snap_name: the name for the snap shot
    :param extra_specs: extra specifications
    """
    LOG.debug("Break snap vx link relationship between: %(src)s "
              "and: %(tgt)s.",
              {'tgt': target_device_id, 'src': source_device_id})
    # The retry loop lives in _unlink_volume.
    self._unlink_volume(array, source_device_id, target_device_id,
                        snap_name, extra_specs)
def _unlink_volume(
        self, array, source_device_id, target_device_id, snap_name,
        extra_specs):
    """Unlink a target volume from its source volume.

    Retries the unlink every UNLINK_INTERVAL seconds until it succeeds
    or UNLINK_RETRIES attempts have been made.

    :param array: the array serial number
    :param source_device_id: the source device id
    :param target_device_id: the target device id
    :param snap_name: the snap name
    :param extra_specs: extra specifications
    :return: return code -- 30 when retries are exhausted, otherwise the
        looping call's default return value
    """
    def _unlink_vol():
        """Called at an interval until the synchronization is finished.

        :raises: loopingcall.LoopingCallDone
        """
        # ``kwargs`` is the mutable state dict created below; the
        # closure binds it late, so defining it after this function
        # is fine.
        retries = kwargs['retries']
        try:
            kwargs['retries'] = retries + 1
            if not kwargs['modify_vol_success']:
                self.rest.modify_volume_snap(
                    array, source_device_id, target_device_id, snap_name,
                    extra_specs, unlink=True)
                kwargs['modify_vol_success'] = True
        except exception.VolumeBackendAPIException:
            # Backend not ready yet -- swallow and retry on next tick.
            pass
        if kwargs['retries'] > UNLINK_RETRIES:
            LOG.error("_unlink_volume failed after %(retries)d "
                      "tries.", {'retries': retries})
            # Give up and surface a non-zero return code.
            raise loopingcall.LoopingCallDone(retvalue=30)
        if kwargs['modify_vol_success']:
            raise loopingcall.LoopingCallDone()

    # Shared retry state mutated by the closure above.
    kwargs = {'retries': 0,
              'modify_vol_success': False}
    timer = loopingcall.FixedIntervalLoopingCall(_unlink_vol)
    rc = timer.start(interval=UNLINK_INTERVAL).wait()
    return rc
def delete_volume_snap(self, array, snap_name, source_device_id):
    """Remove a snapVx snapshot from a volume.

    :param array: the array serial number
    :param snap_name: the snapshot name
    :param source_device_id: the source device id
    """
    LOG.debug("Delete SnapVx: %(snap_name)s for volume %(vol)s.",
              {'snap_name': snap_name, 'vol': source_device_id})
    self.rest.delete_volume_snap(array, snap_name, source_device_id)
def delete_temp_volume_snap(self, array, snap_name, source_device_id):
    """Delete the temporary snapshot created for clone operations.

    There can be instances where the source and target both attempt to
    delete a temp snapshot simultaneously, so we must lock the snap and
    then double check it is on the array.

    :param array: the array serial number
    :param snap_name: the snapshot name
    :param source_device_id: the source device id
    """
    # Lock name is templated from ``snapvx_name`` so only one process
    # operates on a given snapshot at a time.
    @coordination.synchronized("emc-snapvx-{snapvx_name}")
    def do_delete_temp_snap(snapvx_name):
        # Ensure snap has not been recently deleted
        if self.rest.get_volume_snap(
                array, source_device_id, snapvx_name):
            self.delete_volume_snap(array, snapvx_name, source_device_id)
    do_delete_temp_snap(snap_name)
def delete_volume_snap_check_for_links(self, array, snap_name,
                                       source_device, extra_specs):
    """Check if a snap has any links before deletion.

    If a snapshot has any linked targets, each link is broken before the
    snapshot itself is removed.

    :param array: the array serial number
    :param snap_name: the snapshot name
    :param source_device: the source device id
    :param extra_specs: the extra specifications
    """
    LOG.debug("Check for linked devices to SnapVx: %(snap_name)s "
              "for volume %(vol)s.",
              {'snap_name': snap_name, 'vol': source_device})
    links = self.rest.get_snap_linked_device_list(
        array, source_device, snap_name)
    for linked in links:
        self.break_replication_relationship(
            array, linked['targetDevice'], source_device, snap_name,
            extra_specs)
    self.delete_volume_snap(array, snap_name, source_device)
def extend_volume(self, array, device_id, new_size, extra_specs):
    """Extend a volume to a new size.

    :param array: the array serial number
    :param device_id: the volume device id
    :param new_size: the new size (GB)
    :param extra_specs: the extra specifications
    """
    begin = time.time()
    self.rest.extend_volume(array, device_id, new_size, extra_specs)
    elapsed = self.utils.get_time_delta(begin, time.time())
    LOG.debug("Extend VMAX volume took: %(delta)s H:MM:SS.",
              {'delta': elapsed})
def get_srp_pool_stats(self, array, array_info):
    """Get the srp capacity stats.

    :param array: the array serial number
    :param array_info: the array dict
    :returns: total_capacity_gb
    :returns: remaining_capacity_gb
    :returns: subscribed_capacity_gb
    :returns: array_reserve_percent
    :returns: wlp_enabled
    """
    # Defaults returned when the SRP details are incomplete.
    total_capacity_gb = 0
    remaining_capacity_gb = 0
    allocated_capacity_gb = None
    subscribed_capacity_gb = 0
    array_reserve_percent = 0
    wlp_enabled = False
    srp = array_info['srpName']
    LOG.debug(
        "Retrieving capacity for srp %(srpName)s on array %(array)s.",
        {'srpName': srp, 'array': array})
    srp_details = self.rest.get_srp_by_name(array, srp)
    if not srp_details:
        LOG.error("Unable to retrieve srp instance of %(srpName)s on "
                  "array %(array)s.",
                  {'srpName': srp, 'array': array})
        return 0, 0, 0, 0, False
    try:
        total_capacity_gb = srp_details['total_usable_cap_gb']
        allocated_capacity_gb = srp_details['total_allocated_cap_gb']
        subscribed_capacity_gb = srp_details['total_subscribed_cap_gb']
        remaining_capacity_gb = float(
            total_capacity_gb - allocated_capacity_gb)
        array_reserve_percent = srp_details['reserved_cap_percent']
    except KeyError:
        # A missing key aborts the sequence: values assigned before the
        # missing key are kept, the rest keep their defaults above.
        pass
    total_slo_capacity = (
        self._get_remaining_slo_capacity_wlp(
            array, srp, array_info))
    if total_slo_capacity != -1 and allocated_capacity_gb:
        # WLP headroom is available -- prefer it over the raw SRP figure.
        remaining_capacity_gb = float(
            total_slo_capacity - allocated_capacity_gb)
        wlp_enabled = True
    else:
        LOG.debug(
            "Remaining capacity %(remaining_capacity_gb)s "
            "GBs is determined from SRP capacity "
            "and not the SLO capacity. Performance may "
            "not be what you expect.",
            {'remaining_capacity_gb': remaining_capacity_gb})
    return (total_capacity_gb, remaining_capacity_gb,
            subscribed_capacity_gb, array_reserve_percent, wlp_enabled)
def _get_remaining_slo_capacity_wlp(self, array, srp, array_info):
    """Get the remaining capacity of the SLO/workload combination.

    Derived from the WLP portion of Unisphere; see the Unisphere doc and
    the readme doc for details.

    :param array: the array serial number
    :param srp: the srp name
    :param array_info: array info dict
    :returns: remaining_capacity (-1 when no SLO is set or no headroom
        figure is available)
    """
    remaining_capacity = -1
    slo = array_info['SLO']
    if slo:
        headroom = self.rest.get_headroom_capacity(
            array, srp, slo, array_info['Workload'])
        if headroom:
            remaining_capacity = headroom
            LOG.debug("Received remaining SLO Capacity %(remaining)s GBs "
                      "for SLO %(SLO)s and workload %(workload)s.",
                      {'remaining': remaining_capacity,
                       'SLO': slo,
                       'workload': array_info['Workload']})
    return remaining_capacity
def verify_slo_workload(self, array, slo, workload, srp):
"""Check if SLO and workload values are valid.
:param array: the array serial number
:param slo: Service Level Object e.g bronze
:param workload: workload e.g DSS
:param srp: the storage resource pool name
:returns: boolean
"""
is_valid_slo, is_valid_workload = False, False
if workload and workload.lower() == 'none':
workload = None
if not workload:
is_valid_workload = True
if slo and slo.lower() == 'none':
slo = None
valid_slos = self.rest.get_slo_list(array)
valid_workloads = self.rest.get_workload_settings(array)
for valid_slo in valid_slos:
if slo == valid_slo:
is_valid_slo = True
break
for valid_workload in valid_workloads:
if workload == valid_workload:
is_valid_workload = True
break
if not slo:
is_valid_slo = True
if workload:
is_valid_workload = False
if not is_valid_slo:
LOG.error(
"SLO: %(slo)s is not valid. Valid values are: "
"%(valid_slos)s.", {'slo': slo, 'valid_slos': valid_slos})
if not is_valid_workload:
LOG.error(
"Workload: %(workload)s is not valid. Valid values are "
"%(valid_workloads)s. Note you cannot "
"set a workload without an SLO.",
{'workload': workload, 'valid_workloads': valid_workloads})
return is_valid_slo, is_valid_workload
def get_slo_workload_settings_from_storage_group(
self, array, sg_name):
"""Get slo and workload settings from a storage group.
:param array: the array serial number
:param sg_name: the storage group name
:returns: storage group slo settings
"""
slo = 'NONE'
workload = 'NONE'
storage_group = self.rest.get_storage_group(array, sg_name)
if storage_group:
try:
slo = storage_group['slo']
workload = storage_group['workload']
except KeyError:
pass
else:
exception_message = (_(
"Could not retrieve storage group %(sg_name)s. ") %
{'sg_name': sg_name})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(data=exception_message)
return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload}
def break_rdf_relationship(self, array, device_id, target_device,
                           rdf_group, rep_extra_specs, state):
    """Break the rdf relationship between a pair of devices.

    :param array: the array serial number
    :param device_id: the source device id
    :param target_device: target device id
    :param rdf_group: the rdf group number
    :param rep_extra_specs: replication extra specs
    :param state: the state of the rdf pair
    """
    pair = {'src': device_id, 'tgt': target_device}
    LOG.info("Splitting rdf pair: source device: %(src)s "
             "target device: %(tgt)s.", pair)
    if state == 'Synchronized':
        # A synchronized pair must be split before deletion.
        self.rest.modify_rdf_device_pair(
            array, device_id, rdf_group, rep_extra_specs, split=True)
    LOG.info("Deleting rdf pair: source device: %(src)s "
             "target device: %(tgt)s.", pair)
    self.rest.delete_rdf_pair(array, device_id, rdf_group)
def failover_volume(self, array, device_id, rdf_group,
                    extra_specs, local_vol_state, failover):
    """Failover or back a volume pair.

    :param array: the array serial number
    :param device_id: the source device id
    :param rdf_group: the rdf group number
    :param extra_specs: extra specs
    :param local_vol_state: the local volume state
    :param failover: flag to indicate failover or failback -- bool
    """
    if local_vol_state == WRITE_DISABLED:
        # Nothing to do -- the volume is already on the remote side.
        LOG.info("Volume %(dev)s is already failed over.",
                 {'dev': device_id})
        return
    action = "Failing over" if failover else "Failing back"
    LOG.info("%(action)s rdf pair: source device: %(src)s ",
             {'action': action, 'src': device_id})
    self.rest.modify_rdf_device_pair(
        array, device_id, rdf_group, extra_specs, split=False)
def create_volume_group(self, array, group_name, extra_specs):
    """Create a generic volume group.

    A generic group is a storage group with no SRP, SLO or workload.

    :param array: the array serial number
    :param group_name: the name of the group
    :param extra_specs: the extra specifications
    :returns: volume_group
    """
    volume_group = self.create_storage_group(
        array, group_name, None, None, None, extra_specs)
    return volume_group
def create_group_replica(
        self, array, source_group, snap_name, extra_specs):
    """Create a replica (snapVx) of a volume group.

    :param array: the array serial number
    :param source_group: the source group name
    :param snap_name: the name for the snap shot
    :param extra_specs: extra specifications
    """
    LOG.debug("Creating Snap Vx snapshot of storage group: %(srcGroup)s.",
              {'srcGroup': source_group})
    # Group snapshots are taken at the storage-group level.
    self.rest.create_storagegroup_snap(
        array, source_group, snap_name, extra_specs)
def delete_group_replica(self, array, snap_name,
                         source_group_name):
    """Delete a group snapshot.

    The caller has already verified that the snapshot exists, so it is
    deleted unconditionally here.

    :param array: the array serial number
    :param snap_name: the name for the snap shot
    :param source_group_name: the source group name
    """
    LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
              "snapshot: %(snap_name)s.",
              {'snap_name': snap_name,
               'srcGroup': source_group_name})
    self.rest.delete_storagegroup_snap(array, snap_name, source_group_name)
def link_and_break_replica(self, array, source_group_name,
                           target_group_name, snap_name, extra_specs,
                           delete_snapshot=False):
    """Links a group snap and breaks the relationship.

    :param array: the array serial
    :param source_group_name: the source group name
    :param target_group_name: the target group name
    :param snap_name: the snapshot name
    :param extra_specs: extra specifications
    :param delete_snapshot: delete snapshot flag
    """
    groups = {'srcGroup': source_group_name,
              'tgtGroup': target_group_name}
    # Step 1: link the group snapshot onto the target group.
    LOG.debug("Linking Snap Vx snapshot: source group: %(srcGroup)s "
              "targetGroup: %(tgtGroup)s.", groups)
    self.rest.modify_storagegroup_snap(
        array, source_group_name, target_group_name, snap_name,
        extra_specs, link=True)
    # Step 2: break the link again (retries happen inside _unlink_group).
    LOG.debug("Unlinking Snap Vx snapshot: source group: %(srcGroup)s "
              "targetGroup: %(tgtGroup)s.", groups)
    self._unlink_group(array, source_group_name,
                       target_group_name, snap_name, extra_specs)
    # Step 3: optionally drop the snapshot itself.
    if delete_snapshot:
        LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
                  "snapshot: %(snap_name)s.",
                  {'srcGroup': source_group_name,
                   'snap_name': snap_name})
        self.rest.delete_storagegroup_snap(array, snap_name,
                                           source_group_name)
def _unlink_group(
        self, array, source_group_name, target_group_name, snap_name,
        extra_specs):
    """Unlink a target group from its source group.

    Retries the unlink every UNLINK_INTERVAL seconds until it succeeds
    or UNLINK_RETRIES attempts have been made.

    :param array: the array serial number
    :param source_group_name: the source group name
    :param target_group_name: the target device name
    :param snap_name: the snap name
    :param extra_specs: extra specifications
    :returns: return code -- 30 when retries are exhausted, otherwise the
        looping call's default return value
    """
    def _unlink_grp():
        """Called at an interval until the synchronization is finished.

        :raises: loopingcall.LoopingCallDone
        """
        # ``kwargs`` is the mutable state dict created below; the
        # closure binds it late, so defining it after this function
        # is fine.
        retries = kwargs['retries']
        try:
            kwargs['retries'] = retries + 1
            if not kwargs['modify_grp_snap_success']:
                self.rest.modify_storagegroup_snap(
                    array, source_group_name, target_group_name,
                    snap_name, extra_specs, unlink=True)
                kwargs['modify_grp_snap_success'] = True
        except exception.VolumeBackendAPIException:
            # Backend not ready yet -- swallow and retry on next tick.
            pass
        if kwargs['retries'] > UNLINK_RETRIES:
            LOG.error("_unlink_grp failed after %(retries)d "
                      "tries.", {'retries': retries})
            # Give up and surface a non-zero return code.
            raise loopingcall.LoopingCallDone(retvalue=30)
        if kwargs['modify_grp_snap_success']:
            raise loopingcall.LoopingCallDone()

    # Shared retry state mutated by the closure above.
    kwargs = {'retries': 0,
              'modify_grp_snap_success': False}
    timer = loopingcall.FixedIntervalLoopingCall(_unlink_grp)
    rc = timer.start(interval=UNLINK_INTERVAL).wait()
    return rc
| |
"""Network Authentication Helpers
Contains interface (MultiDomainBasicAuth) and associated glue code for
providing credentials in the context of network requests.
"""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import logging
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.utils import get_netrc_auth
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.utils.misc import (
ask,
ask_input,
ask_password,
remove_auth_from_url,
split_auth_netloc_from_url,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Dict, Optional, Tuple
from pip._internal.vcs.versioncontrol import AuthInfo
Credentials = Tuple[str, str, str]
logger = logging.getLogger(__name__)
try:
import keyring # noqa
except ImportError:
keyring = None
except Exception as exc:
logger.warning(
"Keyring is skipped due to an exception: %s", str(exc),
)
keyring = None
def get_keyring_auth(url, username):
    """Return the tuple auth for a given url from keyring."""
    if not url or not keyring:
        return None
    try:
        # Newer keyring versions expose get_credential; older ones only
        # have get_password.
        get_credential = getattr(keyring, "get_credential", None)
        if get_credential is not None:
            logger.debug("Getting credentials from keyring for %s", url)
            cred = get_credential(url, username)
            if cred is not None:
                return cred.username, cred.password
            return None
        if username:
            logger.debug("Getting password from keyring for %s", url)
            password = keyring.get_password(url, username)
            if password:
                return username, password
    except Exception as exc:
        logger.warning(
            "Keyring is skipped due to an exception: %s", str(exc),
        )
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True, index_urls=None):
    # type: (bool, Optional[Values]) -> None
    self.prompting = prompting
    self.index_urls = index_urls
    # Cache of credentials already resolved this session, keyed by
    # netloc.
    self.passwords = {}  # type: Dict[str, AuthInfo]
    # Credentials the user typed in and agreed to persist. Set by
    # handle_401 when keyring is available; once a request using them
    # authenticates, the caller should invoke ``save_credentials`` to
    # write them to keyring.
    self._credentials_to_save = None  # type: Optional[Credentials]
def _get_index_url(self, url):
    """Return the original index URL matching the requested URL.

    Cached or dynamically generated credentials may work against
    the original index URL rather than just the netloc.

    The provided url should have had its username and password
    removed already. If the original index url had credentials then
    they will be included in the return value.

    Returns None if no matching index was found, or if --no-index
    was specified by the user.
    """
    if not url or not self.index_urls:
        return None
    for index_url in self.index_urls:
        # Compare against the credential-free, slash-terminated prefix.
        prefix = remove_auth_from_url(index_url).rstrip("/") + "/"
        if url.startswith(prefix):
            return index_url
def _get_new_credentials(self, original_url, allow_netrc=True,
                         allow_keyring=True):
    """Find and return credentials for the specified URL.

    Sources are consulted in priority order: credentials embedded in the
    URL itself, credentials embedded in a matching index URL, netrc
    (when ``allow_netrc``), then keyring (when ``allow_keyring``). May
    return a partial ``(username, password)`` pair where one or both
    elements are ``None``.
    """
    # Split the credentials and netloc from the url.
    url, netloc, url_user_password = split_auth_netloc_from_url(
        original_url,
    )
    # Start with the credentials embedded in the url
    username, password = url_user_password
    if username is not None and password is not None:
        logger.debug("Found credentials in url for %s", netloc)
        return url_user_password
    # Find a matching index url for this request
    index_url = self._get_index_url(url)
    if index_url:
        # Split the credentials from the url.
        index_info = split_auth_netloc_from_url(index_url)
        if index_info:
            index_url, _, index_url_user_password = index_info
            logger.debug("Found index url %s", index_url)
    # If an index URL was found, try its embedded credentials
    if index_url and index_url_user_password[0] is not None:
        username, password = index_url_user_password
        if username is not None and password is not None:
            logger.debug("Found credentials in index url for %s", netloc)
            return index_url_user_password
    # Get creds from netrc if we still don't have them
    if allow_netrc:
        netrc_auth = get_netrc_auth(original_url)
        if netrc_auth:
            logger.debug("Found credentials in netrc for %s", netloc)
            return netrc_auth
    # If we don't have a password and keyring is available, use it.
    if allow_keyring:
        # The index url is more specific than the netloc, so try it first
        kr_auth = (
            get_keyring_auth(index_url, username) or
            get_keyring_auth(netloc, username)
        )
        if kr_auth:
            logger.debug("Found credentials in keyring for %s", netloc)
            return kr_auth
    # Fall back to whatever partial credentials were found in the URL.
    return username, password
def _get_url_and_credentials(self, original_url):
    """Return the credentials to use for the provided URL.

    If allowed, netrc and keyring may be used to obtain the
    correct credentials.

    Returns (url_without_credentials, username, password). Note
    that even if the original URL contains credentials, this
    function may return a different username and password.
    """
    url, netloc, _ = split_auth_netloc_from_url(original_url)
    # Use any stored credentials that we have for this netloc
    username, password = self.passwords.get(netloc, (None, None))
    if username is None and password is None:
        # No stored credentials. Acquire new credentials without prompting
        # the user. (e.g. from netrc, keyring, or the URL itself)
        username, password = self._get_new_credentials(original_url)
    if username is not None or password is not None:
        # Convert the username and password if they're None, so that
        # this netloc will show up as "cached" in the conditional above.
        # Further, HTTPBasicAuth doesn't accept None, so it makes sense to
        # cache the value that is going to be used.
        username = username or ""
        password = password or ""
        # Store any acquired credentials.
        self.passwords[netloc] = (username, password)
    # Invariant: either both are set (possibly to "") or both are None.
    assert (
        # Credentials were found
        (username is not None and password is not None) or
        # Credentials were not found
        (username is None and password is None)
    ), "Could not load credentials from url: {}".format(original_url)
    return url, username, password
def __call__(self, req):
    """Attach basic-auth credentials and a 401 handler to *req*."""
    # Resolve credentials for this request's URL.
    url, username, password = self._get_url_and_credentials(req.url)
    # Strip any credentials embedded in the URL itself.
    req.url = url
    if username is not None and password is not None:
        # Apply HTTP basic auth with the resolved credentials.
        req = HTTPBasicAuth(username, password)(req)
    # Intercept 401 responses so we can prompt and retry.
    req.register_hook("response", self.handle_401)
    return req
# Factored out to allow for easy patching in tests
def _prompt_for_password(self, netloc):
    """Prompt for a username/password for *netloc*.

    Returns a 3-tuple ``(username, password, should_save)`` where
    ``should_save`` indicates the password was typed in (rather than
    pulled from keyring) and so may be offered for saving.
    """
    username = ask_input("User for {}: ".format(netloc))
    if not username:
        # Bug fix: the caller (handle_401) unpacks three values, so the
        # empty-username path must also return a 3-tuple; returning the
        # old 2-tuple raised ValueError on unpack.
        return None, None, False
    auth = get_keyring_auth(netloc, username)
    if auth:
        return auth[0], auth[1], False
    password = ask_password("Password: ")
    return username, password, True
# Factored out to allow for easy patching in tests
def _should_save_password_to_keyring(self):
    """Return True if the user opts to persist credentials to keyring."""
    if not keyring:
        return False
    answer = ask("Save credentials to keyring [y/N]: ", ["y", "n"])
    return answer == "y"
def handle_401(self, resp, **kwargs):
    """Response hook: on a 401, prompt for credentials and retry once.

    Non-401 responses (or 401s when prompting is disabled) pass through
    unchanged; otherwise the request is re-sent with the entered
    credentials and the new response is returned.
    """
    # We only care about 401 responses, anything else we want to just
    # pass through the actual response
    if resp.status_code != 401:
        return resp
    # We are not able to prompt the user so simply return the response
    if not self.prompting:
        return resp
    parsed = urllib_parse.urlparse(resp.url)
    # Prompt the user for a new username and password
    username, password, save = self._prompt_for_password(parsed.netloc)
    # Store the new username and password to use for future requests
    self._credentials_to_save = None
    if username is not None and password is not None:
        self.passwords[parsed.netloc] = (username, password)
        # Prompt to save the password to keyring
        if save and self._should_save_password_to_keyring():
            self._credentials_to_save = (parsed.netloc, username, password)
    # Consume content and release the original connection to allow our new
    # request to reuse the same one.
    resp.content
    resp.raw.release_conn()
    # Add our new username and password to the request
    req = HTTPBasicAuth(username or "", password or "")(resp.request)
    req.register_hook("response", self.warn_on_401)
    # On successful request, save the credentials that were used to
    # keyring. (Note that if the user responded "no" above, this member
    # is not set and nothing will be saved.)
    if self._credentials_to_save:
        req.register_hook("response", self.save_credentials)
    # Send our new request
    new_resp = resp.connection.send(req, **kwargs)
    new_resp.history.append(resp)
    return new_resp
def warn_on_401(self, resp, **kwargs):
    """Response callback to warn about incorrect credentials."""
    if resp.status_code != 401:
        return
    logger.warning(
        '401 Error, Credentials not correct for %s', resp.request.url,
    )
def save_credentials(self, resp, **kwargs):
    """Response callback to save credentials on success."""
    assert keyring is not None, "should never reach here without keyring"
    if not keyring:
        return
    # Take the pending credentials and clear them regardless of outcome.
    creds, self._credentials_to_save = self._credentials_to_save, None
    if not creds or resp.status_code >= 400:
        return
    try:
        logger.info('Saving credentials to keyring')
        keyring.set_password(*creds)
    except Exception:
        logger.exception('Failed to save credentials')
| |
import functools
import operator
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import average_pooling_nd_kernel
from chainer.functions.pooling import pooling_nd
from chainer.utils import conv
from chainer.utils import conv_nd
def _get_conv_slices(
        size, k, s, p, cover_all=False, d=1, include_pad=True, dtype='l'):
    """Return start/end indices of every pooling patch along one axis.

    Returns:
        A tuple of two 1-D :class:`numpy.ndarray`\\ s holding, for each
        output position, the first index and the one-past-last index of its
        patch.  With ``include_pad=False`` the indices are clipped to
        ``[0, size]`` so the padded region is excluded from the patch.
    """
    n_out = conv.get_conv_outsize(size, k, s, p, cover_all, d)
    starts = numpy.arange(n_out, dtype=dtype) * s - p
    ends = starts + k
    if include_pad:
        return starts, ends
    return numpy.maximum(starts, 0), numpy.minimum(ends, size)
class AveragePoolingND(pooling_nd._PoolingND):

    """Average pooling over a set of N-dimensional planes.

    .. warning::

        This feature is experimental. The interface can change in the future.
    """

    def __init__(
            self, ndim, ksize, stride=None, pad=0, cover_all=False,
            pad_value=0):
        """Initialize pooling hyper-parameters.

        Args:
            ndim (int): Number of spatial dimensions.
            ksize: Pooling window size (int or tuple of ints).
            stride: Window stride; handled by the base class.
            pad: Zero-padding width.
            cover_all (bool): Must be ``False``; not supported.
            pad_value (0 or None): ``0`` counts the zero padding in the
                average; ``None`` excludes the padded region from the count.
        """
        # Only the two supported padded-region policies are accepted.
        if not (pad_value is None or pad_value == 0):
            raise ValueError(
                'pad_value must be either 0 or None, not {}.'.format(
                    pad_value))
        # TODO(takagi) Support cover_all mode.
        if cover_all is True:
            raise ValueError('`cover_all` mode is not supported yet.')
        super(AveragePoolingND, self).__init__(
            ndim, ksize, stride=stride, pad=pad, cover_all=cover_all)
        self.pad_value = pad_value

    def _get_pooling_width(self, xp, dims, dtype):
        """Return per-output-position window sizes excluding padding.

        ``dims`` are spatial sizes of the array being pooled; the result has
        one entry per output position along each axis, combined into an N-D
        array via successive outer products.
        """
        width = None
        for d, k, s, p in six.moves.zip(
                dims, self.ksize, self.stride, self.pad):
            starts, ends = _get_conv_slices(
                d, k, s, p, cover_all=self.cover_all, include_pad=False,
                dtype=dtype)
            w = ends - starts
            if width is None:
                width = w
            else:
                # Outer product: fold this axis's widths into the N-D array.
                width = numpy.tensordot(width[..., None], w[None, ...], axes=1)
        if xp is not numpy:
            # Widths are always computed with numpy; move to GPU if needed.
            width = cuda.cupy.array(width)
        return width

    def forward_cpu(self, inputs):
        """CPU forward: im2col, then mean (or padding-excluded mean)."""
        x, = inputs
        # Remember input geometry for the gradient computation.
        self._in_shape = x.shape
        self._in_dtype = x.dtype
        col = conv_nd.im2col_nd_cpu(
            x, self.ksize, self.stride, self.pad, cover_all=self.cover_all)
        # mean along (_, _, k_1, k_2, ..., k_N, _, ..., _)
        y_axis = tuple(six.moves.range(2, 2 + len(self.ksize)))
        if self.pad_value is None:
            # Exclude padding: divide each window sum by the number of
            # non-padding elements it covers.
            dims = x.shape[2:]
            width = self._get_pooling_width(numpy, dims, x.dtype)
            y = col.sum(axis=y_axis) / width
        else:
            assert self.pad_value == 0
            # Include padding: plain mean over the full window.
            y = col.mean(axis=y_axis)
        return y,

    def forward_gpu(self, inputs):
        """GPU forward: cuDNN when eligible, else a custom CUDA kernel."""
        if chainer.should_use_cudnn('>=auto') and 2 <= self.ndim <= 3:
            # With cuDNN v3 or greater, use cuDNN implementation for inputs
            # with spatial dimensions of two or more.
            self.retain_inputs((0,))
            return super(AveragePoolingND, self).forward_gpu(inputs)
        x, = inputs
        self._in_shape = x.shape
        self._in_dtype = x.dtype
        n, c = x.shape[:2]
        idims = x.shape[2:]
        odims = tuple(
            conv.get_conv_outsize(d, k, s, p, cover_all=self.cover_all)
            for (d, k, s, p) in six.moves.zip(
                idims, self.ksize, self.stride, self.pad))
        # (n, c, y_1, y_2, ..., y_N)
        y_shape = (n, c) + odims
        y = cuda.cupy.empty(y_shape, dtype=x.dtype)
        if self.pad_value is None:
            # Per-output reciprocal widths; the kernel multiplies by coeff.
            coeff = self._get_pooling_width(cuda.cupy, idims, x.dtype)
            coeff = cuda.cupy.reciprocal(coeff, out=coeff)
        else:
            assert self.pad_value == 0
            # Constant coefficient: 1 / prod(ksize).
            coeff = 1. / functools.reduce(operator.mul, self.ksize)
        in_params, out_params, operation, name = \
            average_pooling_nd_kernel.AveragePoolingNDKernelForward.generate(
                self.ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            x.reduced_view(),
            *(idims + odims + self.ksize + self.stride + self.pad
              + (coeff, y)))
        return y,

    def backward(self, indexes, gy):
        return AveragePoolingNDGrad(self).apply(gy)

    def create_pool_desc(self):
        """Create the cuDNN pooling descriptor matching ``pad_value``."""
        if self.pad_value is None:
            pooling_mode = (
                cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING)
        else:
            assert self.pad_value == 0
            pooling_mode = (
                cuda.cuda.cudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING)
        return cuda.cudnn.create_pooling_descriptor(
            self.ksize, self.stride, self.pad, pooling_mode)
class AveragePoolingNDGrad(function_node.FunctionNode):

    """Gradient of N-dimensional average pooling.

    Copies the hyper-parameters of the forward :class:`AveragePoolingND`
    node so the gradient can be computed independently of it.
    """

    def __init__(self, apoolnd):
        self.ndim = apoolnd.ndim
        self.ksize = apoolnd.ksize
        self.stride = apoolnd.stride
        self.pad = apoolnd.pad
        self.cover_all = apoolnd.cover_all
        self._used_cudnn = apoolnd._used_cudnn
        if not self._used_cudnn:
            # Geometry of the forward input; needed to shape gx.
            self._in_shape = apoolnd._in_shape
            self._in_dtype = apoolnd._in_dtype
        self.pad_value = apoolnd.pad_value
        # Keep the forward node: it owns _get_pooling_width() and, on the
        # cuDNN path, the retained input.
        self.apoolnd = apoolnd

    def forward_cpu(self, gys):
        gy, = gys
        idims = self._in_shape[2:]
        odims = gy.shape[2:]
        colon = slice(None, None, None)
        # Replicate each output gradient over every position of its pooling
        # window, then scatter-add back to the input positions.
        gy_index = (colon, colon) + (None,) * len(idims)
        gcol_reps = (1, 1) + self.ksize + (1,) * len(odims)
        gcol = numpy.tile(gy[gy_index], gcol_reps)
        gx = conv_nd.col2im_nd_cpu(gcol, self.stride, self.pad, idims)
        if self.pad_value is None:
            # BUG FIX: _get_pooling_width is a method of AveragePoolingND,
            # not of this FunctionNode, so `self._get_pooling_width(...)`
            # raised AttributeError.  Delegate to the stored forward node.
            # NOTE(review): the forward pass derives widths from the *input*
            # dims and divides the pooled outputs, whereas this branch passes
            # `odims` and divides input-shaped `gx` -- verify the dims
            # argument and the division target against forward_cpu above.
            width = self.apoolnd._get_pooling_width(numpy, odims, gx.dtype)
            numpy.divide(gx, width, out=gx)
        else:
            gx /= functools.reduce(operator.mul, self.ksize)
        return gx,

    def forward_gpu(self, gys):
        if self._used_cudnn:
            # cuDNN computed the forward pass; let it compute the backward
            # pass from the retained input as well.
            x, = self.apoolnd.get_retained_inputs()
            return self.apoolnd.backward_gpu((x.data,), gys)
        gy, = gys
        n, c = self._in_shape[:2]
        idims = self._in_shape[2:]
        odims = gy.shape[2:]
        gx = cuda.cupy.empty(self._in_shape, self._in_dtype)
        if self.pad_value is None:
            # BUG FIX: same AttributeError as in forward_cpu -- use the
            # forward node's helper (same NOTE(review) about `odims`).
            coeff = self.apoolnd._get_pooling_width(cuda.cupy, odims, gy.dtype)
            coeff = cuda.cupy.reciprocal(coeff, out=coeff)
        else:
            coeff = 1. / functools.reduce(operator.mul, self.ksize)
        in_params, out_params, operation, name = \
            average_pooling_nd_kernel.AveragePoolingNDKernelBackward.generate(
                self.ndim)
        cuda.elementwise(in_params, out_params, operation, name)(
            gy.reduced_view(),
            *(idims + odims + self.ksize + self.stride + self.pad
              + (coeff, gx)))
        return gx,

    def backward(self, indexes, grad_outputs):
        # NOTE(review): pad_value is not forwarded, so the double backward
        # always uses the include-padding (pad_value=0) variant -- confirm
        # this is intended when self.pad_value is None.
        return AveragePoolingND(
            self.ndim, self.ksize, self.stride, self.pad,
            cover_all=False).apply(grad_outputs)
def average_pooling_nd(x, ksize, stride=None, pad=0, pad_value=0):
    """N-dimensionally spatial average pooling function.

    .. warning::

        This feature is experimental. The interface can change in the future.

    Generalizes :func:`~chainer.functions.average_pooling_2d` to N spatial
    dimensions.  Like :func:`~chainer.functions.convolution_nd` it slides a
    window over the input, but it emits the parameter-free per-channel
    average of each patch instead of an inner product.

    Args:
        x(~chainer.Variable): Input variable.
        ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
            ``ksize=(k, k, ..., k)`` are equivalent.
        stride (int or tuple of ints or None): Stride of pooling applications.
            ``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
            ``None`` is specified, then it uses same stride as the pooling
            window size.
        pad (int or tuple of ints): Spatial padding width for the input array.
            ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
        pad_value (0 or None): Value to fill the padded region when
            calculating average. If ``None`` is specified, such region is
            ignored. The default value is ``0``, therefore the averages are
            biased towards zero.

    Returns:
        ~chainer.Variable: Output variable.

    .. note::

        This function currently does not support ``cover_all`` mode as
        :func:`max_pooling_nd`. Average pooling runs in non-cover-all mode.
    """
    spatial_ndim = len(x.shape[2:])
    pooling = AveragePoolingND(
        spatial_ndim, ksize, stride=stride, pad=pad, pad_value=pad_value)
    return pooling.apply((x,))[0]
def average_pooling_1d(x, ksize, stride=None, pad=0, pad_value=0):
    """1-dimensional spatial average pooling function.

    .. warning::

        This feature is experimental. The interface can change in the future.

    .. note::

        This function calls :func:`~chainer.functions.average_pooling_nd`
        internally, so see the details of the behavior in
        the documentation of :func:`~chainer.functions.average_pooling_nd`.
    """
    spatial_ndim = len(x.shape[2:])
    if spatial_ndim != 1:
        raise ValueError(
            "The number of dimensions under channel dimension of the input "
            "'x' should be 1. But the actual ndim was {}.".format(
                spatial_ndim))
    return average_pooling_nd(x, ksize, stride, pad, pad_value)
def average_pooling_3d(x, ksize, stride=None, pad=0, pad_value=0):
    """3-dimensional spatial average pooling function.

    .. warning::

        This feature is experimental. The interface can change in the future.

    .. note::

        This function calls :func:`~chainer.functions.average_pooling_nd`
        internally, so see the details of the behavior in
        the documentation of :func:`~chainer.functions.average_pooling_nd`.
    """
    spatial_ndim = len(x.shape[2:])
    if spatial_ndim != 3:
        raise ValueError(
            "The number of dimensions under channel dimension of the input "
            "'x' should be 3. But the actual ndim was {}.".format(
                spatial_ndim))
    return average_pooling_nd(x, ksize, stride, pad, pad_value)
| |
"""HTTP REST API server. """
from collections import OrderedDict
import calendar
import datetime
import json
import time
import pytz
from bottle import Bottle, run, request, response, debug, HTTPResponse
from agenda import ds, RedisDatastore, AgendaController, ShiftNotEmptyError, NotAvailableSlotError
# Settings
AUTH = ("user", "s3cr3ts3cr3t")  # HTTP basic-auth pair accepted by the API
DEFAULT_TZ = "Europe/Madrid"
# BUG FIX: hostname was misspelled "localhos"; the server runs on
# localhost:8008 (see run() at the bottom of this file).
DEFAULT_PATH = "http://localhost:8008/agendas/shifts/%s"
UTCTIMEFORMAT = "%Y-%m-%dT%H:%M:%SZ"   # wire format: ISO-8601 UTC with "Z"
LOCALTIMEFORMAT = "%Y-%m-%d %H:%M:%S"  # human-readable local time
# Converters
def epoch(a_dtstring):
    """Parse a UTC timestamp in UTCTIMEFORMAT into an integer POSIX epoch."""
    parsed = time.strptime(a_dtstring, UTCTIMEFORMAT)
    return int(calendar.timegm(parsed))
def today(context=None):
    """Return midnight today, in the context's timezone, as a UTC epoch.

    Args:
        context: optional mapping; ``context["zone"]`` selects the timezone
            name (defaults to ``DEFAULT_TZ``).
    """
    context = context or {}
    zone = context.get("zone", DEFAULT_TZ)
    localtz = pytz.timezone(zone)
    dt = datetime.date.today()
    dt = datetime.datetime.combine(dt, datetime.time(0))
    # BUG FIX: datetime.replace(tzinfo=<pytz zone>) attaches the zone's
    # first tabulated (LMT) offset -- e.g. -0:15 for Europe/Madrid --
    # silently shifting the result.  pytz requires localize() to pick the
    # correct UTC offset (including DST) for the given wall-clock time.
    dt = localtz.localize(dt).astimezone(pytz.utc)
    return calendar.timegm(dt.timetuple())
def tomorrow(context=None):
    """Return midnight tomorrow, in the context's timezone, as a UTC epoch.

    Args:
        context: optional mapping; ``context["zone"]`` selects the timezone
            name (defaults to ``DEFAULT_TZ``).
    """
    context = context or {}
    zone = context.get("zone", DEFAULT_TZ)
    localtz = pytz.timezone(zone)
    dt = datetime.date.today() + datetime.timedelta(days=1)
    dt = datetime.datetime.combine(dt, datetime.time(0))
    # BUG FIX: datetime.replace(tzinfo=<pytz zone>) attaches the zone's
    # first tabulated (LMT) offset instead of the correct one; pytz
    # requires localize() to resolve the offset for this wall-clock time.
    dt = localtz.localize(dt).astimezone(pytz.utc)
    return calendar.timegm(dt.timetuple())
def epoch2datetime(an_epoch):
    """Convert a POSIX timestamp into a timezone-aware UTC datetime."""
    naive = datetime.datetime.utcfromtimestamp(an_epoch)
    return naive.replace(tzinfo=pytz.utc)
def filter_request(form_dict, name, to_python, default=None):
    """Fetch ``name`` from a request mapping and coerce it with ``to_python``.

    Returns ``None`` when the key is absent, and ``default`` when the value
    is present but cannot be converted (note the asymmetry: a missing key
    ignores ``default``).
    """
    try:
        raw = form_dict[name]
    except KeyError:
        return None
    try:
        return to_python(raw)
    except (ValueError, TypeError):
        return default
# Authentication
def require_authentication(fn):
    """Decorator: reject requests whose basic-auth pair does not match AUTH."""
    def wrapper(*args, **kwargs):
        if request.auth != AUTH:
            return render_to_error(403, "Incorrect credentials.")
        return fn(*args, **kwargs)
    return wrapper
# Helpers
def dict_to_response(items, status=200, headers=None):
    """Return an HTTPResponse carrying ``items`` serialized as JSON.

    ``items`` may be a mapping or any iterable of key/value pairs accepted
    by :class:`collections.OrderedDict`.  ``headers`` are extra response
    headers (e.g. ``Location``) sent in addition to the JSON defaults.
    """
    extra_headers = headers or {}
    payload = OrderedDict(items)
    payload["status"] = status
    output = json.dumps(payload, indent=2)
    # BUG FIX: caller-supplied headers were only written to the thread-local
    # `response` object, but returning a fresh HTTPResponse replaces those
    # headers, so e.g. the Location header from post_shift() was dropped.
    # Merge them into the HTTPResponse itself; keep mutating `response` too
    # for compatibility with anything that inspects it mid-request.
    for key, value in extra_headers.iteritems():
        response.headers[key] = value
    response.set_header('Content-Type', 'application/json')
    all_headers = {'Content-Type': 'application/json',
                   'Access-Control-Allow-Origin': '*', }
    all_headers.update(extra_headers)
    return HTTPResponse(status=status, body=output, **all_headers)
def render_epoch(an_epoch, context=None):
    """Render an epoch as UTC/local/timezone fields for JSON output."""
    zone = (context or {}).get("zone", DEFAULT_TZ)
    localtz = pytz.timezone(zone)
    moment = epoch2datetime(an_epoch)
    fields = [
        ("datetime", moment.strftime(UTCTIMEFORMAT)),
        ("timestamp", an_epoch),
        ("localtime", moment.astimezone(localtz).strftime(LOCALTIMEFORMAT)),
        ("timezone", zone),
    ]
    return OrderedDict(fields)
def render_shift(shift, context=None):
    """Render a shift resource as an ordered JSON-able mapping."""
    ctx = context or {}
    return OrderedDict([
        ("kind", "shift"),
        ("id", shift.key),
        ("name", "Testing"),
        ("href", ctx.get("href", "")),
        ("start", render_epoch(shift.interval.start, ctx)),
        ("end", render_epoch(shift.interval.end, ctx)),
    ])
def render_shifts(shifts, context=None):
    """Render a collection of shifts."""
    ctx = context or {}
    rendered = [render_shift(s, ctx) for s in shifts]
    return OrderedDict([("kind", "shifts"), ("shifts", rendered)])
def render_agenda(agenda, context=None):
    """Render an agenda resource (its key plus a fixed display name)."""
    context = context or {}
    return OrderedDict([("id", agenda.key), ("name", "Testing")])
def render_slot(slot, context=None):
    """Render a free slot; href falls back to DEFAULT_PATH."""
    ctx = context or {}
    return OrderedDict([
        ("kind", "freeslot"),
        ("href", ctx.get("path", DEFAULT_PATH)),
        ("start", render_epoch(slot.start, ctx)),
        ("end", render_epoch(slot.end, ctx)),
    ])
def render_slots(slots, context=None):
    """Render a collection of free slots."""
    ctx = context or {}
    rendered = [render_slot(s, ctx) for s in slots]
    return OrderedDict([("kind", "freeslots"), ("freeslots", rendered)])
def render_appointments(appos, context=None):
    """Render a collection of appointments."""
    ctx = context or {}
    rendered = [render_appointment(a, ctx) for a in appos]
    return OrderedDict([("kind", "appointments"), ("appointments", rendered)])
def render_appointment(appo, context=None):
    """Render an appointment resource, including its parent shift id."""
    ctx = context or {}
    return OrderedDict([
        ("kind", "appointment"),
        ("id", appo.key),
        ("shift_id", appo.parent_key),
        ("href", ctx.get("href", "")),
        ("start", render_epoch(appo.interval.start, ctx)),
        ("end", render_epoch(appo.interval.end, ctx)),
    ])
def render_to_error(status, message):
    """Build a JSON error response with CORS headers."""
    body = json.dumps({"status": status, "message": message})
    error_headers = {'Content-Type': 'application/json',
                     'Access-Control-Allow-Origin': '*', }
    return HTTPResponse(status=status, body=body, **error_headers)
# Shortcuts
def get_agenda_or_404(aid):
    """Load an agenda controller or abort the request with a 404."""
    try:
        return AgendaController(aid)
    except KeyError:
        # bottle treats a raised HTTPResponse as the final response.
        raise render_to_error(404, "Agenda was not found.")
class Context(dict):
    """Per-request template context: a dict with attribute-style access.

    On construction it derives ``url`` (``scheme://host``) from the request.
    """

    def __init__(self, request, *args, **kwargs):
        super(Context, self).__init__(*args, **kwargs)
        self.process_request(request)

    def __getattr__(self, key):
        # Missing keys raise KeyError (not AttributeError), mirroring dict.
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value

    def process_request(self, request):
        """Record the request's base URL under the ``url`` key."""
        scheme, host = request.urlparts[:2]
        self.update(url=scheme + "://" + host)
def error404(error):
    """Bottle 404 handler: reply with the JSON error envelope."""
    return render_to_error(error.status, "The URL is not found ")
def error500(error):
    """Bottle 500 handler: reply with the JSON error envelope."""
    return render_to_error(error.status, "Internal error.")
# Routes
def options():
    """Answer OPTIONS/preflight requests with the allowed methods."""
    response.headers["Allow"] = "GET,HEAD,POST,OPTIONS,DELETE"
    # BUG FIX: bottle's HTTPResponse takes `body=`; `output=` is not a
    # constructor argument in bottle >= 0.12 and would be sent as a bogus
    # "Output" header instead of setting an empty body.
    return HTTPResponse(status=200, body="")
def test():
    """Test the server.

    Echoes string and integer and converts a string datetime to epoch."""
    if request.method == "GET":
        source = request.query
    elif request.method == "POST":
        source = request.forms
    else:
        return None
    payload = dict(
        string=filter_request(source, 'string', str),
        integer=filter_request(source, 'integer', int),
        epoch=filter_request(source, 'datetime', epoch),
    )
    return dict_to_response(payload)
def test_post():
    """Test the server.

    Echoes string and integer and converts a string datetime to epoch."""
    if request.content_type == "application/x-www-form-urlencoded":
        my_string = filter_request(request.forms, 'string', str)
        my_integer = filter_request(request.forms, 'integer', int)
        my_epoch = filter_request(request.forms, 'datetime', epoch)
        d = dict(string=my_string, integer=my_integer, epoch=my_epoch)
    elif request.content_type == "application/json":
        # NOTE(review): request.json is None for an empty body, which would
        # make the item assignment below fail -- confirm callers always
        # send a JSON object.
        d = request.json
    else:
        # BUG FIX: `d` was unbound for any other content type, turning an
        # unsupported upload into a NameError (HTTP 500) instead of a clean
        # echo of just the query parameter.
        d = {}
    # Query
    d["query"] = filter_request(request.query, 'query', str)
    return dict_to_response(d)
@require_authentication
def get_agenda(aid):
    """GET a single agenda resource."""
    return dict_to_response(render_agenda(get_agenda_or_404(aid)))
@require_authentication
def post_agenda():
    """Create a new agenda; reply 201 with a Location header."""
    agenda = AgendaController()
    ctx = Context(request)
    ctx["href"] = ctx.url + "/agendas/{agenda}".format(agenda=agenda.key)
    return dict_to_response(
        render_agenda(agenda, ctx), 201, {"Location": ctx["href"]})
@require_authentication
def get_slots(aid):
    """GET slots of an agenda, optionally filtered by the query string."""
    agenda = get_agenda_or_404(aid)
    length = filter_request(request.query, "length", int)
    start_from = filter_request(request.query, "start", epoch)
    start_until = filter_request(request.query, "end", epoch)
    slots = agenda.get_slots(length, start_from, start_until)
    return dict_to_response(render_slots(slots))
@require_authentication
def get_free_slots(aid):
    """GET free slots between start and end (defaults: today..tomorrow)."""
    agenda = get_agenda_or_404(aid)
    length = filter_request(request.query, "length", int)
    start = filter_request(request.query, "start", epoch) or today()
    end = filter_request(request.query, "end", epoch) or tomorrow()
    free = agenda.get_free_slots(start, end, length)
    return dict_to_response(render_slots(free))
@require_authentication
def get_appointment(aid, app_id):
    """GET a single appointment of an agenda."""
    appo = get_agenda_or_404(aid).get_appointment(app_id)
    return dict_to_response(render_appointment(appo))
@require_authentication
def get_appointments(aid):
    """GET all appointments of an agenda."""
    appos = get_agenda_or_404(aid).get_appointments_itervalues()
    return dict_to_response(render_appointments(appos))
@require_authentication
def delete_appointment(aid, app_id):
    """DELETE an appointment; 404 when it does not exist."""
    agenda = get_agenda_or_404(aid)
    try:
        agenda.del_appointment(app_id)
    except KeyError:
        return render_to_error(404, "Appointment was not found.")
    else:
        return dict_to_response((), 204)
@require_authentication
def get_shifts(aid):
    """GET all shifts of an agenda."""
    shifts = get_agenda_or_404(aid).get_shifts_itervalues()
    return dict_to_response(render_shifts(shifts))
@require_authentication
def get_shift(aid, sid):
    """GET a single shift of an agenda."""
    shift = get_agenda_or_404(aid).get_shift(sid)
    return dict_to_response(render_shift(shift))
@require_authentication
def post_shift(aid):
    """Create a shift in an agenda; reply 201 with a Location header.

    Replies 400 when the start/end form fields are missing or malformed.
    """
    start = filter_request(request.forms, "start", epoch)
    end = filter_request(request.forms, "end", epoch)
    # BUG FIX: missing/invalid parameters are a client request error, so
    # answer 400 (Bad Request) as post_appointment does -- 403 (Forbidden)
    # wrongly implied an authorization problem.  Also use `is None`, the
    # idiomatic identity test, instead of `== None`.
    if start is None or end is None:
        return render_to_error(400, "Incorrect parameter value.")
    agenda = get_agenda_or_404(aid)
    shift = agenda.add_shift(start, end)
    context = Context(request)
    path = "/agendas/{aid}/shifts/{sid}".format(aid=aid, sid=shift.key)
    href = context.url + path
    context["href"] = href
    headers = {"Location": href}
    return dict_to_response(render_shift(shift, context), 201, headers)
@require_authentication
def delete_shift(aid, sid):
    """DELETE a shift; it must exist and contain no appointments."""
    agenda = get_agenda_or_404(aid)
    try:
        agenda.del_shift(sid)
    except KeyError:
        return render_to_error(404, "Shift %s was not found." % sid)
    except ShiftNotEmptyError:
        return render_to_error(409, "Shift %s is not empty. Please, first delete all appointments." % sid)
    return dict_to_response((), 204)
@require_authentication
def post_appointment(aid):
    """Create an appointment; reply 201 with Location, 409 on overlap."""
    start = filter_request(request.forms, "start", epoch)
    end = filter_request(request.forms, "end", epoch)
    if start is None or end is None:
        return render_to_error(400, "Incorrect parameter value.")
    agenda = get_agenda_or_404(aid)
    try:
        appo = agenda.add_appointment(start, end)
    except NotAvailableSlotError:
        return render_to_error(409, "Appointment overlaps. Please, choose another slot.")
    ctx = Context(request)
    path = "/agendas/{agenda}/shifts/{shift}/appointments/{appo}".format(
        agenda=aid, shift=appo.parent_key, appo=appo.key)
    ctx["href"] = ctx.url + path
    return dict_to_response(
        render_appointment(appo, ctx), 201, {"Location": ctx["href"]})
def setup_routing(app):
    """Attach every URL route to the bottle application."""
    routes = [
        ('/test', ['GET', ], test),
        ('/test', ['POST', ], test_post),
        ("/*", "OPTIONS", options),
        ('/agendas/<aid:int>', "GET", get_agenda),
        ('/agendas', "POST", post_agenda),
        ('/agendas/<aid:int>/shifts', "GET", get_shifts),
        ('/agendas/<aid:int>/shifts/<sid:int>', "GET", get_shift),
        ('/agendas/<aid:int>/shifts', "POST", post_shift),
        ('/agendas/<aid:int>/shifts/<sid:int>', "DELETE", delete_shift),
        ('/agendas/<aid:int>/appointments/<app_id:int>', "GET", get_appointment),
        ('/agendas/<aid:int>/appointments', "GET", get_appointments),
        ('/agendas/<aid:int>/appointments/<app_id:int>', "DELETE", delete_appointment),
        ('/agendas/<aid:int>/appointments', "POST", post_appointment),
        ('/agendas/<aid:int>/slots', "GET", get_slots),
        ('/agendas/<aid:int>/freeslots', "GET", get_free_slots),
    ]
    for path, methods, handler in routes:
        app.route(path, methods, handler)
def setup_error_handling(app):
    """Register the JSON error handlers on the application."""
    handlers = {404: error404, 500: error500}
    for code, handler in handlers.items():
        app.error_handler[code] = handler
# Main
# bottle debug mode: verbose tracebacks in responses (disable in production).
debug(True)
# Install the Redis-backed datastore as the module-wide persistence layer.
setattr(ds, 'datastore', RedisDatastore())
app = Bottle()
setup_routing(app)
setup_error_handling(app)
if __name__ == '__main__':
    # Development server; the reloader restarts the process on code changes.
    run(app, host='localhost', port=8008, reloader=True)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train an MLP on MNIST using K-FAC.
This library fits a 3-layer, tanh-activated MLP on MNIST using K-FAC. After
~25k steps, this should reach perfect accuracy on the training set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.kfac.examples import mnist
# Short aliases for the contrib K-FAC sub-modules used throughout this file.
lc = tf.contrib.kfac.layer_collection
opt = tf.contrib.kfac.optimizer

# Public API of this example module.
# NOTE(review): build_model, minimize and train_mnist_estimator are omitted
# here -- confirm they are meant to stay private to the example.
__all__ = [
    "fc_layer",
    "train_mnist",
    "train_mnist_multitower",
]
def fc_layer(layer_id, inputs, output_size):
  """Builds a fully connected, tanh-activated layer.

  Args:
    layer_id: int. Integer ID for this layer's variables.
    inputs: Tensor of shape [num_examples, input_size]. Each row corresponds
      to a single example.
    output_size: int. Number of output dimensions after fully connected layer.

  Returns:
    preactivations: Tensor of shape [num_examples, output_size]. Values of the
      layer immediately before the activation function.
    activations: Tensor of shape [num_examples, output_size]. Values of the
      layer immediately after the activation function.
    params: Tuple of (weights, bias), parameters for this layer.
  """
  # TODO(b/67004004): Delete this function and rely on tf.layers exclusively.
  dense = tf.layers.Dense(
      output_size,
      kernel_initializer=tf.random_normal_initializer(),
      name="fc_%d" % layer_id)
  pre = dense(inputs)
  act = tf.nn.tanh(pre)
  # Return the layer's variables as a (hashable) tuple for K-FAC registration.
  return pre, act, (dense.kernel, dense.bias)
def build_model(examples, labels, num_labels, layer_collection):
  """Builds an MLP classification model.

  Args:
    examples: Tensor of shape [num_examples, num_features]. Represents inputs
      of model.
    labels: Tensor of shape [num_examples]. Contains integer IDs to be
      predicted by softmax for each example.
    num_labels: int. Number of distinct values 'labels' can take on.
    layer_collection: LayerCollection instance describing model architecture.

  Returns:
    loss: 0-D Tensor representing loss to be minimized.
    accuracy: 0-D Tensor representing model's accuracy.
  """
  # Build an MLP. For each layer, we'll keep track of the preactivations,
  # activations, weights, and bias.
  pre0, act0, params0 = fc_layer(layer_id=0, inputs=examples, output_size=128)
  pre1, act1, params1 = fc_layer(layer_id=1, inputs=act0, output_size=64)
  pre2, act2, params2 = fc_layer(layer_id=2, inputs=act1, output_size=32)
  logits, _, params3 = fc_layer(layer_id=3, inputs=act2, output_size=num_labels)
  loss = tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits))
  accuracy = tf.reduce_mean(
      tf.cast(tf.equal(labels, tf.argmax(logits, axis=1)), dtype=tf.float32))

  # Register parameters. K-FAC needs to know about the inputs, outputs, and
  # parameters of each layer and the logits powering the posterior probability
  # over classes.
  tf.logging.info("Building LayerCollection.")
  layer_collection.register_fully_connected(params0, examples, pre0)
  layer_collection.register_fully_connected(params1, act0, pre1)
  layer_collection.register_fully_connected(params2, act1, pre2)
  layer_collection.register_fully_connected(params3, act2, logits)
  layer_collection.register_categorical_predictive_distribution(
      logits, name="logits")

  return loss, accuracy
def minimize(loss, accuracy, layer_collection, num_towers, session_config=None):
  """Minimize 'loss' with KfacOptimizer.

  Args:
    loss: 0-D Tensor. Loss to be minimized.
    accuracy: 0-D Tensor. Accuracy of classifier on current minibatch.
    layer_collection: LayerCollection instance. Describes layers in model.
    num_towers: int. Number of CPUs to split minibatch across.
    session_config: tf.ConfigProto. Configuration for tf.Session().

  Returns:
    accuracy of classifier on final minibatch.
  """
  devices = tuple("/cpu:%d" % tower_id for tower_id in range(num_towers))

  # Train with K-FAC. We'll use a decreasing learning rate that's cut in 1/2
  # every 10k iterations.
  tf.logging.info("Building KFAC Optimizer.")
  global_step = tf.train.get_or_create_global_step()
  optimizer = opt.KfacOptimizer(
      learning_rate=tf.train.exponential_decay(
          0.00002, global_step, 10000, 0.5, staircase=True),
      cov_ema_decay=0.95,
      damping=0.0005,
      layer_collection=layer_collection,
      momentum=0.99,
      placement_strategy="round_robin",
      cov_devices=devices,
      inv_devices=devices)

  (cov_update_thunks,
   inv_update_thunks) = optimizer.make_vars_and_create_op_thunks()

  def make_update_op(update_thunks):
    # Materialize each thunk's op and group them into one update op.
    update_ops = [thunk() for thunk in update_thunks]
    return tf.group(*update_ops)

  # TODO(b/78537047): change (some) examples to use PeriodicInvCovUpdateKfacOpt
  # once that gets moved over? Could still leave more advanced examples as they
  # are (e.g. train_mnist_estimator in this file)
  cov_update_op = make_update_op(cov_update_thunks)
  with tf.control_dependencies([cov_update_op]):
    # We update the inverses only every 100 iterations.
    inverse_op = tf.cond(
        tf.equal(tf.mod(global_step, 100), 0),
        lambda: make_update_op(inv_update_thunks), tf.no_op)
    with tf.control_dependencies([inverse_op]):
      train_op = optimizer.minimize(loss, global_step=global_step)

  tf.logging.info("Starting training.")
  with tf.train.MonitoredTrainingSession(config=session_config) as sess:
    while not sess.should_stop():
      global_step_, loss_, accuracy_, _ = sess.run(
          [global_step, loss, accuracy, train_op])

      if global_step_ % 100 == 0:
        tf.logging.info("global_step: %d | loss: %f | accuracy: %f",
                        global_step_, loss_, accuracy_)

  return accuracy_
def train_mnist(data_dir, num_epochs, use_fake_data=False):
  """Train an MLP on MNIST.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.
  tf.logging.info("Loading MNIST into memory.")
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=64,
      flatten_images=True,
      use_fake_data=use_fake_data)

  # Build an MLP. The model's layers will be added to the LayerCollection.
  tf.logging.info("Building model.")
  layer_collection = lc.LayerCollection()
  loss, accuracy = build_model(examples, labels, 10, layer_collection)

  # Fit model.
  # BUG FIX: the docstring promises the final minibatch accuracy, and
  # train_mnist_multitower returns it, but the result of minimize() was
  # discarded here.  Propagate it to the caller.
  return minimize(loss, accuracy, layer_collection, 1)
def train_mnist_multitower(data_dir,
                           num_epochs,
                           num_towers,
                           use_fake_data=False):
  """Train an MLP on MNIST, splitting the minibatch across multiple towers.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    num_towers: int. Number of CPUs to split minibatch across.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    accuracy of model on the final minibatch of training data.
  """
  # Load a dataset.  The global batch scales with the number of towers so
  # that each tower keeps a fixed per-tower batch of 64.
  tower_batch_size = 64
  batch_size = tower_batch_size * num_towers
  tf.logging.info(
      ("Loading MNIST into memory. Using batch_size = %d = %d towers * %d "
       "tower batch size.") % (batch_size, num_towers, tower_batch_size))
  examples, labels = mnist.load_mnist(
      data_dir,
      num_epochs=num_epochs,
      batch_size=batch_size,
      flatten_images=True,
      use_fake_data=use_fake_data)

  # Split minibatch across towers.
  examples = tf.split(examples, num_towers)
  labels = tf.split(labels, num_towers)

  # Build an MLP. Each tower's layers will be added to the LayerCollection.
  layer_collection = lc.LayerCollection()
  tower_results = []
  for tower_id in range(num_towers):
    with tf.device("/cpu:%d" % tower_id):
      with tf.name_scope("tower%d" % tower_id):
        # Reuse variables after the first tower so all towers share weights.
        with tf.variable_scope(tf.get_variable_scope(), reuse=(tower_id > 0)):
          tf.logging.info("Building tower %d." % tower_id)
          tower_results.append(
              build_model(examples[tower_id], labels[tower_id], 10,
                          layer_collection))
  losses, accuracies = zip(*tower_results)

  # Average across towers.
  loss = tf.reduce_mean(losses)
  accuracy = tf.reduce_mean(accuracies)

  # Fit model.
  session_config = tf.ConfigProto(
      allow_soft_placement=False, device_count={
          "CPU": num_towers
      })
  return minimize(
      loss, accuracy, layer_collection, num_towers,
      session_config=session_config)
def train_mnist_estimator(data_dir, num_epochs, use_fake_data=False):
  """Train an MLP on MNIST using tf.estimator.

  Args:
    data_dir: string. Directory to read MNIST examples from.
    num_epochs: int. Number of passes to make over the training set.
    use_fake_data: bool. If True, generate a synthetic dataset.

  Returns:
    None. Trains until input_fn() is exhausted; metrics are logged and
    checkpoints are written to /tmp/mnist.
  """

  # Load a dataset.
  def input_fn():
    tf.logging.info("Loading MNIST into memory.")
    return mnist.load_mnist(
        data_dir,
        num_epochs=num_epochs,
        batch_size=64,
        flatten_images=True,
        use_fake_data=use_fake_data)

  def model_fn(features, labels, mode, params):
    """Model function for MLP trained with K-FAC.

    Args:
      features: Tensor of shape [batch_size, input_size]. Input features.
      labels: Tensor of shape [batch_size]. Target labels for training.
      mode: tf.estimator.ModeKey. Must be TRAIN.
      params: ignored.

    Returns:
      EstimatorSpec for training.

    Raises:
      ValueError: If 'mode' is anything other than TRAIN.
    """
    del params
    if mode != tf.estimator.ModeKeys.TRAIN:
      # BUG FIX: error message typo -- "supposed" -> "supported".
      raise ValueError("Only training is supported with this API.")

    # Build a ConvNet.
    layer_collection = lc.LayerCollection()
    loss, accuracy = build_model(
        features, labels, num_labels=10, layer_collection=layer_collection)

    # Train with K-FAC.
    global_step = tf.train.get_or_create_global_step()
    optimizer = opt.KfacOptimizer(
        learning_rate=tf.train.exponential_decay(
            0.00002, global_step, 10000, 0.5, staircase=True),
        cov_ema_decay=0.95,
        damping=0.0001,
        layer_collection=layer_collection,
        momentum=0.99)

    (cov_update_thunks,
     inv_update_thunks) = optimizer.make_vars_and_create_op_thunks()

    def make_update_op(update_thunks):
      # Materialize each thunk's op and group them into one update op.
      update_ops = [thunk() for thunk in update_thunks]
      return tf.group(*update_ops)

    def make_batch_executed_op(update_thunks, batch_size=1):
      # Execute `batch_size` of the thunks per step, round-robin over steps.
      return tf.group(*tf.contrib.kfac.utils.batch_execute(
          global_step, update_thunks, batch_size=batch_size))

    # Run cov_update_op every step. Run 1 inv_update_ops per step.
    cov_update_op = make_update_op(cov_update_thunks)
    with tf.control_dependencies([cov_update_op]):
      # But make sure to execute all the inverse ops on the first step
      inverse_op = tf.cond(tf.equal(global_step, 0),
                           lambda: make_update_op(inv_update_thunks),
                           lambda: make_batch_executed_op(inv_update_thunks))
      with tf.control_dependencies([inverse_op]):
        train_op = optimizer.minimize(loss, global_step=global_step)

    # Print metrics every 5 sec.
    hooks = [
        tf.train.LoggingTensorHook(
            {
                "loss": loss,
                "accuracy": accuracy
            }, every_n_secs=5),
    ]
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, train_op=train_op, training_hooks=hooks)

  run_config = tf.estimator.RunConfig(
      model_dir="/tmp/mnist", save_checkpoints_steps=1, keep_checkpoint_max=100)

  # Train until input_fn() is empty with Estimator. This is a prerequisite for
  # TPU compatibility.
  estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
  estimator.train(input_fn=input_fn)
| |
"""
Django settings for municipal_finance project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import dj_database_url
import os
import environ
import logging
logger = logging.getLogger("municipal_finance")
TESTING = False
env = environ.Env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT_DIR = environ.Path(__file__) - 2
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("DJANGO_DEBUG", "true") == "true"
PRELOAD_CUBES = os.environ.get("PRELOAD_CUBES", "false") == "true"
# SECURITY WARNING: keep the secret key used in production secret!
if DEBUG:
SECRET_KEY = "-r&cjf5&l80y&(q_fiidd$-u7&o$=gv)s84=2^a2$o^&9aco0o"
else:
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY")
DATA_GOOGLE_ANALYTICS_ID = "UA-48399585-37"
SCORECARD_GOOGLE_ANALYTICS_ID = "UA-48399585-40"
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"municipal_finance",
"scorecard",
"infrastructure",
"household",
"webflow",
"django.contrib.sites",
"django.contrib.contenttypes",
"django.contrib.humanize",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.staticfiles",
"pipeline",
"django_extensions",
"corsheaders",
"rest_framework",
"django_q",
"storages",
"debug_toolbar",
"constance",
"constance.backends.database",
"import_export",
)
# Sites
# 2: Scorecard
# 3: API
if os.environ.get("SITE_ID", None):
SITE_ID = int(os.environ.get("SITE_ID"))
DATA_PORTAL_URL = os.environ.get(
"DATA_PORTAL_URL", "https://municipaldata.treasury.gov.za"
)
API_URL = DATA_PORTAL_URL + "/api"
MAPIT = {"url": "https://mapit.code4sa.org", "generation": "2"}
# Middleware stack. Order matters: GZip wraps everything first, the API
# error handler runs last. Quote style normalised to double quotes to
# match the rest of this settings module.
MIDDLEWARE = [
    "django.middleware.gzip.GZipMiddleware",
    "debug_toolbar.middleware.DebugToolbarMiddleware",
    "municipal_finance.middleware.RedirectsMiddleware",
    "municipal_finance.middleware.SiteMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "corsheaders.middleware.CorsMiddleware",
    "whitenoise.middleware.WhiteNoiseMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "municipal_finance.middleware.ApiErrorHandler",
]
CONSTANCE_BACKEND = "constance.backends.database.DatabaseBackend"
CONSTANCE_ADDITIONAL_FIELDS = {
'quarter_select': ['django.forms.fields.ChoiceField', {
'widget': 'django.forms.Select',
'choices': ((1, "1"), (2, "2"), (3, "3"), (4, "4"),)
}],
}
# django-constance settings editable in the admin.
# Each entry is [default, help text, field type].
CONSTANCE_CONFIG = {
    "LAST_AUDIT_YEAR": [
        2019,
        "The last financial year that should be included when compiling "
        "fiscal indicators for municipal profiles",
        int,
    ],
    "LAST_OPINION_YEAR": [
        2019,
        "The last financial year that should be included when gathering "
        "audit opinions for municipal profiles",
        int,
    ],
    "LAST_UIFW_YEAR": [
        2019,
        # Help text fixed: "Unautherised" -> "Unauthorised" and the stray
        # duplicated "expenditure data" fragment removed.
        "The last financial year that should be included when compiling "
        "indicators that make use of Unauthorised, Irregular, Fruitless and "
        "Wasteful expenditure data for municipal profiles",
        int,
    ],
    "LAST_AUDIT_QUARTER": [
        "2019q4",
        # Help text fixed: "the last qudit tok place" -> "the last audit took place".
        "The last quarter for which an audit is expected, used for "
        "determining if a demarcation was established before or after "
        "the last audit took place",
        str,
    ],
    "GRANTS_LATEST_YEAR": [
        2019,
        "The last year for which grant spending data is available. "
        "This is used to show \"Spent up to 2020-21 Q3\" or whatever is "
        "the selected year and quarter.",
        int,
    ],
    "GRANTS_LATEST_QUARTER": [
        4,
        "The last quarter for which grant spending data is available. "
        "This is used to show \"Spent up to 2020-21 Q3\" or whatever is "
        "the selected year and quarter.",
        "quarter_select",
    ],
}
ROOT_URLCONF = "municipal_finance.urls"
WSGI_APPLICATION = "municipal_finance.wsgi.application"
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASE_URL = os.environ.get(
"DATABASE_URL",
"postgres://municipal_finance:municipal_finance@localhost:5432/municipal_finance",
)
db_config = dj_database_url.parse(DATABASE_URL)
db_config["ATOMIC_REQUESTS"] = True
DATABASES = {"default": db_config}
# Caches
if DEBUG:
CACHES = {"default": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"}}
else:
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
"LOCATION": "/var/tmp/django_cache",
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = "en-za"
TIME_ZONE = "Africa/Johannesburg"
USE_I18N = False
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
FORMAT_MODULE_PATH = "municipal_finance.formats"
# CORS
CORS_ORIGIN_ALLOW_ALL = True
# Templates
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"debug": DEBUG,
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
"municipal_finance.context_processors.google_analytics",
"municipal_finance.context_processors.sentry_dsn",
"municipal_finance.context_processors.api_details",
],
},
}
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
ASSETS_DEBUG = DEBUG
ASSETS_URL_EXPIRE = False
# assets must be placed in the 'static' dir of your Django app
# where the compiled assets go
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
# the URL for assets
STATIC_URL = "/static/"
STATICFILES_DIRS = [
str(ROOT_DIR.path("assets/bundles")),
]
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"pipeline.finders.PipelineFinder",
)
PYSCSS_LOAD_PATHS = [
os.path.join(BASE_DIR, "municipal_finance", "static"),
os.path.join(BASE_DIR, "municipal_finance", "static", "bower_components"),
os.path.join(BASE_DIR, "scorecard", "static"),
os.path.join(BASE_DIR, "scorecard", "static", "bower_components"),
os.path.join(BASE_DIR, "census", "static"),
]
PIPELINE = {
"STYLESHEETS": {
"docs": {
"source_filenames": (
"bower_components/fontawesome/css/font-awesome.css",
"bower_components/bootstrap-sass/assets/stylesheets/_bootstrap.scss",
"slate/stylesheets/screen.css",
"stylesheets/docs.scss",
),
"output_filename": "docs.css",
},
"api-home": {
"source_filenames": (
"bower_components/fontawesome/css/font-awesome.css",
"bower_components/bootstrap-sass/assets/stylesheets/_bootstrap.scss",
"stylesheets/site.scss",
),
"output_filename": "api-home.css",
},
"table": {
"source_filenames": (
"bower_components/fontawesome/css/font-awesome.css",
"bower_components/bootstrap-sass/assets/stylesheets/_bootstrap.scss",
"stylesheets/vendor/select2.min.css",
"stylesheets/table.scss",
),
"output_filename": "table.css",
},
"scorecard": {
"source_filenames": (
"stylesheets/vendor/leaflet-0.6.4.css",
"stylesheets/vendor/leaflet.label.css",
),
"output_filename": "scorecard.css",
},
},
"JAVASCRIPT": {
"js": {
"source_filenames": (
"javascript/vendor/jquery-1.12.3.min.js",
"javascript/app.js",
),
"output_filename": "app.js",
},
"docs": {
"source_filenames": (
"javascript/vendor/jquery-1.12.3.min.js",
"slate/javascripts/lib/_energize.js",
"slate/javascripts/lib/_lunr.js",
"slate/javascripts/lib/_jquery_ui.js",
"slate/javascripts/lib/_jquery.tocify.js",
"slate/javascripts/lib/_jquery.highlight.js",
"slate/javascripts/lib/_imagesloaded.min.js",
"slate/javascripts/app/_lang.js",
"slate/javascripts/app/_search.js",
"slate/javascripts/app/_toc.js",
"bower_components/underscore/underscore-min.js",
"bower_components/backbone/backbone-min.js",
"javascript/vendor/js.cookie.js",
"bower_components/bootstrap-sass/assets/javascripts/bootstrap.min.js",
"javascript/docs.js",
),
"output_filename": "docs.js",
},
"api-home": {
"source_filenames": (
"javascript/vendor/jquery-1.12.3.min.js",
"bower_components/bootstrap-sass/assets/javascripts/bootstrap.min.js",
"javascript/app.js",
),
"output_filename": "home.js",
},
"table": {
"source_filenames": (
"javascript/vendor/jquery-1.12.3.min.js",
"bower_components/underscore/underscore-min.js",
"bower_components/backbone/backbone-min.js",
"javascript/vendor/d3-format.min.js",
"javascript/vendor/select2.min.js",
"javascript/vendor/js.cookie.js",
"bower_components/bootstrap-sass/assets/javascripts/bootstrap.min.js",
"javascript/table.js",
),
"output_filename": "table.js",
},
"scorecard": {
"source_filenames": (
"bower_components/underscore/underscore-min.js",
"bower_components/d3/d3.min.js",
"js/vendor/d3-format.min.js",
"js/vendor/typeahead-0.11.1.js",
"js/vendor/leaflet-0.6.4.js",
"js/vendor/leaflet.label.js",
"js/charts.js",
"js/place-finder.js",
"js/maps.js",
),
"output_filename": "scorecard.js",
},
"infrastructure": {
"source_filenames": (
"js/utils.js",
"js/sorter.js",
"js/barchart.js",
"js/mm-webflow.js",
),
"output_filename": "infrastructure.js",
},
},
"CSS_COMPRESSOR": None,
"JS_COMPRESSOR": None,
"DISABLE_WRAPPER": True,
"COMPILERS": ("municipal_finance.pipeline.PyScssCompiler",),
}
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = "municipal_finance.pipeline.GzipManifestPipelineStorage"
WHITENOISE_MIMETYPES = {
'.map': 'application/octet-stream',
}
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "%(asctime)s %(levelname)s %(module)s %(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "INFO",
"class": "logging.StreamHandler",
"formatter": "simple",
}
},
"root": {"handlers": ["console"], "level": "ERROR"},
"loggers": {
"municipal_finance": {"level": "DEBUG" if DEBUG else "INFO"},
"sqlalchemy.engine": {"level": "INFO" if DEBUG else "WARN"},
"django": {"level": "DEBUG" if DEBUG else "INFO"},
},
}
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": [],
"DEFAULT_PERMISSION_CLASSES": [],
"UNAUTHENTICATED_USER": None,
"DEFAULT_RENDERER_CLASSES": ["rest_framework.renderers.JSONRenderer"],
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": 150,
}
Q_CLUSTER = {
"name": "DjangORM",
"workers": 1,
"timeout": 3600,
"retry": 700,
"queue_limit": 100,
"bulk": 50,
"orm": "default",
"poll": 5,
"max_attempts": 1,
"ack_failures": True, # Dequeue failed tasks
}
if not DEBUG:
DEFAULT_FILE_STORAGE = "municipal_finance.storage.MediaStorage"
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str(
"AWS_STORAGE_BUCKET_NAME", "munimoney-media"
)
AWS_S3_CUSTOM_DOMAIN = "%s.s3.amazonaws.com" % AWS_STORAGE_BUCKET_NAME
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": "max-age=86400",
}
else:
DEFAULT_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME")
AWS_DEFAULT_ACL = "public-read"
AWS_BUCKET_ACL = "public-read"
AWS_AUTO_CREATE_BUCKET = True
AWS_S3_ENDPOINT_URL = env.str("AWS_S3_ENDPOINT_URL", None)
AWS_S3_REGION_NAME = env.str("AWS_S3_REGION_NAME", None)
AWS_S3_SECURE_URLS = env.bool("AWS_S3_SECURE_URLS", True)
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", None)
AWS_S3_FILE_OVERWRITE = False
# Do NOT use this for feature flags. Just use it to tell the outside world
# which environment messages e.g. logs or errors are coming from.
ENVIRONMENT = env.str("ENVIRONMENT")
SENTRY_DSN = env.str("SENTRY_DSN", None)
SENTRY_PERF_SAMPLE_RATE = env.float("SENTRY_PERF_SAMPLE_RATE", 0.1)
if SENTRY_DSN:
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[DjangoIntegration()],
traces_sample_rate=SENTRY_PERF_SAMPLE_RATE,
environment=ENVIRONMENT,
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True
)
DEBUG_TOOLBAR = os.environ.get(
    "DJANGO_DEBUG_TOOLBAR", "false").lower() == "true"
# Parenthesise the conditional expression: "%" binds tighter than
# "if/else", so the original logged just "disabled" (without the
# "Django Debug Toolbar" prefix) whenever the toolbar was off.
logger.info("Django Debug Toolbar %s." %
            ("enabled" if DEBUG_TOOLBAR else "disabled"))
DEBUG_TOOLBAR_CONFIG = {
    "SHOW_TOOLBAR_CALLBACK": "municipal_finance.settings.show_toolbar_check"
}
def show_toolbar_check(request):
    """SHOW_TOOLBAR_CALLBACK for django-debug-toolbar: display the toolbar
    only when both DEBUG and DEBUG_TOOLBAR are enabled."""
    if not DEBUG:
        return False
    return DEBUG_TOOLBAR
| |
#
# Appcelerator Titanium Mobile
# Copyright (c) 2011-2012 by Appcelerator, Inc. All Rights Reserved.
# Licensed under the terms of the Apache Public License
# Please see the LICENSE included with this distribution for details.
#
# A custom server that speeds up development time in Android significantly
import os, sys, time, optparse, logging
import urllib, threading
import SocketServer, socket, struct, codecs
import platform, mimetypes
# we use our compatibility code for python 2.5
if sys.version_info < (2, 6):
from tcpserver import TCPServer
else:
from SocketServer import TCPServer
logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO)
support_android_dir = os.path.dirname(os.path.abspath(__file__))
support_dir = os.path.dirname(support_android_dir)
sys.path.append(support_dir)
sys.path.append(os.path.join(support_dir, "common"))
import tiapp, simplejson
server = None
request_count = 0
start_time = time.time()
idle_thread = None
is_windows = (platform.system() == 'Windows')
utf8_codec = codecs.lookup("utf-8")
def pack_int(i):
    """Serialize *i* as a 4-byte big-endian (network order) signed integer."""
    return struct.Struct("!i").pack(i)
def send_tokens(socket, *tokens):
    """Send one protocol message on *socket*: a token count followed by a
    (length, payload) pair for every token, in a single sendall() call."""
    parts = [pack_int(len(tokens))]
    for token in tokens:
        parts.append(pack_int(len(token)))
        parts.append(token)
    socket.sendall(b"".join(parts))
def read_int(socket):
    """Read a 4-byte big-endian signed integer from *socket*.

    Returns None when the connection closes before a full integer arrives.
    Loops on recv() because a TCP stream may deliver fewer than the
    requested bytes at once; the original single recv(4) crashed
    struct.unpack on such a short read.
    """
    data = b""
    while len(data) < 4:
        chunk = socket.recv(4 - len(data))
        if not chunk:
            return None
        data += chunk
    return struct.unpack("!i", data)[0]
def read_tokens(socket):
    """Read one protocol message (see send_tokens) from *socket*.

    Returns the list of token payloads, or None when the connection closed
    before the token count could be read.
    """
    token_count = read_int(socket)
    if token_count is None:
        return None
    tokens = []
    for _ in range(token_count):
        length = read_int(socket)
        if not length:
            # Zero-length token (or connection closed mid-message).
            tokens.append(b"")
            continue
        # recv() may return fewer than `length` bytes on a stream socket;
        # keep reading until the full payload has arrived or the peer closes.
        data = b""
        while len(data) < length:
            chunk = socket.recv(length - len(data))
            if not chunk:
                break
            data += chunk
        tokens.append(data)
    return tokens
def should_open_binary(path):
    """Decide whether *path* must be opened in binary mode.

    Only relevant on Windows, where text mode mangles line endings; on all
    other platforms this always returns False. Well-known text and binary
    extensions are checked first, then the mimetype is consulted, defaulting
    to binary when in doubt.
    """
    if not is_windows:
        return False
    lowered = path.lower()
    ext = os.path.splitext(lowered)[1]
    if not ext:
        return True
    # Fast paths for extensions we already know about.
    text_exts = (".js", ".jss", ".html", ".xml", ".htm", ".txt", ".css", ".json")
    if ext in text_exts:
        return False
    binary_exts = (".gif", ".bmp", ".png", ".jpg", ".jpeg", ".db", ".mp3",
                   ".mov", ".wav", ".mpg", ".mpeg", ".3gp", ".3gpp", ".m4a",
                   ".mp4", ".flac", ".ogg")
    if ext in binary_exts:
        return True
    mime_type = mimetypes.guess_type(lowered)[0]
    return not (mime_type and mime_type.startswith("text"))
""" A simple idle checker thread """
class IdleThread(threading.Thread):
    """Watchdog thread that shuts the server down after max_idle_time
    seconds without any request activity."""

    def __init__(self, max_idle_time):
        super(IdleThread, self).__init__()
        # Create the lock here rather than in run(): request-handler threads
        # call clear_idle_time(), which previously raised AttributeError when
        # it ran before this thread's run() had created the lock.
        self.idle_lock = threading.Lock()
        self.idle_time = 0
        self.max_idle_time = max_idle_time
        self.running = True

    def clear_idle_time(self):
        """Reset the idle counter; called whenever a request is served."""
        self.idle_lock.acquire()
        self.idle_time = 0
        self.idle_lock.release()

    def run(self):
        # Tick once per second; shut the global server down on timeout.
        while self.running:
            if self.idle_time < self.max_idle_time:
                time.sleep(1)
                self.idle_lock.acquire()
                self.idle_time += 1
                self.idle_lock.release()
            else:
                logging.info("Shutting down Fastdev server due to idle timeout: %s" % self.idle_time)
                server.shutdown()
                self.running = False
"""
A handler for fastdev requests.
The fastdev server uses a simple binary protocol comprised of messages and tokens.
Without a valid handshake, no requests will be processed.
Currently supported commands are:
- "handshake" <guid> : Application handshake
- "script-handshake" <guid> : Script control handshake
- "get" <Resources relative path> : Get the contents of a file from the Resources folder
- "kill-app" : Kills the connected app's process
- "restart-app" : Restarts the connected app's process
- "shutdown" : Shuts down the server
Right now the VFS rules for "get" are:
- Anything under "Resources" is served as is
- Anything under "Resources/android" overwrites anything under "Resources" (and is mapped to the root)
"""
class FastDevHandler(SocketServer.BaseRequestHandler):
    """Per-connection handler for the fastdev binary token protocol.

    Class attributes are shared server-wide:
      resources_dir -- root "Resources" directory files are served from
      handshake     -- app GUID clients must present to be accepted
      app_handler   -- handler of the currently connected app; used to push
                       "kill"/"restart" messages back to the app
    """
    resources_dir = None
    handshake = None
    app_handler = None

    def handle(self):
        # Main per-connection loop: read token messages until the peer
        # disconnects or the server shuts down.
        logging.info("connected: %s:%d" % self.client_address)
        global request_count
        self.valid_handshake = False
        # Short timeout so the loop can notice server shutdown promptly.
        self.request.settimeout(1.0)
        while True:
            try:
                tokens = read_tokens(self.request)
                if tokens == None:
                    break
            except socket.timeout, e:
                # only break the loop when not serving, otherwise timeouts are normal
                serving = False
                if sys.version_info < (2, 6):
                    serving = server.is_serving()
                elif sys.version_info < (2, 7):
                    # NOTE(review): pokes BaseServer's private attributes;
                    # tied to specific CPython 2.x internals.
                    serving = server._BaseServer__serving
                else:
                    serving = not server._BaseServer__is_shut_down.isSet()
                if not serving:
                    break
                else: continue
            # Any message counts as activity for the idle watchdog.
            idle_thread.clear_idle_time()
            command = tokens[0]
            if command == "handshake":
                FastDevHandler.app_handler = self
                self.handle_handshake(tokens[1])
            elif command == "script-handshake":
                self.handle_handshake(tokens[1])
            else:
                # Every other command requires a prior successful handshake.
                if not self.valid_handshake:
                    self.send_tokens("Invalid Handshake")
                    break
                if command == "length":
                    request_count += 1
                    self.handle_length(tokens[1])
                elif command == "exists":
                    request_count += 1
                    self.handle_exists(tokens[1])
                elif command == "get":
                    request_count += 1
                    self.handle_get(tokens[1])
                elif command == "kill-app":
                    self.handle_kill_app()
                    break
                elif command == "restart-app":
                    self.handle_restart_app()
                    break
                elif command == "status":
                    self.handle_status()
                elif command == "shutdown":
                    self.handle_shutdown()
                    break
        logging.info("disconnected: %s:%d" % self.client_address)

    def handle_handshake(self, handshake):
        # Accept or reject a client by comparing its GUID with ours.
        logging.info("handshake: %s" % handshake)
        if handshake == self.handshake:
            self.send_tokens("OK")
            self.valid_handshake = True
        else:
            logging.warn("handshake: invalid handshake sent, rejecting")
            self.send_tokens("Invalid Handshake")

    def get_resource_path(self, relative_path):
        # Resolve a Resources-relative path; files under "Resources/android"
        # overlay (win over) the same path under "Resources". Returns None
        # when the file exists in neither location.
        android_path = os.path.join(self.resources_dir, 'android', relative_path)
        path = os.path.join(self.resources_dir, relative_path)
        if os.path.exists(android_path):
            return android_path
        elif os.path.exists(path):
            return path
        else:
            return None

    def handle_length(self, relative_path):
        # Reply with the file's size in bytes, or -1 when it doesn't exist.
        path = self.get_resource_path(relative_path)
        if path != None:
            length = os.path.getsize(path)
            logging.info("length %s: %d" % (relative_path, length))
            self.send_tokens(pack_int(length))
        else:
            logging.info("length %s: path not found" % relative_path)
            self.send_tokens(pack_int(-1))

    def handle_exists(self, relative_path):
        # Reply 1 when the resource exists, 0 otherwise.
        path = self.get_resource_path(relative_path)
        if path != None:
            logging.info("%s exists: true" % relative_path)
            self.send_tokens(pack_int(1))
        else:
            logging.info("%s exists: false" % relative_path)
            self.send_tokens(pack_int(0))

    def handle_get(self, relative_path):
        # Reply with the file's contents, or the "NOT_FOUND" sentinel when
        # the path is missing or is a directory.
        path = self.get_resource_path(relative_path)
        if path is None:
            logging.warn("get %s: path not found" % relative_path)
            self.send_tokens("NOT_FOUND")
            return
        if os.path.isfile(path) is False:
            logging.warn("get %s: path is a directory" % relative_path)
            self.send_tokens("NOT_FOUND")
            return
        logging.info("get %s: %s" % (relative_path, path))
        self.send_file(path)

    def send_tokens(self, *tokens):
        # Convenience wrapper around the module-level send_tokens().
        send_tokens(self.request, *tokens)

    def send_file(self, path):
        # Read the whole file and send it as a single token; binary mode is
        # decided per-extension (Windows only, see should_open_binary).
        mode = 'r'
        if should_open_binary(path):
            mode += 'b'
        buffer = open(path, mode).read()
        self.send_tokens(buffer)

    def handle_kill_app(self):
        # Forward a "kill" message to the connected app, if any.
        logging.info("request: kill-app")
        if FastDevHandler.app_handler != None:
            try:
                FastDevHandler.app_handler.send_tokens("kill")
                self.send_tokens("OK")
            except Exception, e:
                logging.error("kill: error: %s" % e)
                self.send_tokens(str(e))
        else:
            self.send_tokens("App not connected")
            logging.warn("kill: no app is connected")

    def handle_restart_app(self):
        # Forward a "restart" message to the connected app, if any.
        logging.info("request: restart-app")
        if FastDevHandler.app_handler != None:
            try:
                FastDevHandler.app_handler.send_tokens("restart")
                self.send_tokens("OK")
            except Exception, e:
                logging.error("restart: error: %s" % e)
                self.send_tokens(str(e))
        else:
            self.send_tokens("App not connected")
            logging.warn("restart: no app is connected")

    def handle_status(self):
        # Reply with a JSON document describing the server's state.
        logging.info("request: status")
        global server
        global request_count
        global start_time
        app_connected = FastDevHandler.app_handler != None
        status = {
            "uptime": int(time.time() - start_time),
            "pid": os.getpid(),
            "app_connected": app_connected,
            "request_count": request_count,
            "port": server.server_address[1]
        }
        self.send_tokens(simplejson.dumps(status))

    def handle_shutdown(self):
        # Acknowledge, then stop the server and the idle watchdog.
        self.send_tokens("OK")
        server.shutdown()
        idle_thread.running = False
class ThreadingTCPServer(SocketServer.ThreadingMixIn, TCPServer):
    """TCP server that handles each connection in its own thread."""

    def shutdown_noblock(self):
        """Request shutdown without blocking.

        BaseServer.shutdown() blocks until serve_forever() exits, which
        would deadlock when called from the serve_forever() thread itself
        (e.g. the KeyboardInterrupt path in start_server); so this pokes
        the version-specific private flags directly instead.
        """
        if sys.version_info < (2, 6):
            # NOTE(review): "self.__serving" name-mangles to
            # _ThreadingTCPServer__serving here, not the compat TCPServer's
            # attribute -- verify against the bundled tcpserver module.
            self.__serving = False
        elif sys.version_info < (2, 7):
            self._BaseServer__serving = False
        else:
            self._BaseServer__shutdown_request = True
class FastDevRequest(object):
    """Client-side connection to a running fastdev server.

    Reads the server's lock file to discover its port and app GUID,
    connects, and performs the "script-handshake". Exits the whole process
    when the lock file is missing or the handshake is rejected.
    """
    def __init__(self, dir, options):
        self.lock_file = get_lock_file(dir, options)
        if not os.path.exists(self.lock_file):
            print >>sys.stderr, "Error: No Fastdev Servers found. " \
                "The lock file at %s does not exist, you either need to run \"stop\" " \
                "within your Titanium project or specify the lock file with -l <lock file>" \
                % self.lock_file
            sys.exit(1)
        # Lock file is a JSON document written by start_server().
        f = open(self.lock_file, 'r')
        self.data = simplejson.loads(f.read())
        f.close()
        self.port = self.data["port"]
        self.app_guid = self.data["app_guid"]
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((socket.gethostname(), self.port))
        send_tokens(self.socket, "script-handshake", self.app_guid)
        response = read_tokens(self.socket)[0]
        if response != "OK":
            print >>sys.stderr, "Error: Handshake was not accepted by the Fastdev server"
            sys.exit(1)

    def send(self, *tokens):
        # Send one message and return the server's reply tokens.
        send_tokens(self.socket, *tokens)
        return read_tokens(self.socket)

    def close(self):
        # Close the underlying socket.
        self.socket.close()
def get_lock_file(dir, options):
    """Return the server lock file path.

    Uses the -l/--lock-file option when given, otherwise defaults to
    <dir>/.fastdev.lock. (None comparison switched to the idiomatic
    "is None".)
    """
    lock_file = options.lock_file
    if lock_file is None:
        lock_file = os.path.join(dir, ".fastdev.lock")
    return lock_file
def start_server(dir, options):
    """Start a fastdev server for the Titanium project in *dir*.

    Writes a .fastdev.lock JSON file describing the server, serves until
    interrupted or idle-timed-out, then removes the lock file. Exits early
    (status 0) if a lock file already exists.
    """
    xml = tiapp.TiAppXML(os.path.join(dir, "tiapp.xml"))
    app_id = xml.properties["id"]
    app_guid = xml.properties["guid"]
    lock_file = get_lock_file(dir, options)
    if os.path.exists(lock_file):
        print "Fastdev server already running for %s" % app_id
        sys.exit(0)
    resources_dir = os.path.join(dir, 'Resources')
    FastDevHandler.resources_dir = resources_dir
    FastDevHandler.handshake = app_guid
    global server
    global idle_thread
    # Port 0 (the default) lets the OS pick the first available port.
    server = ThreadingTCPServer(("", int(options.port)), FastDevHandler)
    port = server.server_address[1]
    logging.info("Serving up files for %s at 0.0.0.0:%d from %s" % (app_id, port, dir))
    f = open(lock_file, 'w+')
    f.write(simplejson.dumps({
        "ip": "0.0.0.0",
        "port": port,
        "dir": dir,
        "app_id": app_id,
        "app_guid": app_guid
    }))
    f.close()
    try:
        idle_thread = IdleThread(int(options.timeout))
        idle_thread.start()
        server.serve_forever()
    except KeyboardInterrupt, e:
        idle_thread.running = False
        # Non-blocking shutdown: we are on the serve_forever() thread here.
        server.shutdown_noblock()
        print "Terminated"
    logging.info("Fastdev server stopped.")
    os.unlink(lock_file)
def stop_server(dir, options):
    """Ask the running fastdev server for *dir* to shut itself down."""
    request = FastDevRequest(dir, options)
    print request.send("shutdown")[0]
    request.close()
    print "Fastdev server for %s stopped." % request.data["app_id"]
def kill_app(dir, options):
    """Ask the server to kill the connected app; returns True on success."""
    request = FastDevRequest(dir, options)
    result = request.send("kill-app")
    request.close()
    if result and result[0] == "OK":
        print "Killed app %s." % request.data["app_id"]
        return True
    else:
        print "Error killing app, result: %s" % result
        return False
def restart_app(dir, options):
    """Ask the server to restart the connected app; returns True on success."""
    request = FastDevRequest(dir, options)
    result = request.send("restart-app")
    request.close()
    if result and result[0] == "OK":
        print "Restarted app %s." % request.data["app_id"]
        return True
    else:
        print "Error restarting app, result: %s" % result
        return False
def is_running(dir):
    """Best-effort check whether a fastdev server is running for *dir*.

    Any connection or parse failure is treated as "not running".
    """
    # Minimal stand-in for the optparse options object FastDevRequest expects.
    class Options(object): pass
    options = Options()
    options.lock_file = os.path.join(dir, '.fastdev.lock')
    if not os.path.exists(options.lock_file):
        return False
    try:
        request = FastDevRequest(dir, options)
        result = request.send("status")[0]
        request.close()
        status = simplejson.loads(result)
        return type(status) == dict
    except Exception, e:
        return False
def status(dir, options):
    """Print the running server's port, uptime, pid and request count."""
    lock_file = get_lock_file(dir, options)
    if lock_file == None or not os.path.exists(lock_file):
        print "No Fastdev servers running in %s" % dir
    else:
        data = simplejson.loads(open(lock_file, 'r').read())
        port = data["port"]
        try:
            request = FastDevRequest(dir, options)
            result = request.send("status")[0]
            request.close()
            status = simplejson.loads(result)
            print "Fastdev server running for app %s:" % data["app_id"]
            print "Port: %d" % port
            print "Uptime: %d sec" % status["uptime"]
            print "PID: %d" % status["pid"]
            print "Requests: %d" % status["request_count"]
        except Exception, e:
            # A stale lock file is the usual cause here.
            print >>sys.stderr, "Error: .fastdev.lock found in %s, but couldn't connect to the server on port %d: %s. Try manually deleting .fastdev.lock." % (dir, port, e)
def get_optparser():
    """Build the optparse parser for the fastdev command-line tool."""
    usage = """Usage: %prog [command] [options] [app-dir]

Supported Commands:
	start		start the fastdev server
	status		get the status of the fastdev server
	stop		stop the fastdev server
	restart-app	restart the app connected to this fastdev server
	kill-app	kill the app connected to this fastdev server
"""
    parser = optparse.OptionParser(usage)
    # Default port 0 means "let the OS pick the first available port".
    parser.add_option('-p', '--port', dest='port',
        help='port to bind the server to [default: first available port]', default=0)
    parser.add_option('-t', '--timeout', dest='timeout',
        help='Timeout in seconds before the Fastdev server shuts itself down when it hasn\'t received a request [default: %default]',
        default=30 * 60)
    parser.add_option('-l', '--lock-file', dest='lock_file',
        help='Path to the server lock file [default: app-dir/.fastdev.lock]',
        default=None)
    return parser
def main():
    """Entry point: parse the command line and dispatch the command."""
    parser = get_optparser()
    (options, args) = parser.parse_args()
    if len(args) == 0 or args[0] not in ['start', 'stop', 'kill-app', 'restart-app', 'status']:
        # parser.error() already exits; the explicit sys.exit is a safeguard.
        parser.error("Missing required command")
        sys.exit(1)
    command = args[0]
    # The optional second argument is the project directory (default: cwd).
    dir = os.getcwd()
    if len(args) > 1:
        dir = os.path.expanduser(args[1])
        dir = os.path.abspath(dir)
    if command == "start":
        if not os.path.exists(os.path.join(dir, "tiapp.xml")):
            parser.error("Directory is not a Titanium Project: %s" % dir)
            sys.exit(1)
        try:
            start_server(dir, options)
        except Exception, e:
            print >>sys.stderr, "Error starting Fastdev server: %s" % e
    elif command == "stop":
        stop_server(dir, options)
    elif command == "kill-app":
        kill_app(dir, options)
    elif command == 'restart-app':
        restart_app(dir, options)
    elif command == "status":
        status(dir, options)
if __name__ == "__main__":
main()
| |
# Author: xiaotaw@qq.com (Any bug report is welcome)
# Time Created: Dec 2016
# Time Last Updated: Dec 2016
# Addr: Shenzhen, China
# Description: calculate mask(label) of chembl molecules for specific targets
import os
import sys
import math
import time
import datetime
import multiprocessing
import numpy as np
from scipy import sparse
from collections import defaultdict
# folders
fp_dir = "fp_files"
structure_dir = "structure_files"
mask_dir = "mask_files"
if not os.path.exists(mask_dir):
os.mkdir(mask_dir)
log_dir = "log_files"
if not os.path.exists(log_dir):
os.mkdir(log_dir)
# the newly picked out 15 targets, include 9 targets from 5 big group, and 6 targets from others.
target_list = ["CHEMBL279", "CHEMBL203", # Protein Kinases
"CHEMBL217", "CHEMBL253", # GPCRs (Family A)
"CHEMBL235", "CHEMBL206", # Nuclear Hormone Receptors
"CHEMBL240", "CHEMBL4296", # Voltage Gated Ion Channels
"CHEMBL4805", # Ligand Gated Ion Channels
"CHEMBL204", "CHEMBL244", "CHEMBL4822", "CHEMBL340", "CHEMBL205", "CHEMBL4005" # Others
]
# the target
#target = target_list[int(sys.argv[1])]
# read chembl id and apfp
chembl_id = []
chembl_apfp = {}
# Context manager guarantees the file is closed even when a malformed
# line makes the split("\t") unpacking raise.
with open(os.path.join(fp_dir, "chembl.apfp"), "r") as f:
    for line in f:
        id_, fps_str = line.split("\t")
        id_ = id_.strip()
        chembl_id.append(id_)
        chembl_apfp[id_] = fps_str.strip()
# read (pubchem negative sample)pns apfp and counts the fps that appeared in pns compounds
pns_id = []
pns_apfp = {}
# defaultdict(int) is the idiomatic zero-default counter
# (equivalent to defaultdict(lambda: 0)).
pns_count = defaultdict(int)
# Context manager guarantees the file is closed even on a malformed line.
with open(os.path.join(fp_dir, "pubchem_neg_sample.apfp"), "r") as f:
    for line in f:
        id_, fps_str = line.split("\t")
        id_ = id_.strip()
        fps_str = fps_str.strip()
        pns_id.append(id_)
        pns_apfp[id_] = fps_str
        # fps_str looks like "{k1:v1,k2:v2,...}"; count each fingerprint key.
        for fp in fps_str[1:-1].split(","):
            if ":" in fp:
                k, _ = fp.split(":")
                pns_count[int(k)] += 1
# read top 79 targets' label
clf_label_79 = np.genfromtxt(os.path.join(structure_dir, "chembl_top79.label"), usecols=[0, 2, 3], delimiter="\t", skip_header=1, dtype=str)
def cal_mask(target):
################################################################################
# generate sparse matrix for target features
# target compounds' chembl_id and clf label.
target_clf_label = clf_label_79[clf_label_79[:, 0] == target]
# remove compounds whose apfp cannot be caculated
m = []
for cmpd_id in target_clf_label[:, 1]:
if cmpd_id in chembl_id:
m.append(True)
else:
m.append(False)
target_clf_label = target_clf_label[np.array(m)]
# target fps
target_fps = [chembl_apfp[x] for x in target_clf_label[:, 1]]
# count the fps that appeared in the compounds of the target
target_count = defaultdict(lambda : 0)
for fps_str in target_fps:
for fp in fps_str[1:-1].split(","):
if ":" in fp:
k, _ = fp.split(":")
target_count[int(k)] += 1
target_count.update(pns_count)
# save target apfp count
count_file = open(os.path.join(mask_dir, "%s_apfp.count" % target), "w")
for k in target_count.keys():
count_file.write("%d\t%d\n" % (k, target_count[k]))
count_file.close()
# pick out that fps that appeared for more than 10 times.
# Here we assume that the more frequently a fp appeared, the more important it is.
v = np.array([[k, target_count[k]] for k in target_count.keys()])
m = v[:, 1] > 10
target_apfp_picked = v[m][:, 0]
# according to the apfp that picked out, define the columns in the feature sparse matrix
# Note: a defaultdict is used.
# And the purpose is assign a default value(length of target_apfp_picked) for the apfps
# which is not included in target_apfp_picked. And this column(the last column) was finally
# not used at all.
columns_dict = defaultdict(lambda : len(target_apfp_picked))
for i, apfp in enumerate(target_apfp_picked):
columns_dict[apfp] = i
# define the function which can construct a feature sparse matrix according to the columns_dict
def sparse_features(fps_list):
data = []
indices = []
indptr = [0]
for fps_str in fps_list:
n = indptr[-1]
for fp in fps_str[1:-1].split(","):
if ":" in fp:
k, v = fp.split(":")
indices.append(columns_dict[int(k)])
data.append(int(v))
n += 1
indptr.append(n)
a = sparse.csr_matrix((np.array(data), indices, indptr), shape=(len(fps_list), len(target_apfp_picked) + 1))
return a
# pick out target compounds with pos labels
# normally, abs(clf_label) > 0.5(refer to chembl_preparation.py),
# so it also works when using the following line:
# target_pos_id = target_clf_label[target_clf_label[:, 2].astype(float) > 0.5][:, 1]
target_pos_id = target_clf_label[target_clf_label[:, 2].astype(float) > 0][:, 1]
target_pos_fps = [chembl_apfp[x] for x in target_pos_id]
# generate feature sparse matrix for target's pos compounds
target_pos_features = sparse_features(target_pos_fps)[:, :-1].toarray()
# generate feature sparse matrix for pns compounds
target_pns_features = sparse_features([pns_apfp[k] for k in pns_id])[:, :-1]
# generate feature sparse matrix for (chembl negative sample)cns compounds
target_cns_features = sparse_features([chembl_apfp[k] for k in chembl_id])[:, :-1]
################################################################################
# generate mask for pns and cns
# define a task function for sub process:
# it can compare a part of negative sample(cns or pns) with pos samples,
# and return the mask of those samples back to the main process.
def sub_compare(sub_neg_id, sub_neg_features, conn):
    """Worker task: flag negative samples identical to any positive sample.

    Compares every row of ``sub_neg_features`` against every row of the
    closed-over ``target_pos_features`` and sends a ``(mask, log_lines)``
    pair back to the parent process through ``conn``.
    """
    dup_mask = {}
    dup_lines = []
    for neg_k, neg_f in zip(sub_neg_id, sub_neg_features):
        for pos_k, pos_f in zip(target_pos_id, target_pos_features):
            # Zero differing entries means the "negative" duplicates a positive.
            # (kept as .sum() == 0 — works for both sparse rows and ndarrays)
            if (neg_f != pos_f).sum() == 0:
                dup_mask[neg_k] = True
                dup_lines.append("%s\t%s\n" % (neg_k, pos_k))
    conn.send((dup_mask, dup_lines))
    conn.close()
# Number of worker processes used for the pairwise duplicate search.
n_jobs = 6
# --- generate the duplicate mask for pns (pubchem negative samples) ---
t1 = time.time()
date1 = datetime.datetime.now()
# Rows handled by each worker; the last slice may be shorter.
num_per_job = int(math.ceil(target_pns_features.shape[0] / float(n_jobs)))
thread_list = []
conn_list = []
for i in range(0, n_jobs):
    begin = i * num_per_job
    end = (i + 1) * num_per_job
    if end > target_pns_features.shape[0]:
        end = target_pns_features.shape[0]
    p_conn, c_conn = multiprocessing.Pipe()
    conn_list.append((p_conn, c_conn))
    t = multiprocessing.Process(target=sub_compare, args=(pns_id[begin: end], target_pns_features[begin: end], c_conn))
    thread_list.append(t)
for t in thread_list:
    t.start()
# FIX: drain every pipe *before* joining the workers.  Pipe.send() blocks once
# the OS pipe buffer fills up, so join()-ing a child that is still sending a
# large (mask, log) payload deadlocks parent and child forever.
results = [conn_list[i][0].recv() for i in range(n_jobs)]
for t in thread_list:
    t.join()
t2 = time.time()
# defaultdict: ids never flagged as duplicates default to False.
target_pns_mask = defaultdict(lambda : False)
log = open(log_dir + "/" + target + "_gen_pns_mask.log", "w")
log.write("%s generate mask for pubchem neg sample, begins at %s\n" % (target, str(date1)))
for mask, log_str in results:
    target_pns_mask.update(mask)
    log.writelines(log_str)
log.write("generate mask for pns, duration: %.3f\n" % (t2 - t1))
log.close()
mask_file = open(os.path.join(mask_dir, "%s_pns.mask" % target), "w")
mask_file.writelines(["%s\t%s\n" % (x, target_pns_mask[x]) for x in pns_id])
mask_file.close()
print("generate mask for pns, duration: %.3f" % (t2 - t1))
# --- generate the duplicate mask for cns (chembl negative samples) ---
t2 = time.time()
date2 = datetime.datetime.now()
num_per_job = int(math.ceil(target_cns_features.shape[0] / float(n_jobs)))
thread_list = []
conn_list = []
for i in range(0, n_jobs):
    begin = i * num_per_job
    end = (i + 1) * num_per_job
    if end > target_cns_features.shape[0]:
        end = target_cns_features.shape[0]
    p_conn, c_conn = multiprocessing.Pipe()
    conn_list.append((p_conn, c_conn))
    t = multiprocessing.Process(target=sub_compare, args=(chembl_id[begin: end], target_cns_features[begin: end], c_conn))
    thread_list.append(t)
for t in thread_list:
    t.start()
# FIX: receive results *before* joining.  Pipe.send() blocks when the OS pipe
# buffer is full, so joining a child that is still sending a large payload
# would deadlock; drain each pipe first, then reap the workers.
results = [conn_list[i][0].recv() for i in range(n_jobs)]
for t in thread_list:
    t.join()
t3 = time.time()
# defaultdict: ids never flagged as duplicates default to False.
target_cns_mask = defaultdict(lambda : False)
log = open(log_dir + "/" + target + "_gen_cns_mask.log", "w")
log.write("%s generate mask for chembl neg sample, begins at %s\n" % (target, str(date2)))
for mask, log_str in results:
    target_cns_mask.update(mask)
    log.writelines(log_str)
log.write("generate mask for cns, duration: %.3f\n" % (t3 - t2))
log.close()
mask_file = open(os.path.join(mask_dir, "%s_cns.mask" % target), "w")
mask_file.writelines(["%s\t%s\n" % (x, target_cns_mask[x]) for x in chembl_id])
mask_file.close()
print("generate mask for cns, duration: %.3f" % (t3 - t2))
# The newly picked out targets.
# NOTE(review): the comment originally claimed 15 targets (9 from 5 big groups,
# 6 from others) but only 6 ChEMBL ids are listed — presumably the list was
# trimmed; confirm against the original target selection.
target_list = [
"CHEMBL4805", # Ligand Gated Ion Channels
"CHEMBL244", "CHEMBL4822", "CHEMBL340", "CHEMBL205", "CHEMBL4005" # Others
]
#for target in target_list:
# cal_mask(target)
# The target id is taken from the command line instead of looping target_list.
cal_mask(sys.argv[1])
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for configuring TensorFlow execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export('config.threading.get_intra_op_parallelism_threads')
def get_intra_op_parallelism_threads():
  """Returns the number of threads used within an individual op.

  Certain operations like matrix multiplication and reductions can utilize
  parallel threads for speed ups. A value of 0 means the system picks an
  appropriate number.

  Returns:
    Number of parallel threads
  """
  ctx = context.context()
  return ctx.intra_op_parallelism_threads
@tf_export('config.threading.set_intra_op_parallelism_threads')
def set_intra_op_parallelism_threads(num_threads):
  """Configures the number of threads used within an individual op.

  Certain operations like matrix multiplication and reductions can utilize
  parallel threads for speed ups. A value of 0 means the system picks an
  appropriate number.

  Args:
    num_threads: Number of parallel threads
  """
  ctx = context.context()
  ctx.intra_op_parallelism_threads = num_threads
@tf_export('config.threading.get_inter_op_parallelism_threads')
def get_inter_op_parallelism_threads():
  """Returns the number of threads used between independent operations.

  Determines the number of threads used by independent non-blocking operations.
  0 means the system picks an appropriate number.

  Returns:
    Number of parallel threads
  """
  ctx = context.context()
  return ctx.inter_op_parallelism_threads
@tf_export('config.threading.set_inter_op_parallelism_threads')
def set_inter_op_parallelism_threads(num_threads):
  """Configures the number of threads used between independent operations.

  Determines the number of threads used by independent non-blocking operations.
  0 means the system picks an appropriate number.

  Args:
    num_threads: Number of parallel threads
  """
  ctx = context.context()
  ctx.inter_op_parallelism_threads = num_threads
@tf_export('config.optimizer.get_jit')
def get_optimizer_jit():
  """Returns whether JIT compilation is enabled.

  Note that optimizations are only applied to code that is compiled into a
  graph. In eager mode, which is the TF2 API default, that means only code that
  is defined under a tf.function decorator.

  Returns:
    If JIT compilation is enabled.
  """
  ctx = context.context()
  return ctx.optimizer_jit
@tf_export('config.optimizer.set_jit')
def set_optimizer_jit(enabled):
  """Configures whether JIT compilation is enabled.

  Note that optimizations are only applied to code that is compiled into a
  graph. In eager mode, which is the TF2 API default, that means only code that
  is defined under a tf.function decorator.

  Args:
    enabled: Whether to enable JIT compilation.
  """
  ctx = context.context()
  ctx.optimizer_jit = enabled
@tf_export('config.optimizer.get_experimental_options')
def get_optimizer_experimental_options():
  """Returns the currently configured experimental optimizer options.

  Refer to tf.config.optimizer.set_experimental_options for a list of current
  options.

  Note that optimizations are only applied in graph mode, (within tf.function).
  In addition, as these are experimental options, the list is subject to change.

  Returns:
    Dictionary of configured experimental optimizer options
  """
  ctx = context.context()
  return ctx.get_optimizer_experimental_options()
@tf_export('config.optimizer.set_experimental_options')
def set_optimizer_experimental_options(options):
  """Configures experimental optimizer options.

  Note that optimizations are only applied in graph mode, (within tf.function).
  In addition, as these are experimental options, the list is subject to change.

  Args:
    options: Dictionary of experimental optimizer options to configure.
      Valid keys:
      - layout_optimizer: Optimize tensor layouts
        e.g. This will try to use NCHW layout on GPU which is faster.
      - constant_folding: Fold constants
        Statically infer the value of tensors when possible, and materialize the
        result using constants.
      - shape_optimization: Simplify computations made on shapes.
      - remapping: Remap subgraphs onto more efficient implementations.
      - arithmetic_optimization: Simplify arithmetic ops with common
        sub-expression elimination and arithmetic simplification.
      - dependency_optimization: Control dependency optimizations. Remove
        redundant control dependencies, which may enable other optimization.
        This optimizer is also essential for pruning Identity and NoOp nodes.
      - loop_optimization: Loop optimizations.
      - function_optimization: Function optimizations and inlining.
      - debug_stripper: Strips debug-related nodes from the graph.
      - disable_model_pruning: Disable removal of unnecessary ops from the graph
      - scoped_allocator_optimization: Try to allocate some independent Op
        outputs contiguously in order to merge or eliminate downstream Ops.
      - pin_to_host_optimization: Force small ops onto the CPU.
      - implementation_selector: Enable the swap of kernel implementations based
        on the device placement.
      - auto_mixed_precision: Change certain float32 ops to float16 on Volta
        GPUs and above. Without the use of loss scaling, this can cause
        numerical underflow (see
        `keras.mixed_precision.experimental.LossScaleOptimizer`).
      - disable_meta_optimizer: Disable the entire meta optimizer.
      - min_graph_nodes: The minimum number of nodes in a graph to optimize.
        For smaller graphs, optimization is skipped.
  """
  ctx = context.context()
  ctx.set_optimizer_experimental_options(options)
@tf_export('config.get_soft_device_placement')
def get_soft_device_placement():
  """Returns whether soft device placement is enabled.

  If enabled, an op will be placed on CPU if any of the following are true
    1. there's no GPU implementation for the OP
    2. no GPU devices are known or registered
    3. need to co-locate with reftype input(s) which are from CPU

  Returns:
    If soft placement is enabled.
  """
  ctx = context.context()
  return ctx.soft_device_placement
@tf_export('config.set_soft_device_placement')
def set_soft_device_placement(enabled):
  """Configures whether soft device placement is enabled.

  If enabled, an op will be placed on CPU if any of the following are true
    1. there's no GPU implementation for the OP
    2. no GPU devices are known or registered
    3. need to co-locate with reftype input(s) which are from CPU

  Args:
    enabled: Whether to enable soft placement.
  """
  ctx = context.context()
  ctx.soft_device_placement = enabled
@tf_export('config.experimental.get_device_policy')
def get_device_policy():
  """Returns the current device policy as a string.

  The device policy controls how operations requiring inputs on a specific
  device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).

  This function only gets the device policy for the current thread. Any
  subsequently started thread will again use the default policy.

  Returns:
    Current thread device policy
  """
  device_policy = context.context().device_policy
  # Map the internal enum back to its public string name.
  names = {
      context.DEVICE_PLACEMENT_SILENT: 'silent',
      context.DEVICE_PLACEMENT_SILENT_FOR_INT32: 'silent_for_int32',
      context.DEVICE_PLACEMENT_WARN: 'warn',
      context.DEVICE_PLACEMENT_EXPLICIT: 'explicit',
  }
  try:
    return names[device_policy]
  except KeyError:
    raise ValueError('Not a valid device policy: %r' % device_policy)
@tf_export('config.experimental.set_device_policy')
def set_device_policy(device_policy):
  """Sets the current thread device policy.

  The device policy controls how operations requiring inputs on a specific
  device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).

  When using the default, an appropriate policy will be picked automatically.
  The default policy may change over time.

  This function only sets the device policy for the current thread. Any
  subsequently started thread will again use the default policy.

  Args:
    device_policy: A device policy.
      Valid values:
      - None: Switch to a system default.
      - 'warn': Copies the tensors which are not on the right device and logs
        a warning.
      - 'explicit': Raises an error if the placement is not as required.
      - 'silent': Silently copies the tensors. Note that this may hide
        performance problems as there is no notification provided when
        operations are blocked on the tensor being copied between devices.
      - 'silent_for_int32': silently copies `int32` tensors, raising errors on
        the other ones.

  Raises:
    ValueError: If an invalid `device_policy` is passed.
  """
  # Translate the public string (or None) to the internal enum value.
  policies = {
      None: None,
      'silent': context.DEVICE_PLACEMENT_SILENT,
      'silent_for_int32': context.DEVICE_PLACEMENT_SILENT_FOR_INT32,
      'warn': context.DEVICE_PLACEMENT_WARN,
      'explicit': context.DEVICE_PLACEMENT_EXPLICIT,
  }
  try:
    new_policy = policies[device_policy]
  except (KeyError, TypeError):
    raise ValueError('Not a valid device policy: %r' % device_policy)
  context.context().device_policy = new_policy
@tf_export('config.experimental.get_synchronous_execution')
def get_synchronous_execution():
  """Returns whether operations are executed synchronously.

  TensorFlow can execute operations synchronously or asynchronously. If
  asynchronous execution is enabled, operations may return "non-ready" handles.

  Returns:
    Current thread execution mode
  """
  ctx = context.context()
  return ctx.execution_mode == context.SYNC
@tf_export('config.experimental.set_synchronous_execution')
def set_synchronous_execution(enable):
  """Specifies whether operations are executed synchronously or asynchronously.

  TensorFlow can execute operations synchronously or asynchronously. If
  asynchronous execution is enabled, operations may return "non-ready" handles.

  When `enable` is set to None, an appropriate value will be picked
  automatically. The value picked may change between TensorFlow releases.

  Args:
    enable: Whether operations should be dispatched synchronously.
      Valid values:
      - None: sets the system default.
      - True: executes each operation synchronously.
      - False: executes each operation asynchronously.
  """
  if enable is None:
    mode = None
  elif enable:
    mode = context.SYNC
  else:
    mode = context.ASYNC
  context.context().execution_mode = mode
@tf_export('config.list_physical_devices',
           'config.experimental.list_physical_devices')
@deprecation.deprecated_endpoints(
    'config.experimental.list_physical_devices')
def list_physical_devices(device_type=None):
  """Return a list of physical devices visible to the host runtime.
  Physical devices are hardware devices present on the host machine. By default
  all discovered CPU and GPU devices are considered visible.
  This API allows querying the physical hardware resources prior to runtime
  initialization. Thus, giving an opportunity to call any additional
  configuration APIs. This is in contrast to `tf.config.list_logical_devices`,
  which triggers runtime initialization in order to list the configured devices.
  The following example lists the number of visible GPUs on the host.
  >>> physical_devices = tf.config.list_physical_devices('GPU')
  >>> print("Num GPUs:", len(physical_devices))
  Num GPUs: ...
  However, the number of GPUs available to the runtime may change during runtime
  initialization due to marking certain devices as not visible or configuring
  multiple logical devices.
  Args:
    device_type: (optional string) Only include devices matching this device
      type. For example "CPU" or "GPU".
  Returns:
    List of discovered `tf.config.PhysicalDevice` objects
  """
  # Delegate to the eager context, which enumerates physical devices without
  # triggering full runtime initialization.
  return context.context().list_physical_devices(device_type)
@tf_export('config.list_logical_devices',
           'config.experimental.list_logical_devices')
@deprecation.deprecated_endpoints(
    'config.experimental.list_logical_devices')
def list_logical_devices(device_type=None):
  """Return a list of logical devices created by runtime.
  Logical devices may correspond to physical devices or remote devices in the
  cluster. Operations and tensors may be placed on these devices by using the
  `name` of the `tf.config.LogicalDevice`.
  Calling `tf.config.list_logical_devices` triggers the runtime to configure any
  `tf.config.PhysicalDevice` visible to the runtime, thereby preventing
  further configuration. To avoid runtime initialization, call
  `tf.config.list_physical_devices` instead.
  For example:
  >>> logical_devices = tf.config.list_logical_devices('GPU')
  >>> if len(logical_devices) > 0:
  ...   # Allocate on GPU:0
  ...   with tf.device(logical_devices[0].name):
  ...     one = tf.constant(1)
  ...   # Allocate on GPU:1
  ...   with tf.device(logical_devices[1].name):
  ...     two = tf.constant(2)
  Args:
    device_type: (optional string) Only include devices matching this device
      type. For example "CPU" or "GPU".
  Returns:
    List of initialized `LogicalDevice`s
  """
  # NOTE: this call initializes the runtime if it has not been initialized yet.
  return context.context().list_logical_devices(device_type=device_type)
@tf_export('config.get_visible_devices',
           'config.experimental.get_visible_devices')
@deprecation.deprecated_endpoints(
    'config.experimental.get_visible_devices')
def get_visible_devices(device_type=None):
  """Get the list of visible physical devices.
  Returns the list of `PhysicalDevice`s currently marked as visible to the
  runtime. A visible device will have at least one `LogicalDevice` associated
  with it once the runtime is initialized.
  The following example verifies all visible GPUs have been disabled:
  >>> physical_devices = tf.config.list_physical_devices('GPU')
  >>> try:
  ...   # Disable all GPUS
  ...   tf.config.set_visible_devices([], 'GPU')
  ...   visible_devices = tf.config.get_visible_devices()
  ...   for device in visible_devices:
  ...     assert device.device_type != 'GPU'
  ... except:
  ...   # Invalid device or cannot modify virtual devices once initialized.
  ...   pass
  Args:
    device_type: (optional string) Only include devices matching this device
      type. For example "CPU" or "GPU".
  Returns:
    List of visible `PhysicalDevice`s
  """
  # Visibility state is owned by the eager context; just forward the query.
  return context.context().get_visible_devices(device_type)
@tf_export('config.set_visible_devices',
           'config.experimental.set_visible_devices')
@deprecation.deprecated_endpoints(
    'config.experimental.set_visible_devices')
def set_visible_devices(devices, device_type=None):
  """Set the list of visible devices.
  Specifies which `PhysicalDevice` objects are visible to the runtime.
  TensorFlow will only allocate memory and place operations on visible
  physical devices, as otherwise no `LogicalDevice` will be created on them.
  By default all discovered devices are marked as visible.
  The following example demonstrates disabling the first GPU on the machine.
  >>> physical_devices = tf.config.list_physical_devices('GPU')
  >>> try:
  ...   # Disable first GPU
  ...   tf.config.set_visible_devices(physical_devices[1:], 'GPU')
  ...   logical_devices = tf.config.list_logical_devices('GPU')
  ...   # Logical device was not created for first GPU
  ...   assert len(logical_devices) == len(physical_devices) - 1
  ... except:
  ...   # Invalid device or cannot modify virtual devices once initialized.
  ...   pass
  Args:
    devices: List of `PhysicalDevice`s to make visible
    device_type: (optional) Only configure devices matching this device type.
      For example "CPU" or "GPU". Other devices will be left unaltered.
  Raises:
    ValueError: If argument validation fails.
    RuntimeError: Runtime is already initialized.
  """
  # Must be called before runtime initialization; the context validates this.
  context.context().set_visible_devices(devices, device_type)
@tf_export('config.experimental.get_memory_growth')
def get_memory_growth(device):
  """Get if memory growth is enabled for a `PhysicalDevice`.
  If memory growth is enabled for a `PhysicalDevice`, the runtime initialization
  will not allocate all memory on the device.
  For example:
  >>> physical_devices = tf.config.list_physical_devices('GPU')
  >>> try:
  ...   tf.config.experimental.set_memory_growth(physical_devices[0], True)
  ...   assert tf.config.experimental.get_memory_growth(physical_devices[0])
  ... except:
  ...   # Invalid device or cannot modify virtual devices once initialized.
  ...   pass
  Args:
    device: `PhysicalDevice` to query
  Returns:
    A boolean indicating the memory growth setting for the `PhysicalDevice`.
  Raises:
    ValueError: Invalid `PhysicalDevice` specified.
  """
  # Per-device growth flags are tracked by the eager context.
  return context.context().get_memory_growth(device)
@tf_export('config.experimental.set_memory_growth')
def set_memory_growth(device, enable):
  """Set if memory growth should be enabled for a `PhysicalDevice`.
  If memory growth is enabled for a `PhysicalDevice`, the runtime initialization
  will not allocate all memory on the device. Memory growth cannot be configured
  on a `PhysicalDevice` with virtual devices configured.
  For example:
  >>> physical_devices = tf.config.list_physical_devices('GPU')
  >>> try:
  ...   tf.config.experimental.set_memory_growth(physical_devices[0], True)
  ... except:
  ...   # Invalid device or cannot modify virtual devices once initialized.
  ...   pass
  Args:
    device: `PhysicalDevice` to configure
    enable: (Boolean) Whether to enable or disable memory growth
  Raises:
    ValueError: Invalid `PhysicalDevice` specified.
    RuntimeError: Runtime is already initialized.
  """
  # Must be called before runtime initialization; the context validates this.
  context.context().set_memory_growth(device, enable)
@tf_export('config.get_logical_device_configuration',
           'config.experimental.get_virtual_device_configuration')
@deprecation.deprecated_endpoints(
    'config.experimental.get_virtual_device_configuration')
def get_logical_device_configuration(device):
  """Get the virtual device configuration for a `tf.config.PhysicalDevice`.
  Returns the list of `tf.config.LogicalDeviceConfiguration`
  objects previously configured by a call to
  `tf.config.set_logical_device_configuration`.
  For example:
  >>> physical_devices = tf.config.list_physical_devices('CPU')
  >>> assert len(physical_devices) == 1, "No CPUs found"
  >>> configs = tf.config.get_logical_device_configuration(
  ...   physical_devices[0])
  >>> try:
  ...   assert configs is None
  ...   tf.config.set_logical_device_configuration(
  ...     physical_devices[0],
  ...     [tf.config.LogicalDeviceConfiguration(),
  ...      tf.config.LogicalDeviceConfiguration()])
  ...   configs = tf.config.get_logical_device_configuration(
  ...     physical_devices[0])
  ...   assert len(configs) == 2
  ... except:
  ...   # Cannot modify virtual devices once initialized.
  ...   pass
  Args:
    device: `PhysicalDevice` to query
  Returns:
    List of `tf.config.LogicalDeviceConfiguration` objects or
    `None` if no virtual device configuration has been set for this physical
    device.
  """
  # Virtual-device configuration is stored on the eager context.
  return context.context().get_logical_device_configuration(device)
@tf_export('config.set_logical_device_configuration',
           'config.experimental.set_virtual_device_configuration')
@deprecation.deprecated_endpoints(
    'config.experimental.set_virtual_device_configuration')
def set_logical_device_configuration(device, logical_devices):
  """Set the logical device configuration for a `tf.config.PhysicalDevice`.
  A visible `tf.config.PhysicalDevice` will by default have a single
  `tf.config.LogicalDevice` associated with it once the runtime is initialized.
  Specifying a list of `tf.config.LogicalDeviceConfiguration` objects allows
  multiple devices to be created on the same `tf.config.PhysicalDevice`.
  The following example splits the CPU into 2 logical devices:
  >>> physical_devices = tf.config.list_physical_devices('CPU')
  >>> assert len(physical_devices) == 1, "No CPUs found"
  >>> # Specify 2 virtual CPUs. Note currently memory limit is not supported.
  >>> try:
  ...   tf.config.set_logical_device_configuration(
  ...     physical_devices[0],
  ...     [tf.config.LogicalDeviceConfiguration(),
  ...      tf.config.LogicalDeviceConfiguration()])
  ...   logical_devices = tf.config.list_logical_devices('CPU')
  ...   assert len(logical_devices) == 2
  ...
  ...   tf.config.set_logical_device_configuration(
  ...     physical_devices[0],
  ...     [tf.config.LogicalDeviceConfiguration(),
  ...      tf.config.LogicalDeviceConfiguration(),
  ...      tf.config.LogicalDeviceConfiguration(),
  ...      tf.config.LogicalDeviceConfiguration()])
  ... except:
  ...   # Cannot modify logical devices once initialized.
  ...   pass
  The following example splits the GPU into 2 logical devices with 100 MB each:
  >>> physical_devices = tf.config.list_physical_devices('GPU')
  >>> try:
  ...   tf.config.set_logical_device_configuration(
  ...     physical_devices[0],
  ...     [tf.config.LogicalDeviceConfiguration(memory_limit=100),
  ...      tf.config.LogicalDeviceConfiguration(memory_limit=100)])
  ...
  ...   logical_devices = tf.config.list_logical_devices('GPU')
  ...   assert len(logical_devices) == len(physical_devices) + 1
  ...
  ...   tf.config.set_logical_device_configuration(
  ...     physical_devices[0],
  ...     [tf.config.LogicalDeviceConfiguration(memory_limit=10),
  ...      tf.config.LogicalDeviceConfiguration(memory_limit=10)])
  ... except:
  ...   # Invalid device or cannot modify logical devices once initialized.
  ...   pass
  Args:
    device: The `PhysicalDevice` to configure.
    logical_devices: (optional) List of `tf.config.LogicalDeviceConfiguration`
      objects to allocate for the specified `PhysicalDevice`. If None, the
      default configuration will be used.
  Raises:
    ValueError: If argument validation fails.
    RuntimeError: Runtime is already initialized.
  """
  # Must be called before runtime initialization; the context validates this.
  context.context().set_logical_device_configuration(device, logical_devices)
@tf_export('config.experimental.enable_mlir_bridge')
def enable_mlir_bridge():
  """Turns on the experimental MLIR-Based TensorFlow Compiler Bridge.

  DO NOT USE, DEV AND TESTING ONLY AT THE MOMENT.

  NOTE: MLIR-Based TensorFlow Compiler is under active development and has
  missing features, please refrain from using. This API exists for development
  and testing only.

  TensorFlow Compiler Bridge (TF Bridge) is responsible for translating parts
  of TensorFlow graph into a form that can be accepted as an input by a backend
  compiler such as XLA.
  """
  ctx = context.context()
  ctx.enable_mlir_bridge = True
@tf_export('config.experimental.enable_mlir_graph_optimization')
def enable_mlir_graph_optimization():
  """Turns on the experimental MLIR-Based TensorFlow Compiler Optimizations.

  DO NOT USE, DEV AND TESTING ONLY AT THE MOMENT.

  NOTE: MLIR-Based TensorFlow Compiler is under active development and has
  missing features, please refrain from using. This API exists for development
  and testing only.

  TensorFlow Compiler Optimizations are responsible general graph level
  optimizations that in the current stack mostly done by Grappler graph
  optimizers.
  """
  ctx = context.context()
  ctx.enable_mlir_graph_optimization = True
@tf_export('config.experimental.disable_mlir_bridge')
def disable_mlir_bridge():
  """Turns off the experimental MLIR-Based TensorFlow Compiler Bridge."""
  ctx = context.context()
  ctx.enable_mlir_bridge = False
@tf_export('config.experimental.disable_mlir_graph_optimization')
def disable_mlir_graph_optimization():
  """Turns off the experimental MLIR-Based TensorFlow Compiler Optimizations."""
  ctx = context.context()
  ctx.enable_mlir_graph_optimization = False
| |
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals
from . logEnvironmentModule import *
from . errorObjs import *
import random
class MyAgent(LogAgent):
"""Test the LogAgent.
This agent does nothing more than the basic agent contained in
MyAgent.py.
Well, in fact it IS MyAgent.py.
However, there is a difference: below you can find an implementation of
Floyd-Warshall algorithm.
Feel free to use it as you wish.
There is a simple environment that you can use to test the algorithm,
named floydConfig.json.
The agent, upon completion of execution,
will print the minimumPath matrix.
"""
def __init__(self):
super(MyAgent, self).__init__()
def floyd_warshall(self, stat):
"""
Calculate minimum path between all airports
using a straightforward implementation of Floyd-Warshall algorithm.
This function gets info about the graph edges (air routes)
from the environment and creates a matrix
containing the minimum path from
each airport to each other.
"""
graph = []
airports = stat.airports.keys()
matrix = {v1: {v2: 0 if v1 == v2 else float('inf') for v2 in airports}
for v1 in airports}
for v2 in airports:
for v1 in stat.airports[v2].neighbors:
print (stat.airports[v2].neighbors[v1])
matrix[v1][v2] = stat.airports[v2].neighbors[v1]
airports = matrix.keys()
costMatrix = matrix
for v2 in airports:
costMatrix = {v1: {v3: min(costMatrix[v1][v3], costMatrix[v1][v2] + costMatrix[v2][v3])
for v3 in airports}
for v1 in airports}
print(costMatrix)
def itr_solve(self, status):
stat = status.clone
h = 0
possible_moves = []
list_of_actions = []
while(not stat.check_goal()):
for move in stat.moves:
clone = stat.clone
clone.execute([move])
h2 = self.heuristic(stat.goal, clone)
if h < h2:
h = h2
stat = clone.clone
list_of_actions.append(move)
continue
elif h == h2:
possible_moves.append(move)
print('OPS! RADOM!')
move = random.choice(possible_moves)
possible_moves = []
print(move)
stat.execute([move])
list_of_actions.append(move)
self.heuristic(stat.goal, stat)
self.floyd_warshall(stat)
return list_of_actions
def heuristic(self, goal, status):
'''
http://www.urticator.net/essay/3/326.html
'''
results = 0
# print("eccomi!",status.airports)
for destination, objs in goal.items():
#print('SUPER WHAT?!', objs, destination)
if destination in list(status.airports.keys()):
for obj in objs:
# print('WHAT?!',obj)
if obj in status.airports[destination]:
results = results + 5
elif destination in list(status.airplanes.keys()):
for obj in objs:
# print('WHAT?!',obj)
if obj in status.airplanes[destination]:
results = results + 5
print('heuristic', results)
return results
def solve(self, status, goal):
'''
Dear fellow Student,
here I add some more examples on how to manipulate
the objects from the logEnvironmentModule.
I also added a VERY dumb agent.
If your agent can't do better than this, you should work
a little more on it.
I hope it can help a little.
Best regards,
Robert
'''
# Here are more examples of how you can play around
# with the objects in this module
'''
print(status)
for airport in status.airports:
print(airport)
for plane in status.airports[airport].airplanes:
print("Contains", plane)
for box in status.airports[airport].airplanes[plane].boxes:
print("scatole nell'aereo", box)
print("maxboxes:", status.airports[airport].airplanes[plane].maxbox)
for neighbor in status.airports[airport].neighbors:
print("Is near to", neighbor)
print("with distance", status.airports[airport].neighbors[neighbor])
for box in status.airports[airport].boxes:
print("Contains also", box)
print(status.goal.items())
for goal in status.goal:
print(goal)
print(status.goal[goal])
print(dir(status.goal[goal]))
print(status.goal[goal])'''
# This is a small code snippet to show you how to
# hash() a state. It could be usefull to someone..
"""
print(status.clone)
print(hash(print(status.clone)))
print(hash(repr(status))) #this is how you should do it!
clone = status.clone
print(clone == status)
clone.execute([status.moves[0],status.moves[1],status.moves[6]])
print(hash(repr(clone)))
print(clone)
print(clone == status)
#print(status.moves)"""
return self.itr_solve(status)
# # you should do this to try your agent only once
# """
# test_env.add_agent(MyAgent())
# print("MAIN Goal reached:", test_env.check_goal())
# test_env.execute()
# print("MAIN Goal reached:", test_env.check_goal())
# print("MAIN Agent score:", test_env.formatted_score())
# """
# # here I try to run my simple agent many times
# # then I get a mean value for the score
# itrNum = 10
# partialScore = 0
# goalAlwaysReached = True
# i = 0
# while i < itrNum:
# asd = LogEnvironment("testconfig_simple.json")
# asd.add_agent(MyAgent())
# asd.check_goal()
# asd.execute()
# asd.check_goal()
# if(not asd.check_goal()):
# goalAlwaysReached = False
# partialScore += asd.score()
# i += 1
# meanScore = partialScore / itrNum
# print("Mean score is:", meanScore)
# if(not goalAlwaysReached):
# print("Goal not always reached")
# OLD EXAMPLE
# class MyAgent(LogAgent):
# """Test the LogAgent."""
# def __init__(self):
# super(MyAgent, self).__init__()
# def solve(self, status, goal):
# clone = status.clone
# print("----- Print of CLONE -----")
# print(clone)
# print("CLONE AIRPORTS:", clone.airports)
# print("CLONE AIRPLANES:", clone.airplanes)
# print("CLONE BOXES:", clone.boxes)
# print("-------------------------")
# print("----- Print details of CLONE -----")
# for airport in clone.airports:
# print(airport)
# for airport_name, airport_obj in clone.airports.items():
# print("AIRPORT NAME:", airport_name)
# print(airport_obj)
# for box in clone.airports.Airport_1.boxes:
# print("BOX NAME", box)
# for airplane in clone.airports.Airport_1.airplanes:
# print("AIRPLANE NAME:", airplane)
# for box in clone.airports.Airport_1.airplanes.Airplane_1.boxes:
# print("BOX NAME", box)
# print("-------------------------")
# print("-->STASUS == CLONE:", status == clone)
# print("-->GOAL:", status.goal)
# print("-->CLONE MOVES:", clone.moves)
# list_of_actions = [action for action in clone.moves]
# print("-->EXECUTE MOVES IN CLONE:", list_of_actions)
# clone.execute(list_of_actions)
# print("-->STASUS == CLONE:", status == clone)
# print("-->CHECK GOAL IN CLONE:", clone.check_goal())
# new_clone = clone.clone
# print("-->NEW CLONE")
# print("-->STASUS != CLONE:", status != clone)
# print("-->STASUS == NEW_CLONE:", status == new_clone)
# print("-->NEW_CLONE == CLONE:", new_clone == clone)
# for box in clone.airports.Airport_2.airplanes.Airplane_1.boxes:
# print("-->BOX in Airplane_1 in Airport_2:", box)
# print("-->NEW_CLONE MOVES:", new_clone.moves)
# print("-->LAST MOVE CHECK:",
# new_clone.moves[0] == ("unload", "Box_1", "Airplane_1"))
# try:
# new_clone.execute(("unload", "Box_1", "Airplane_1"))
# except ActionNotAList as e:
# print("-->!!! ERROR:", e)
# new_clone.execute([("unload", "Box_1", "Airplane_1")])
# print("-->CHECK GOAL IN NEW_CLONE", new_clone.check_goal())
# print("-------------------------")
# list_of_actions.append(("unload", "Box_1", "Airplane_1"))
# return list_of_actions
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import pandas as pd
import tensorflow as tf
import utils.utils as utils
class FastGradientSign_AdvGen:
    """Generate adversarial images with the fast gradient sign method (FGSM).

    For each input image the gradient of the cross-entropy loss w.r.t. the
    input is computed; perturbed copies ``image + eps * sign(grad)`` are then
    evaluated over a configurable range of epsilon ("perturbation") values.
    """

    def __init__(self, cmd_args, input_x_shape, saver, config):
        """Store command-line args, input shape, a tf.train.Saver and config."""
        self.cmd_args = cmd_args
        self.input_x_shape = input_x_shape
        self.saver = saver
        # BUG FIX: the original assigned ``config`` twice; once suffices.
        self.config = config

    def run(self, input_dict):
        """Generate adversarial examples one image at a time (feed_dict mode).

        input_dict must provide: "x", "y_", "y_conv", "keep_prob" tensors and
        "test_data"/"test_labels" arrays.  Returns a pandas DataFrame with one
        row per (image, perturbation) pair.
        """
        x = input_dict["x"]
        y_ = input_dict["y_"]
        y_conv = input_dict["y_conv"]
        keep_prob = input_dict["keep_prob"]
        test_data = input_dict["test_data"]
        test_labels = input_dict["test_labels"]
        checkpoint_path = self.config.get('main', 'checkpoint_path')
        eval_frequency = self.config.getint('main', 'eval_frequency')
        num_classes = self.config.getint('main', 'num_classes')
        image_output_path = self.config.get('main', 'image_output_path')
        adversarial_perturbation_min = self.config.getfloat(
            'main', 'adversarial_perturbation_min')
        adversarial_perturbation_max = self.config.getfloat(
            'main', 'adversarial_perturbation_max')
        # NOTE(review): steps is read as a float but used as np.linspace's
        # "num" argument; newer numpy requires an int -- confirm config type.
        adversarial_perturbation_steps = self.config.getfloat(
            'main', 'adversarial_perturbation_steps')
        not_fooled = 0.0
        fooled = 0.0
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        # Cross-entropy loss whose input-gradient drives the FGSM step.
        cross_entropy = -tf.reduce_sum(tf.cast(y_, "float") * tf.log(y_conv))
        grad = tf.gradients(cross_entropy, x)
        sess = tf.Session()
        tf.initialize_all_variables().run(session=sess)
        self.saver.restore(sess, checkpoint_path)
        df = pd.DataFrame()
        start_time = time.time()
        if self.cmd_args.test:
            # Smoke-test mode: one image, one perturbation value.
            # PORTABILITY: range (not Python-2-only xrange); identical here.
            iter_range = range(1)
            adversarial_perturbation_max = adversarial_perturbation_min
            adversarial_perturbation_steps = 1
        else:
            iter_range = range(len(test_data))
        for idx in iter_range:
            if idx % eval_frequency == 0:
                elapsed_time = time.time() - start_time
                print('Adversarial image generation step %d of %d, (%.1fms/step)' %
                      (idx, len(test_data),
                       1000 * elapsed_time / eval_frequency))
            image = test_data[idx]
            label = test_labels[idx]
            y_onehot = np.eye(num_classes)[label]
            pred = sess.run(y_conv, feed_dict={x: (np.reshape(image, self.input_x_shape)), keep_prob: 1.0})
            pred_label = np.argmax(pred)
            grad_val = sess.run(grad, feed_dict={x: np.reshape(image, self.input_x_shape), y_: y_onehot, keep_prob: 1.0})
            grad_sign = np.sign(grad_val[0])
            grad_norm = sum([np.abs(W) for W in grad_val[0]])
            for perturbation in np.linspace(adversarial_perturbation_min,
                                            adversarial_perturbation_max,
                                            adversarial_perturbation_steps):
                # FGSM step: move the image along the sign of the gradient.
                # NOTE(review): adv_image is fed without the reshape applied
                # to the clean image above -- confirm placeholder shape.
                adv_image = perturbation * grad_sign + image
                adv_pred = sess.run(y_conv, feed_dict={x: adv_image, keep_prob: 1.0})
                adv_label = np.argmax(adv_pred)
                if (adv_label != label): fooled = fooled + 1
                else: not_fooled = not_fooled + 1
                series = pd.Series([idx, label, pred_label, adv_label, grad_norm, pred, adv_pred, image, adv_image,
                                    perturbation, grad_val],
                                   index=["Idx", "True Label", "Predicted Label", "Predicted Label Adversarial",
                                          "Gradient Norm", "Predicted Prob", "Predicted Prob Adversarial", "Image",
                                          "Adversarial Image", "Gradient Step", "Gradient"])
                df = df.append(series, ignore_index=True)
        print("Adversarial sample yield: ", fooled / (fooled + not_fooled))
        print("Adversarial samples fooled: ", fooled)
        print("Adversarial samples not fooled: ", not_fooled)
        return df

    def run_queue(self, input_dict):
        """Generate adversarial examples for a queue-runner input pipeline.

        input_dict must provide: "graph", "x" (batched images), "x_raw",
        "y_" (labels), "y_conv" (batched logits), "y_conv_single" (logits for
        one fed image) and "adv_image_placeholder".  Prints summary counters;
        returns None (also on missing checkpoint).
        """
        graph = input_dict["graph"]
        images = input_dict["x"]
        raw_images = input_dict["x_raw"]
        labels = input_dict["y_"]
        logits = input_dict["y_conv"]
        logits_single = input_dict["y_conv_single"]
        x = input_dict["adv_image_placeholder"]
        adversarial_perturbation_min = self.config.getfloat(
            'main', 'adversarial_perturbation_min')
        adversarial_perturbation_max = self.config.getfloat(
            'main', 'adversarial_perturbation_max')
        adversarial_perturbation_steps = self.config.getfloat(
            'main', 'adversarial_perturbation_steps')
        with graph.as_default():
            # Restore the moving average version of the learned variables for eval.
            variable_averages = tf.train.ExponentialMovingAverage(
                float(self.config.get('main', 'moving_average_decay')))
            variables_to_restore = variable_averages.variables_to_restore()
            # NOTE(review): removes the bare 'Variable' entry so the Saver
            # does not try to restore it -- confirm against the checkpoint.
            del variables_to_restore['Variable']
            saver = tf.train.Saver(variables_to_restore)
            y_ = tf.one_hot(indices=tf.cast(labels, "int64"),
                            depth=int(self.config.get('main', 'num_classes')),
                            on_value=1.0,
                            off_value=0.0)
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, y_)
            grad = tf.gradients(cross_entropy, images)
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(self.config.get('main', 'checkpoint_dir'))
                if ckpt and ckpt.model_checkpoint_path:
                    # Restores from checkpoint
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                else:
                    print('No checkpoint file found')
                    return
                # Start the queue runners.
                coord = tf.train.Coordinator()
                try:
                    threads = []
                    for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                                         start=True))
                    sample_count = int(self.config.get('main', 'num_examples_per_epoch_eval'))
                    true_count = 0  # Counts the number of correct predictions.
                    step = 0
                    pred_correct = 0
                    adv_correct = 0
                    adv_diff = 0
                    # NOTE(review): the loop advances one *batch* per step but
                    # compares step against an example count -- verify units.
                    while step < sample_count and not coord.should_stop():
                        raw_images_val, images_val, labels_val, cross_entropy_val, grad_val = sess.run([raw_images, images, labels, cross_entropy, grad[0]])
                        step += 1
                        for i in range(len(images_val)):
                            image = raw_images_val[i]
                            true_label = labels_val[i]
                            grad_sign = np.sign(grad_val[i])
                            grad_norm = sum([np.abs(W) for W in grad_val[i]])
                            one_adv_correct = False
                            one_pred_correct = False
                            one_adv_diff = False
                            for perturbation in np.linspace(adversarial_perturbation_min,
                                                            adversarial_perturbation_max,
                                                            adversarial_perturbation_steps):
                                adv_image = perturbation * grad_sign + image
                                # Prepend a batch dimension of size 1 for the
                                # single-image logits placeholder.
                                adv_image_reshaped = np.reshape(adv_image, np.insert(adv_image.shape, 0, 1))
                                raw_image_reshaped = np.reshape(image, np.insert(image.shape, 0, 1))
                                pred_logit = sess.run(logits_single, feed_dict={x: raw_image_reshaped})
                                pred_label = np.argmax(pred_logit)
                                adv_pred = sess.run(logits_single, feed_dict={x: adv_image_reshaped})
                                adv_label = np.argmax(adv_pred)
                                if pred_label == true_label:
                                    one_pred_correct = True
                                if adv_label == true_label:
                                    one_adv_correct = True
                                if adv_label != pred_label:
                                    one_adv_diff = True
                            if one_pred_correct:
                                pred_correct = pred_correct + 1
                            if one_adv_correct:
                                adv_correct = adv_correct + 1
                            if one_adv_diff:
                                adv_diff = adv_diff + 1
                    print("PRED CORRECT: " + str(pred_correct))
                    print("ADV CORRECT: " + str(adv_correct))
                    print("ADV DIFF: " + str(adv_diff))
                except Exception as e:
                    coord.request_stop(e)
                coord.request_stop()
                coord.join(threads, stop_grace_period_secs=10)
| |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Constants and static functions to support protocol buffer wire format."""
import struct
from google.protobuf import descriptor
from google.protobuf import message
TAG_TYPE_BITS = 3  # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1  # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
# Highest valid wire type value; PackTag() validates against this.
_WIRETYPE_MAX = 5
# Bounds for various integer types.
INT32_MAX = int((1 << 31) - 1)
INT32_MIN = int(-(1 << 31))
UINT32_MAX = (1 << 32) - 1
INT64_MAX = (1 << 63) - 1
INT64_MIN = -(1 << 63)
UINT64_MAX = (1 << 64) - 1
# "struct" format strings that will encode/decode the specified formats.
FORMAT_UINT32_LITTLE_ENDIAN = '<I'
FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
# We'll have to provide alternate implementations of AppendLittleEndian*() on
# any architectures where these checks fail.
if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
  raise AssertionError('Format "I" is not a 32-bit number.')
if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
  raise AssertionError('Format "Q" is not a 64-bit number.')
def PackTag(field_number, wire_type):
  """Encodes a field number and wire type into one unsigned 32-bit tag.

  Args:
    field_number: Expected to be an integer in the range [1, 1 << 29)
    wire_type: One of the WIRETYPE_* constants.
  """
  if wire_type < 0 or wire_type > _WIRETYPE_MAX:
    raise message.EncodeError('Unknown wire type: %d' % wire_type)
  return wire_type | (field_number << TAG_TYPE_BITS)
def UnpackTag(tag):
  """The inverse of PackTag(). Given an unsigned 32-bit number,
  returns a (field_number, wire_type) tuple.
  """
  field_number = tag >> TAG_TYPE_BITS
  wire_type = tag & TAG_TYPE_MASK
  return field_number, wire_type
def ZigZagEncode(value):
  """ZigZag Transform: Encodes signed integers so that they can be
  effectively used with varint encoding. See wire_format.h for
  more details.
  """
  if value < 0:
    # Negative n maps to 2*|n| - 1.
    return (value << 1) ^ (~0)
  # Non-negative n maps to 2*n.
  return value << 1
def ZigZagDecode(value):
  """Inverse of ZigZagEncode()."""
  if value & 0x1:
    # Odd values decode to negative integers.
    return (value >> 1) ^ (~0)
  return value >> 1
# The *ByteSize() functions below return the number of bytes required to
# serialize "field number + type" information and then serialize the value.
def Int32ByteSize(field_number, int32):
  """Wire size of an int32 field (same on-the-wire size as int64)."""
  return Int64ByteSize(field_number, int32)
def Int32ByteSizeNoTag(int32):
  """Wire size of a bare int32 varint; negatives sign-extend to 10 bytes."""
  return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
def Int64ByteSize(field_number, int64):
  # Have to convert to uint before calling UInt64ByteSize().
  return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
def UInt32ByteSize(field_number, uint32):
  """Wire size of a uint32 field (delegates to the uint64 logic)."""
  return UInt64ByteSize(field_number, uint32)
def UInt64ByteSize(field_number, uint64):
  """Wire size of a uint64 field: tag bytes plus varint bytes."""
  return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
def SInt32ByteSize(field_number, int32):
  """Wire size of an sint32 field (ZigZag-encoded varint)."""
  return UInt32ByteSize(field_number, ZigZagEncode(int32))
def SInt64ByteSize(field_number, int64):
  """Wire size of an sint64 field (ZigZag-encoded varint)."""
  return UInt64ByteSize(field_number, ZigZagEncode(int64))
def Fixed32ByteSize(field_number, fixed32):
  """Wire size of a fixed32 field: tag plus 4 bytes."""
  return TagByteSize(field_number) + 4
def Fixed64ByteSize(field_number, fixed64):
  """Wire size of a fixed64 field: tag plus 8 bytes."""
  return TagByteSize(field_number) + 8
def SFixed32ByteSize(field_number, sfixed32):
  """Wire size of an sfixed32 field: tag plus 4 bytes."""
  return TagByteSize(field_number) + 4
def SFixed64ByteSize(field_number, sfixed64):
  """Wire size of an sfixed64 field: tag plus 8 bytes."""
  return TagByteSize(field_number) + 8
def FloatByteSize(field_number, flt):
  """Wire size of a float field: tag plus 4 bytes."""
  return TagByteSize(field_number) + 4
def DoubleByteSize(field_number, double):
  """Wire size of a double field: tag plus 8 bytes."""
  return TagByteSize(field_number) + 8
def BoolByteSize(field_number, b):
  """Wire size of a bool field: tag plus a single varint byte."""
  return TagByteSize(field_number) + 1
def EnumByteSize(field_number, enum):
  """Wire size of an enum field (encoded like uint32)."""
  return UInt32ByteSize(field_number, enum)
def StringByteSize(field_number, string):
  """Wire size of a string field (length-delimited UTF-8 bytes)."""
  return BytesByteSize(field_number, string.encode('utf-8'))
def BytesByteSize(field_number, b):
  """Wire size of a bytes field: tag, varint length, then the payload."""
  return (TagByteSize(field_number)
          + _VarUInt64ByteSizeNoTag(len(b))
          + len(b))
def GroupByteSize(field_number, message):
  # NOTE: the parameter shadows the module-level 'message' import; harmless
  # here because only message.ByteSize() is used.
  return (2 * TagByteSize(field_number)  # START and END group.
          + message.ByteSize())
def MessageByteSize(field_number, message):
  """Wire size of an embedded message field: tag, varint length, payload."""
  return (TagByteSize(field_number)
          + _VarUInt64ByteSizeNoTag(message.ByteSize())
          + message.ByteSize())
def MessageSetItemByteSize(field_number, msg):
  """Wire size of a MessageSet item (group wrapping type_id and message)."""
  # First compute the sizes of the tags.
  # There are 2 tags for the beginning and ending of the repeated group, that
  # is field number 1, one with field number 2 (type_id) and one with field
  # number 3 (message).
  total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3))
  # Add the number of bytes for type_id.
  total_size += _VarUInt64ByteSizeNoTag(field_number)
  message_size = msg.ByteSize()
  # The number of bytes for encoding the length of the message.
  total_size += _VarUInt64ByteSizeNoTag(message_size)
  # The size of the message.
  total_size += message_size
  return total_size
def TagByteSize(field_number):
  """Returns the bytes required to serialize a tag with this field number."""
  # Just pass in type 0, since the type won't affect the tag+type size.
  return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
# Private helper function for the *ByteSize() functions above.
def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10
# Field types that may never appear in a packed repeated field: all
# length-delimited types plus groups.
NON_PACKABLE_TYPES = (
    descriptor.FieldDescriptor.TYPE_STRING,
    descriptor.FieldDescriptor.TYPE_GROUP,
    descriptor.FieldDescriptor.TYPE_MESSAGE,
    descriptor.FieldDescriptor.TYPE_BYTES
)
def IsTypePackable(field_type):
  """Return true iff packable = true is valid for fields of this type.
  Args:
    field_type: a FieldDescriptor::Type value.
  Returns:
    True iff fields of this type are packable.
  """
  return field_type not in NON_PACKABLE_TYPES
| |
# -*- coding: utf-8 -*-
import os
import logging
from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory
from pyramid.view import view_config
from waitress import serve
import psycopg2
from contextlib import closing
from pyramid.events import NewRequest, subscriber
import datetime
from pyramid.httpexceptions import HTTPFound, HTTPInternalServerError, HTTPForbidden
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from cryptacular.bcrypt import BCRYPTPasswordManager
from pyramid.security import remember, forget
import markdown
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
)
from zope.sqlalchemy import ZopeTransactionExtension
import json
# Directory containing this file; used below to locate the static assets.
here = os.path.dirname(os.path.abspath(__file__))
# MATTLEE = "dbname=test-learning-journal user=postgres password=admin"
# ON_MATTS = "C:\\Users\\jefimenko\\code_fellows\\dev_accel\\another-journal\\learning-journal\\journal.py"
# DB_SCHEMA = """
# CREATE TABLE IF NOT EXISTS entries (
# id serial PRIMARY KEY,
# title VARCHAR (127) NOT NULL,
# text TEXT NOT NULL,
# created TIMESTAMP NOT NULL
# )
# """
# INSERT_ENTRY = """
# INSERT INTO entries(title, text, created) VALUES (%s, %s, %s)
# """
# # READ_ENTRIES = """
# # SELECT * FROM entries
# # """
# SELECT_ENTRIES = """
# SELECT id, title, text, created FROM entries ORDER BY created DESC
# """
# replaces def close, open, and connect db
# Thread-local SQLAlchemy session; ZopeTransactionExtension ties its
# lifecycle to pyramid_tm's per-request transaction management.
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
# Declarative base class for the ORM models below.
Base = declarative_base()
class Entry(Base):
    """ORM model for one journal entry (table ``entries``)."""
    __tablename__ = 'entries'
    # Surrogate primary key.
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    title = sa.Column(sa.Unicode(127), nullable=False)
    # Entry body, stored as raw Markdown and rendered at view time.
    text = sa.Column(sa.UnicodeText, nullable=False)
    # Creation timestamp; defaults to UTC "now" on insert.
    created = sa.Column(
        sa.DateTime, nullable=False, default=datetime.datetime.utcnow
    )
    def __repr__(self):
        return u"{}: {}".format(self.__class__.__name__, self.title)
    @classmethod
    def all(cls):
        """Return every entry, newest first."""
        return DBSession.query(cls).order_by(cls.created.desc()).all()
    @classmethod
    def by_id(cls, id):
        """Return the entry with this id (``one()`` raises if absent)."""
        return DBSession.query(cls).filter(cls.id==id).one()
    @classmethod
    def from_request(cls, request):
        """Build a new entry from request POST params and add it to the session."""
        title = request.params.get('title', None)
        text = request.params.get('text', None)
        created = datetime.datetime.utcnow()
        new_entry = cls(title=title, text=text, created=created)
        DBSession.add(new_entry)
    @classmethod
    def most_recent(cls):
        """Return the newest entry, or None when the table is empty."""
        return DBSession.query(cls).order_by(cls.created.desc()).first()
    @classmethod
    def from_request_edit(cls, request):
        """Update an existing entry's title/text from request params."""
        title = request.params.get('title', None)
        text = request.params.get('text', None)
        id = request.params.get('id', None)
        DBSession.query(cls).filter(cls.id == id).update({"title": title, "text": text})
# Module-level logger, configured with logging's default handler/format.
logging.basicConfig()
log = logging.getLogger(__file__)
# @view_config(route_name='home', renderer='string')
# def home(request):
# return "Hello World"
# # connect to the db
# def connect_db(settings):
# """Return a connection to the configured database"""
# return psycopg2.connect(settings['db'])
# # a function to initialize db
# def init_db():
# """Create database dables defined by DB_SCHEMA
# Warning: This function will not update existing table definitions
# """
# settings = {}
# settings['db'] = os.environ.get(
# 'DATABASE_URL', 'dbname=learning-journal user=mark'
# )
# # For running on Matt's computer
# if ON_MATTS == os.path.abspath(__file__):
# settings['db'] = MATTLEE
# with closing(connect_db(settings)) as db:
# db.cursor().execute(DB_SCHEMA)
# db.commit()
# @subscriber(NewRequest)
# def open_connection(event):
# request = event.request
# settings = request.registry.settings
# request.db = connect_db(settings)
# request.add_finished_callback(close_connection)
# def close_connection(request):
# """close the database connection for this request
# If there has been an error in the processing of the request, abort any
# open transactions.
# """
# db = getattr(request, 'db', None)
# if db is not None:
# if request.exception is not None:
# db.rollback()
# else:
# db.commit()
# request.db.close()
# def write_entry(request):
# """Create an entry in the db."""
# title = request.params.get('title', None)
# text = request.params.get('text', None)
# created = datetime.datetime.utcnow()
# request.db.cursor().execute(INSERT_ENTRY, (title, text, created))
# return
UPDATE_ENTRY = """
UPDATE entries SET (title, text) = (%s, %s) WHERE id=%s
"""
SELECT_MOST_RECENT = """
SELECT id, title, text, created FROM entries ORDER BY created DESC LIMIT 1
"""
### ported to ORM, but does not update database
@view_config(route_name='new', renderer='json')
def add2_entry(request):
    """Create a new entry from POST data and return it as JSON.

    Requires an authenticated user; anonymous requests get HTTPForbidden.
    On a database error, responds with HTTP 500.
    """
    if not request.authenticated_userid:
        return HTTPForbidden()
    if request.method == 'POST':
        try:
            Entry.from_request(request)
        except psycopg2.Error:
            # BUG FIX: return an *instance*, not the exception class, so
            # Pyramid renders a proper 500 response.
            return HTTPInternalServerError()
    # Echo the most recent entry (the one just written) back as JSON.
    # NOTE(review): most_recent() returns None on an empty table, which
    # would crash here -- confirm callers guarantee at least one entry.
    temp = Entry.most_recent()
    temp.created = temp.created.strftime('%b %d, %Y')
    # Drop SQLAlchemy's bookkeeping attribute so __dict__ is JSON-friendly.
    del temp.__dict__['_sa_instance_state']
    return temp.__dict__
@view_config(route_name='home', renderer='templates/list.jinja2')
def read_entries(request):
    """Render the home page: every entry, most recent first."""
    all_entries = Entry.all()
    return {'entries': all_entries}
def do_login(request):
    """Check the request's credentials against the configured account.

    Raises ValueError when username or password is missing; returns a
    truthy value only when the password matches the stored bcrypt hash.
    """
    username = request.params.get('username', None)
    password = request.params.get('password', None)
    if not (username and password):
        raise ValueError('both username and password are required')
    settings = request.registry.settings
    manager = BCRYPTPasswordManager()
    if username != settings.get('auth.username', ''):
        return None  # unknown user: falsy, same as the implicit None before
    hashed = settings.get('auth.password', '')
    return manager.check(hashed, password)
@view_config(route_name='login', renderer='templates/login.jinja2')
def login(request):
    """Authenticate a user by username/password"""
    username = request.params.get('username', '')
    # GET just renders the empty form.
    if request.method != 'POST':
        return {'error': '', 'username': username}
    error = "Login Failed"
    authenticated = False
    try:
        authenticated = do_login(request)
    except ValueError as e:
        error = str(e)
    if authenticated:
        # Set the auth cookie and bounce back to the home page.
        headers = remember(request, username)
        return HTTPFound(request.route_url('home'), headers=headers)
    return {'error': error, 'username': username}
@view_config(route_name='logout')
def logout(request):
    """Drop the auth cookie and send the user back to the home page."""
    return HTTPFound(request.route_url('home'), headers=forget(request))
SELECT_SINGLE_ENTRY = """
SELECT id, title, text, created FROM entries WHERE id=%s
"""
@view_config(route_name='detail', renderer='templates/detail.jinja2')
def entry_details(request):
    """Render one entry, with its Markdown text converted to HTML."""
    entry = Entry.by_id(request.matchdict.get('id', -1))
    # Strip SQLAlchemy's bookkeeping attribute before handing to the template.
    del entry.__dict__['_sa_instance_state']
    md_extensions = ['codehilite(linenums=True)', 'fenced_code']
    entry.display_text = markdown.markdown(entry.text, extensions=md_extensions)
    return {'entry': entry, }
# ported to ORM, but does not update database
@view_config(route_name='edit', renderer='json')
def edit_entry(request):
    """Update an existing entry from POST data; return rendered JSON.

    Requires an authenticated user; anonymous requests get HTTPForbidden.
    On a database error, responds with HTTP 500.
    """
    if not request.authenticated_userid:
        return HTTPForbidden()
    if request.method == 'POST':
        try:
            Entry.from_request_edit(request)
        except psycopg2.Error:
            # BUG FIX: instantiate the 500 response instead of returning
            # the exception class itself.
            return HTTPInternalServerError()
    # NOTE(review): markdown.markdown(None) would fail if 'text' is absent
    # -- confirm the client always posts it.
    text = markdown.markdown(request.params.get('text', None),
                             extensions=['codehilite(linenums=True)', 'fenced_code'])
    return {'title': request.params.get('title', None), 'text': text}
def main():
    """Create a configured wsgi app."""
    settings = {}
    # NOTE(review): os.environ.get returns a *string* when DEBUG is set, and
    # any non-empty string is truthy -- confirm the intended semantics.
    settings['reload_all'] = os.environ.get('DEBUG', True)
    settings['debug_all'] = os.environ.get('DEBUG', True)
    # settings['db'] = os.environ.get(
    #     'DATABASE_URL', 'dbname=learning-journal user=mark'
    # )
    settings['sqlalchemy.url'] = os.environ.get(
        # must be rfc1738 URL
        'DATABASE_URL', 'postgresql://mark:@localhost:5432/learning-journal'
    )
    # Bind the global session to the configured engine.
    engine = sa.engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    # Add authentication setting configuration
    settings['auth.username'] = os.environ.get('AUTH_USERNAME', 'admin')
    manager = BCRYPTPasswordManager()
    # Stored value is a bcrypt hash; the default hashes 'secret' at startup.
    settings['auth.password'] = os.environ.get(
        'AUTH_PASSWORD', manager.encode('secret'))
    # secret value for session signing:
    secret = os.environ.get('JOURNAL_SESSION_SECRET', 'itsaseekrit')
    session_factory = SignedCookieSessionFactory(secret)
    # add a secret value for auth tkt signing
    auth_secret = os.environ.get('JOURNAL_AUTH_SECRET', 'anotherseekrit')
    # configuration setup
    config = Configurator(
        settings=settings,
        session_factory=session_factory,
        authentication_policy=AuthTktAuthenticationPolicy(
            secret=auth_secret,
            hashalg='sha512'
        ),
        authorization_policy=ACLAuthorizationPolicy(),
    )
    config.include('pyramid_jinja2')
    config.include('pyramid_tm')
    config.add_static_view('static', os.path.join(here, 'static'))
    config.add_route('home', '/')
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.add_route('detail', '/detail/{id:\d+}')
    config.add_route('edit', '/edit')
    config.add_route('new', '/new')
    # Pick up the @view_config decorators in this module.
    config.scan()
    app = config.make_wsgi_app()
    return app
if __name__ == '__main__':
    app = main()
    # PORT arrives from the environment as a string; coerce so waitress
    # always receives an integer port.
    port = int(os.environ.get('PORT', 5000))
    serve(app, host='0.0.0.0', port=port)
| |
"""
Functions:
is_valid_gene_id
select_valid_gene_id
select_valid_gene_id_I
Classes:
AbstractGeneSet Abstract base class.
NoHeaderFileGeneSet No column headers, just specify a column.
HeaderFileGeneSet GeneSet from a file with column headers.
RandomGeneSet Draw random genes from a Matrix.
GMTGeneSet GMT format file.
GeneSetHomologConverter Change geneset to another organism.
"""
import os, sys
class AbstractGeneSet:
    """Abstract base class for gene sets.

    Subclasses must implement get_genes(), which returns only valid
    gene IDs.
    """
    def __init__(self, gene_id_name):
        self.gene_id_name = gene_id_name
    def __iter__(self):
        # Iterate over the materialized gene list.
        return iter(self.get_genes())
    # Implement in a derived class.
    def get_genes(self):
        # Return the genes in the geneset (valid gene IDs only).
        raise NotImplementedError
    # Default implementations; subclasses may provide optimized versions.
    def __len__(self):
        return len(self.get_genes())
    def __getitem__(self, index):
        return self.get_genes()[index]
    def get_indexes(self, dataset):
        """Return the row indexes of this geneset's genes in dataset."""
        # One get_genes() call is faster than iterating the geneset.
        I_row, I_col = dataset._index(
            row=self.get_genes(), row_header=self.gene_id_name)
        assert I_col is None
        return I_row
class GeneSet(AbstractGeneSet):
    """Gene set backed by an explicit, in-memory list of gene IDs."""
    def __init__(self, gene_id_name, genes):
        AbstractGeneSet.__init__(self, gene_id_name)
        # Defensive copy: later mutation of the caller's list has no effect.
        self.genes = list(genes)
    def get_genes(self):
        return self.genes
class NoHeaderFileGeneSet(AbstractGeneSet):
    """Gene set read from a tab-delimited file without column headers."""
    def __init__(self, gene_id_name, filename, column_num):
        AbstractGeneSet.__init__(self, gene_id_name)
        # Save the parameters and load the geneset when necessary.
        self.filename = filename
        self.column_num = column_num
        # Lazily-populated cache of gene IDs.
        self.genes = None
    def _get_genes(self):
        """Load column column_num, deduplicate and keep valid gene IDs."""
        import iolib
        from filelib import openfh
        i = self.column_num
        data = openfh(self.filename).read()
        x = [cols[i] for cols in iolib.split_tdf(data)]
        x = iolib.strip_each(x)
        # Deduplicate (Python 2: keys() returns a list; order not preserved).
        x = {}.fromkeys(x).keys()
        x = select_valid_gene_id(x)
        return x
    def get_genes(self):
        # Load on first access, then serve from the cache.
        if self.genes is None:
            self.genes = self._get_genes()
        return self.genes
class HeaderFileGeneSet(NoHeaderFileGeneSet):
    """Gene set from a file whose first line names its columns."""
    def __init__(self, gene_id_name, filename, column_header):
        from filelib import openfh
        handle = openfh(filename)
        # Consume the header line to find the requested column.
        header = handle.readline().rstrip("\r\n").split("\t")
        assert column_header in header, (
            "Invalid column name: %s" % column_header)
        colnum = header.index(column_header)
        # NOTE(review): the open handle (header already consumed) is passed
        # where the parent expects a filename; presumably openfh accepts an
        # already-open handle -- confirm in filelib.
        NoHeaderFileGeneSet.__init__(self, gene_id_name, handle, colnum)
class RandomGeneSet(AbstractGeneSet):
    """Draw random gene IDs from a dataset so that the total number of
    matched probes equals num_probes."""
    def __init__(self, gene_id_name, dataset, num_probes):
        # num_probes is the total number of probes on the chip that
        # should be matched with each geneset.
        assert num_probes > 0, "Need to match at least 1 probe"
        AbstractGeneSet.__init__(self, gene_id_name)
        self.dataset = dataset
        self.num_probes = num_probes
        self.fixed = None
        # Index the IDs in the data set for faster selection.
        ids = dataset.row_names(gene_id_name)
        I_valid = select_valid_gene_id_I(ids)
        assert self.num_probes <= len(I_valid), \
            "Not enough gene IDs for sampling [%d/%d]." % (
            len(I_valid), self.num_probes)
        id2indexes = {}  # id -> list of indexes
        all_singles = True
        for i in I_valid:
            id = ids[i]
            if id not in id2indexes:
                id2indexes[id] = [i]
            else:
                id2indexes[id] += [i]
                all_singles = False
        # Each population entry is (id, indexes, count).  When every gene
        # maps to exactly one probe, store the bare index for cheap sampling.
        # PORTABILITY: items() (works on Python 2 and 3) instead of iteritems().
        population = [None] * len(id2indexes)
        if all_singles:
            for i, (id, indexes) in enumerate(id2indexes.items()):
                population[i] = id, indexes[0], 1
        else:
            for i, (id, indexes) in enumerate(id2indexes.items()):
                population[i] = id, indexes, len(indexes)
        self.gene_ids = ids
        self.population = population
        self.all_singles = all_singles
    def _make_gene_set(self, num_probes=None):
        # Make a random geneset.  Return a list of the indexes into
        # the original data set.
        # BUG FIX: num_probes now defaults to None (fix() calls this with no
        # argument, which previously raised TypeError) and is honored
        # throughout instead of silently falling back to self.num_probes.
        if num_probes is None:
            num_probes = self.num_probes
        while 1:
            # Choose a random set of genes.  Since genes may have many
            # probe sets, this may result in too many probe sets.
            # Using random.shuffle on ids is very slow.  random.sample
            # generates many calls to random and set.add.
            selected = sample(self.population, num_probes)
            # If every gene points to exactly 1 probe, then the number
            # of genes is the same as the number of probes.
            if self.all_singles:
                num_indexes = len(selected)
            else:
                num_indexes = sum([x[2] for x in selected])
            # If there are too many genes, then back off.
            while num_indexes > num_probes:
                x = selected.pop(0)
                num_indexes -= x[2]
            # If I have the right number of indexes, then stop.
            # Otherwise, I have too many, so try again.
            if num_indexes == num_probes:
                break
            # Gene set fail!  Try again.  Does not happen very often.
        # Extract the indexes from the selected genes.
        if self.all_singles:
            indexes = [x[1] for x in selected]
        else:
            indexes = [None] * num_probes
            i = 0
            for x in selected:
                for y in x[1]:
                    indexes[i] = y
                    i += 1
        return indexes
    def fix(self):
        # Fix so that get_genes always returns the same gene set.
        self.fixed = self._make_gene_set()
    def get_indexes(self, dataset, num_probes=None):
        # Return a list of indexes.
        assert self.dataset is dataset
        num_probes = num_probes or self.num_probes
        if self.fixed is not None:
            indexes = self.fixed
        else:
            indexes = self._make_gene_set(num_probes)
        assert len(indexes) == num_probes
        return indexes
    def get_genes(self, num_probes=None):
        # Return a list of random genes.
        indexes = self.get_indexes(self.dataset, num_probes=num_probes)
        geneset = [self.gene_ids[i] for i in indexes]
        return geneset
class GMTGeneSet(AbstractGeneSet):
    """Gene set read from one or more named sets in a GMT format file."""
    def __init__(self, gene_id_name, filename, geneset_name, *more_genesets):
        AbstractGeneSet.__init__(self, gene_id_name)
        # Save the parameters and load the geneset when necessary.
        self.filename = filename
        self.geneset_names = [geneset_name] + list(more_genesets)
        self.genes = None
    def _get_genes(self):
        """Collect, deduplicate and validate genes from the named sets."""
        import iolib
        import genesetlib
        genes = []
        for x in genesetlib.read_genesets(self.filename):
            name, description, g = x
            if name not in self.geneset_names:
                continue
            genes.extend(g)
        if not genes:
            # BUG FIX: "raise AssertionError, msg" is Python-2-only syntax;
            # calling the exception works on both Python 2 and 3.
            raise AssertionError("I could not find gene set: %s" %
                                 ",".join(self.geneset_names))
        x = genes
        x = iolib.strip_each(x)
        # Deduplicate (Python 2: keys() returns a list).
        x = {}.fromkeys(x).keys()
        x = select_valid_gene_id(x)
        return x
    def get_genes(self):
        # Load on first access, then serve from the cache.
        if self.genes is None:
            self.genes = self._get_genes()
        return self.genes
class GeneSetHomologConverter(AbstractGeneSet):
    """Wraps a geneset, mapping each gene ID through a homolog table."""
    def __init__(self, geneset, converter):
        # converter is a dict of old_gene_id -> new_gene_id.
        self.geneset = geneset
        self.converter = converter
        AbstractGeneSet.__init__(self, geneset.gene_id_name)
    def get_genes(self):
        # Unmapped genes become None and are dropped by the validity filter.
        converted = [self.converter.get(gene)
                     for gene in self.geneset.get_genes()]
        unique = {}.fromkeys(converted).keys()
        return select_valid_gene_id(unique)
def is_valid_gene_id(id):
    """Return 1 if id looks like a single, usable gene ID, else 0."""
    if id is None:
        return 0
    stripped = id.strip()
    # Empty or placeholder annotations.
    if stripped in ("", "0", "---"):
        return 0
    # "///" marks ambiguous multi-gene annotations.
    if "///" in stripped:
        return 0
    return 1
def select_valid_gene_id(L):
    """Return the elements of L that are valid gene IDs, in order."""
    return [L[index] for index in select_valid_gene_id_I(L)]
def select_valid_gene_id_I(L):
    """Return the indexes of the valid gene IDs in L, in order."""
    indexes = []
    for position, gene_id in enumerate(L):
        if is_valid_gene_id(gene_id):
            indexes.append(position)
    return indexes
def sample(L, n):
    """Return n items chosen at random, without replacement, from L."""
    from random import sample as random_sample
    return random_sample(L, n)
# Optionally replace the pure-Python implementations above with compiled
# versions from the cGeneSet extension, when it is importable.
try:
    #raise ImportError
    import cGeneSet
except ImportError:
    pass
else:
    this_module = sys.modules[__name__]
    for name in cGeneSet.__dict__.keys():
        if name.startswith("__"):
            continue
        # Overwrite same-named attributes of this module with the C versions.
        this_module.__dict__[name] = cGeneSet.__dict__[name]
| |
from __future__ import print_function
from copy import copy
from itertools import chain, combinations, permutations
from builtins import map
from builtins import object
from builtins import range
from builtins import zip
import PyAnalysisTools.PlottingUtils.Formatting as FM
import PyAnalysisTools.PlottingUtils.HistTools as HT
import PyAnalysisTools.PlottingUtils.PlottingTools as PT
import ROOT
from PyAnalysisTools.AnalysisTools.EfficiencyCalculator import EfficiencyCalculator as ec
from PyAnalysisTools.AnalysisTools.XSHandle import XSHandle
from PyAnalysisTools.PlottingUtils.BasePlotter import BasePlotter
from PyAnalysisTools.PlottingUtils.PlotConfig import PlotConfig as pc
from PyAnalysisTools.PlottingUtils.PlotConfig import get_histogram_definition
from PyAnalysisTools.base.ProcessConfig import find_process_config
from PyAnalysisTools.PlottingUtils.Plotter import Plotter
from PyAnalysisTools.base.FileHandle import FileHandle
from PyAnalysisTools.ROOTUtils.ObjectHandle import find_branches_matching_pattern
from PyAnalysisTools.ROOTUtils.ObjectHandle import get_objects_from_canvas_by_type
from PyAnalysisTools.base import _logger, InvalidInputError
from PyAnalysisTools.base.OutputHandle import OutputFileHandle
from PyAnalysisTools.base.YAMLHandle import YAMLLoader as yl
class TriggerFlattener(object):
    """Expands per-event trigger vectors in a ROOT tree into one branch per trigger.

    Reads the list of fired triggers from the input tree's ``triggerList``
    branch and, for every existing ``trigger*`` branch, creates one new
    branch per trigger name (filled with -1111 for events in which that
    trigger did not fire).
    """
    def __init__(self, **kwargs):
        """Open the input file in UPDATE mode and load the tree(s) to flatten.

        Required kwargs: ``input_file``, ``tree_name``.
        Optional kwargs: ``additional_trees`` (extra tree names that receive
        the same new branches), ``tmp_dir``, ``branch_name`` (default
        "triggerList").

        Raises:
            InvalidInputError: if ``input_file`` or ``tree_name`` is missing.
        """
        if "input_file" not in kwargs:
            raise InvalidInputError("No input file name provided")
        if "tree_name" not in kwargs:
            raise InvalidInputError("No tree name provided")
        kwargs.setdefault("additional_trees", [])
        kwargs.setdefault("tmp_dir", None)
        kwargs.setdefault("branch_name", "triggerList")
        self.file_handle = FileHandle(file_name=kwargs["input_file"], run_dir=kwargs["tmp_dir"], open_option="UPDATE")
        self.tree_name = kwargs["tree_name"]
        self.tree = self.file_handle.get_object_by_name(self.tree_name, tdirectory="Nominal")
        self.branch_name = kwargs["branch_name"]
        self.additional_trees_names = kwargs["additional_trees"]
        if self.additional_trees_names is None:
            self.additional_trees_names = []
        # Each additional tree becomes an attribute named after the tree.
        for tn in self.additional_trees_names:
            setattr(self, tn, self.file_handle.get_object_by_name(tn, tdirectory="Nominal"))
        self.trigger_list = []
    def flatten_all_branches(self, skipAcceptance=False):
        """Find all trigger* branches and expand each into per-trigger branches."""
        # branch_names = find_branches_matching_pattern(self.tree, "^trigger_.*")
        branch_names = find_branches_matching_pattern(self.tree, "^trigger.*")
        self.read_triggers()
        # branch_names.remove("trigger_list")
        branch_names.remove("triggerList")
        self.expand_branches(branch_names)
    def read_triggers(self):
        """Collect the union of all trigger names seen in any event.

        Dashes are replaced by underscores so the names can serve as valid
        branch names later on.
        """
        for entry in range(self.tree.GetEntries()):
            self.tree.GetEntry(entry)
            # for item in range(len(self.tree.trigger_list)):
            #     if self.tree.trigger_list[item].replace("-", "_") not in self.trigger_list:
            #         self.trigger_list.append(self.tree.trigger_list[item].replace("-", "_"))
            for item in range(len(self.tree.triggerList)):
                if self.tree.triggerList[item].replace("-", "_") not in self.trigger_list:
                    self.trigger_list.append(self.tree.triggerList[item].replace("-", "_"))
    def expand_branches(self, branch_names, skipAcceptance=False):
        """Create and fill one branch per (original branch, trigger name) pair.

        Branch objects and their backing array buffers are created via
        exec() with names derived from the trigger, and later retrieved via
        eval(); acceptance branches are stored as ints, everything else as
        floats.  Events where a trigger is absent are filled with -1111.

        NOTE(review): the exec'd code calls array(...) — confirm that
        ``from array import array`` is present earlier in this file.
        """
        for branch_name in branch_names:
            for trigger_name in self.trigger_list:
                new_name = branch_name.replace("trigger", trigger_name)
                if "acceptance" in new_name:
                    new_trigName = new_name
                    if skipAcceptance:
                        # Optionally drop the "acceptance" suffix from the
                        # visible branch name.
                        new_trigName = new_name.replace("_acceptance", "").replace("Acceptance", "")
                    exec("data_holder_{:s} = array(\'i\', [0])".format(new_name))
                    exec("branch_{:s} = self.tree.Branch(\"{:s}\", data_holder_{:s}, \"{:s}/I\")".format(new_name,
                                                                                                         new_trigName,
                                                                                                         new_name,
                                                                                                         new_trigName))
                    for tn in self.additional_trees_names:
                        exec("branch_{:s}_{:s} = self.{:s}.Branch(\"{:s}\", data_holder_{:s}, "
                             "\"{:s}/I\")".format(tn, new_name, tn, new_trigName, new_name, new_trigName))
                else:
                    exec("data_holder_{:s} = array(\'f\', [0.])".format(new_name))
                    exec("branch_{:s} = self.tree.Branch(\"{:s}\", data_holder_{:s}, \"{:s}/F\")".format(
                        *[new_name] * 4))
                    for tn in self.additional_trees_names:
                        exec("branch_{:s}_{:s} = self.{:s}.Branch(\"{:s}\", data_holder_{:s}, "
                             "\"{:s}/F\")".format(tn, new_name, tn, *[new_name] * 3))
        for entry in range(self.tree.GetEntries()):
            self.tree.GetEntry(entry)
            for tree_name in self.additional_trees_names:
                getattr(self, tree_name).GetEntry(entry)
            # Triggers not seen in this event get the sentinel fill below.
            unprocessed_triggers = copy(self.trigger_list)
            exec("trig_list_branch = self.tree.{:s}".format(self.branch_name))
            # for item in range(len(self.tree.trigger_list)):
            #     trig_name = self.tree.trigger_list[item].replace("-", "_")
            for item in range(len(self.tree.triggerList)):
                trig_name = self.tree.triggerList[item].replace("-", "_")
                if trig_name not in unprocessed_triggers:
                    _logger.warning("{:s} not in unprocessed trigger list. Likely there went something wrong in the "
                                    "branch filling".format((trig_name)))
                    continue
                unprocessed_triggers.remove(trig_name)
                for branch_name in branch_names:
                    new_name = branch_name.replace("trigger", trig_name)
                    exec("data_holder_{:s}[0] = self.tree.{:s}[item]".format(new_name, branch_name))
                    eval("branch_{:s}.Fill()".format(new_name))
                    for tn in self.additional_trees_names:
                        eval("branch_{:s}_{:s}.Fill()".format(tn, new_name))
            # Sentinel value for triggers that did not fire in this event.
            for missing_trigger in unprocessed_triggers:
                for branch_name in branch_names:
                    new_name = branch_name.replace("trigger", missing_trigger)
                    exec("data_holder_{:s}[0] = -1111".format(new_name))
                    eval("branch_{:s}.Fill()".format(new_name))
                    for tn in self.additional_trees_names:
                        eval("branch_{:s}_{:s}.Fill()".format(tn, new_name))
        # Persist the expanded trees back into the (UPDATE-mode) input file.
        tdir = self.file_handle.get_directory("Nominal")
        tdir.cd()
        self.tree.Write()
        for tree_name in self.additional_trees_names:
            getattr(self, tree_name).Write()
class TriggerAcceptancePlotter(BasePlotter):
    """Plots trigger acceptance, overlap and unique-rate distributions.

    Builds the trigger list from the HLT_* branches of the input files,
    applies luminosity weights from the cross-section handle and registers
    the resulting canvases with the output handle.
    """
    def __init__(self, **kwargs):
        """Set up file handles, cross-section handle and the trigger list.

        Required kwargs: ``input_files``, ``tree_name``, ``xs_config_file``.
        """
        # NOTE(review): "datset_info" looks like a typo for "dataset_info";
        # confirm what key BasePlotter/OutputFileHandle actually read before
        # renaming it.
        kwargs.setdefault("datset_info", None)
        kwargs.setdefault("output_file_name", "plots.root")
        self.file_handles = [FileHandle(file_name=file_name, dataset_info=kwargs["xs_config_file"])
                             for file_name in kwargs["input_files"]]
        self.tree_name = kwargs["tree_name"]
        self.hist_def = None
        super(TriggerAcceptancePlotter, self).__init__(**kwargs)
        self.xs_handle = XSHandle(kwargs["xs_config_file"])
        self.output_handle = OutputFileHandle(make_plotbook=self.plot_configs[0].make_plot_book, **kwargs)
        self.trigger_list = self.build_trigger_list()
        # Drop prescaled and online-only triggers from all plots.
        self.trigger_list = [t for t in self.trigger_list if 'prescale' not in t and 'online' not in t]
        self.overlap_hist = None
        # NOTE(review): attribute name is misspelled ("unqiue"); kept for
        # backward compatibility since __init__ established it.
        self.unqiue_rate_hist = None
    def __del__(self):
        # Flush all registered canvases when the plotter is garbage
        # collected.  NOTE(review): relying on __del__ for output writing is
        # fragile; an explicit close method would be safer.
        self.output_handle.write_and_close()
    def build_trigger_list(self):
        """Return the union of HLT_* branch names over all input files,
        optionally filtered by the plot config's white/black lists."""
        trigger_list = list(set(list(chain.from_iterable([file_handle.get_branch_names_from_tree(self.tree_name,
                                                                                                 tdirectory="Nominal",
                                                                                                 pattern="HLT_") for
                                                          file_handle in self.file_handles]))))
        if hasattr(self.plot_configs[0], 'white_list'):
            trigger_list = [x for x in trigger_list if x in self.plot_configs[0].white_list]
        if hasattr(self.plot_configs[0], 'black_list'):
            trigger_list = [x for x in trigger_list if x not in self.plot_configs[0].black_list]
        return trigger_list
    def get_hist_def(self, name):
        """Return a 1D histogram template with one labelled bin per trigger.

        Clones the cached definition if one exists.
        """
        if self.hist_def is not None:
            return self.hist_def.Clone(name)
        hist = ROOT.TH1F(name, "", len(self.trigger_list), 0., len(self.trigger_list))
        for trigger_name in self.trigger_list:
            hist.GetXaxis().SetBinLabel(self.trigger_list.index(trigger_name) + 1,
                                        trigger_name.replace("_acceptance", "").replace("Acceptance", ""))
        hist.GetXaxis().SetLabelSize(0.03)
        return hist
    def apply_lumi_weights(self, histograms):
        """Scale each MC histogram by its luminosity/cross-section weight.

        Data histograms and None entries are left untouched.
        """
        for process, hist in list(histograms.items()):
            if hist is None:
                _logger.error("Histogram for process {:s} is None".format(process))
                continue
            if "data" in process.lower():
                continue
            cross_section_weight = self.xs_handle.get_lumi_scale_factor(process, self.lumi, self.event_yields[process])
            HT.scale(hist, cross_section_weight)
    def plot_trigger_acceptance(self):
        """Produce and register the per-process trigger acceptance plot."""
        self.read_cutflows()
        raw_data = self.read_triggers()
        histograms = self.make_histograms(raw_data)
        self.apply_lumi_weights(histograms)
        if self.process_configs is not None:
            for process_name in list(histograms.keys()):
                _ = find_process_config(process_name, self.process_configs)
            Plotter.merge(histograms, self.process_configs)
        # canvas = PT.plot_stack(histograms, self.plot_configs[0]) -- mmm dev
        canvas = PT.plot_objects(histograms, self.plot_configs[0])
        canvas = FM.format_canvas(canvas, margin={"right": 0.15, "bottom": 0.2})
        canvas.Modified()
        FM.decorate_canvas(canvas, self.plot_configs[0])
        self.output_handle.register_object(canvas)
    def read_data(self, file_handle, trigger_data=None):
        """Append this file's per-event trigger decisions to trigger_data.

        Returns the (possibly newly created) mapping
        trigger name -> list of per-event decisions.

        Fix: the original used a mutable default argument
        (``trigger_data={}``), so calls without an explicit dict silently
        accumulated data across unrelated invocations.
        """
        if trigger_data is None:
            trigger_data = {}
        tree = file_handle.get_object_by_name(self.tree_name, tdirectory="Nominal")
        entries = tree.GetEntries()
        for entry in range(entries):
            tree.GetEntry(entry)
            for trigger in self.trigger_list:
                try:
                    trigger_data[trigger].append(eval("tree.{:s}".format(trigger)))
                except KeyError:
                    trigger_data[trigger] = [(eval("tree.{:s}".format(trigger)))]
        return trigger_data
    def get_overlap_coefficients(self, trigger_data):
        """Return (AND count / OR count) for every unordered trigger pair.

        Pairs where either trigger never fired get an overlap of 0.
        """
        trigger_combinations = list(combinations(self.trigger_list, 2))
        trigger_overlap = {}
        for comb in trigger_combinations:
            if sum(trigger_data[comb[0]]) > 0. and sum(trigger_data[comb[1]]) > 0.:
                overlap = sum(map(float, [d[0] == d[1] and d[0] == 1 for d in zip(trigger_data[comb[0]],
                                                                                  trigger_data[comb[1]])]))
                overlap /= sum(map(float, [d[0] == 1 or d[1] == 1 for d in zip(trigger_data[comb[0]],
                                                                               trigger_data[comb[1]])]))
            else:
                overlap = 0.
            trigger_overlap[comb] = overlap
        return trigger_overlap
    def get_unique_correlation_coefficients(self, trigger_data):
        """Return (AND count / first-trigger count) for every ordered pair.

        Asymmetric by construction, hence permutations instead of
        combinations; division by zero yields an overlap of 0.
        """
        trigger_combinations = list(permutations(self.trigger_list, 2))
        trigger_overlap = {}
        for comb in trigger_combinations:
            if sum(trigger_data[comb[0]]) > 0. and sum(trigger_data[comb[1]]) > 0.:
                overlap = sum(map(float, [d[0] == d[1] and d[0] == 1 for d in zip(trigger_data[comb[0]],
                                                                                  trigger_data[comb[1]])]))
                try:
                    overlap /= sum(map(float, [d[0] == 1 for d in zip(trigger_data[comb[0]],
                                                                      trigger_data[comb[1]])]))
                except ZeroDivisionError:
                    overlap = 0.
            else:
                overlap = 0.
            trigger_overlap[comb] = overlap
        return trigger_overlap
    def get_unique_rate(self, trigger_data):
        """Unfinished development stub: dumps the input and exits the process."""
        # trigger_unqiue_rate = {}
        print(trigger_data)
        exit(0)
        # for comb in trigger_combinations:
        #     overlap = sum(map(float, [d[0] == d[1] and d[0] == 1 for d in zip(trigger_data[comb[0]],
        #                                                                       trigger_data[comb[1]])]))
        #     overlap /= sum(map(float, [d[0] == 1 or d[1] == 1 for d in zip(trigger_data[comb[0]],
        #                                                                    trigger_data[comb[1]])]))
        #     trigger_overlap[comb] = overlap
        # return trigger_overlap
    def plot_trigger_correlation(self):
        """Plot the symmetric trigger overlap matrix per process."""
        process_dict = {}
        for file_handle in self.file_handles:
            if file_handle.process in process_dict:
                process_dict[file_handle.process].append(file_handle)
            else:
                process_dict[file_handle.process] = [file_handle]
        for process, file_handles in list(process_dict.items()):
            trigger_data = {}
            for file_handle in file_handles:
                trigger_data = self.read_data(file_handle, trigger_data)
            overlap = self.get_overlap_coefficients(trigger_data)
            # NOTE(review): file_handle here is the last handle of the inner
            # loop; using the loop variable `process` for the name would be
            # clearer — confirm they always agree.
            self.output_handle.register_object(self.make_overlap_histogram("overlap_{:s}".format(file_handle.process),
                                                                           overlap))
    def plot_trigger_unique_correlation(self):
        """Plot the asymmetric (unique) trigger correlation matrix per process."""
        process_dict = {}
        for file_handle in self.file_handles:
            if file_handle.process in process_dict:
                process_dict[file_handle.process].append(file_handle)
            else:
                process_dict[file_handle.process] = [file_handle]
        for process, file_handles in list(process_dict.items()):
            trigger_data = {}
            for file_handle in file_handles:
                trigger_data = self.read_data(file_handle, trigger_data)
            overlap = self.get_unique_correlation_coefficients(trigger_data)
            self.output_handle.register_object(
                self.make_overlap_histogram("unique_correlation_{:s}".format(file_handle.process),
                                            overlap, unique=True))
    def plot_unqiue_trigger_rate(self):
        """Plot the unique trigger rate per input file (depends on the
        unfinished get_unique_rate stub)."""
        for file_handle in self.file_handles:
            trigger_data = self.read_data(file_handle)
            unique_rate = self.get_unique_rate(trigger_data)
            self.output_handle.register_object(
                self.make_unique_rate_histogram("unqiue_rate_{:s}".format(file_handle.process),
                                                unique_rate))
    def make_overlap_histogram(self, name, data, unique=False):
        """Build, fill and return a formatted 2D overlap canvas.

        The y axis is inverted relative to x so the diagonal runs
        top-left to bottom-right; for symmetric (non-unique) data each
        coefficient is filled into both (x, y) and (y, x).
        """
        ROOT.gStyle.SetPaintTextFormat("4.2f")
        def get_hist_def():
            self.overlap_hist = ROOT.TH2F(name, "", len(self.trigger_list), 0., len(self.trigger_list),
                                          len(self.trigger_list), 0., len(self.trigger_list))
            for trigger_name in self.trigger_list:
                index = self.trigger_list.index(trigger_name)
                self.overlap_hist.GetXaxis().SetBinLabel(index + 1,
                                                         trigger_name.replace("_acceptance", "").replace("Acceptance",
                                                                                                         ""))
                self.overlap_hist.GetXaxis().SetLabelSize(0.02)
                self.overlap_hist.GetYaxis().SetBinLabel(len(self.trigger_list) - index,
                                                         trigger_name.replace("_acceptance", "").replace("Acceptance",
                                                                                                         ""))
                self.overlap_hist.GetYaxis().SetLabelSize(0.02)
            self.overlap_hist.GetZaxis().SetLabelSize(0.03)
        get_hist_def()
        for comb, overlap in list(data.items()):
            self.overlap_hist.Fill(comb[0].replace("_acceptance", "").replace("Acceptance", ""),
                                   comb[1].replace("_acceptance", "").replace("Acceptance", ""), overlap)
            if not unique:
                self.overlap_hist.Fill(comb[1].replace("_acceptance", "").replace("Acceptance", ""),
                                       comb[0].replace("_acceptance", "").replace("Acceptance", ""), overlap)
        # Fill the diagonal (every trigger overlaps fully with itself).
        for i in range(self.overlap_hist.GetNbinsX()):
            self.overlap_hist.Fill(i, self.overlap_hist.GetNbinsX() - i - 1, 1.)
        # NOTE(review): the computed coefficient is AND/OR but this label
        # reads OR/AND — confirm which way round the title should be.
        ztitle = "(trigger0 || trigger1)/(trigger0 && trigger1)"
        if unique:
            ztitle = "(trigger0 && trigger1)/trigger0"
        plot_config = pc(name=name, draw_option="COLZTEXT",
                         xtitle="trigger 0", xtitle_offset=2.0, xtitle_size=0.04,
                         ytitle="trigger 1", ytitle_offset=2.8, ytitle_size=0.04,
                         ztitle=ztitle, ztitle_size=0.04)
        canvas = PT.plot_2d_hist(self.overlap_hist, plot_config=plot_config)
        canvas = FM.format_canvas(canvas, margin={"left": 0.2, "bottom": 0.16})
        canvas.Update()
        return canvas
    def make_unique_rate_histogram(self, name, data):
        """Build, fill and return the unique-rate histogram canvas."""
        def get_hist_def():
            # Fix: the original read self.unique_rate_hist, which is never
            # initialised (__init__ sets the misspelled unqiue_rate_hist),
            # so this method always raised AttributeError.  Use the
            # established attribute consistently.
            if self.unqiue_rate_hist is not None:
                return self.unqiue_rate_hist.Clone(name)
            self.unqiue_rate_hist = ROOT.TH1F(name, "", len(self.trigger_list), 0., len(self.trigger_list))
            for trigger_name in self.trigger_list:
                index = self.trigger_list.index(trigger_name)
                self.unqiue_rate_hist.GetXaxis().SetBinLabel(index + 1, trigger_name.replace("_acceptance", "").replace(
                    "Acceptance", ""))
            self.unqiue_rate_hist.GetXaxis().SetLabelSize(0.03)
        get_hist_def()
        # NOTE(review): this loop fills self.overlap_hist, not the unique
        # rate histogram — looks like a copy-paste remnant; the code path is
        # currently unreachable because get_unique_rate() exits the process.
        for comb, overlap in list(data.items()):
            self.overlap_hist.Fill(comb[0].replace("_acceptance", "").replace("Acceptance", ""),
                                   comb[1].replace("_acceptance", "").replace("Acceptance", ""), overlap)
            self.overlap_hist.Fill(comb[1].replace("_acceptance", "").replace("Acceptance", ""),
                                   comb[0].replace("_acceptance", "").replace("Acceptance", ""), overlap)
        plot_config = pc(name=name, draw_option="HIST")
        return PT.plot_hist(self.unqiue_rate_hist, plot_config=plot_config)
    def make_histograms(self, data):
        """Return one filled acceptance histogram per process."""
        histograms = {}
        for process, trigger_info in list(data.items()):
            hist = self.get_hist_def("trigger_" + process)
            histograms[process] = self.fill_histogram(hist, trigger_info)
        return histograms
    @staticmethod
    def fill_histogram(hist, data):
        """Fill a labelled histogram from a label -> count mapping."""
        for label, count in list(data.items()):
            hist.Fill(label, count)
        return hist
    def read_triggers(self):
        """Return process -> {trigger -> number of passing events} for all files."""
        def parse_trigger_info(file_handle):
            tree = file_handle.get_object_by_name(self.tree_name, tdirectory="Nominal")
            tmp = dict((trigger.replace("_acceptance", "").replace("Acceptance", ""),
                        tree.GetEntries("{:s} == 1".format(trigger))) for trigger in self.trigger_list)
            return tmp
        data = dict((file_handle.process, parse_trigger_info(file_handle)) for file_handle in self.file_handles)
        return data
class TriggerEfficiencyAnalyser(BasePlotter):
    """Computes and plots per-trigger efficiencies from flattened trees.

    For each plot config and each trigger in the YAML config, the numerator
    histogram (trigger-matched distribution) is divided by a shared
    denominator histogram via the EfficiencyCalculator.
    """
    def __init__(self, **kwargs):
        """Set up file handles, output handle and the trigger configuration.

        Required kwargs: ``input_files``, ``tree_name``, ``config_file``.

        Raises:
            InvalidInputError: if ``tree_name`` is missing.
        """
        if "tree_name" not in kwargs:
            raise InvalidInputError("No tree name provided")
        self.file_list = kwargs["input_files"]
        self.tree_name = kwargs["tree_name"]
        self.file_handles = [FileHandle(file_name=file_name) for file_name in self.file_list]
        self.calculator = ec()
        super(TriggerEfficiencyAnalyser, self).__init__(**kwargs)
        self.output_handle = OutputFileHandle(**kwargs)
        self.config = yl.read_yaml(kwargs["config_file"])
        # Denominator histograms cached per distribution name.
        self.denominators = {}
        # trigger name -> last produced efficiency canvas.
        self.efficiencies = {}
    def get_denominators(self, plot_config):
        """Return (building and caching on first use) the per-process
        denominator histograms for this plot config's distribution."""
        if plot_config.dist not in self.denominators:
            hist = get_histogram_definition(plot_config)
            cut_string = ""
            if hasattr(plot_config, "cut"):
                cut_string = plot_config.cut
            self.denominators[plot_config.dist] = dict((file_handle.process,
                                                        file_handle.fetch_and_link_hist_to_tree(self.tree_name,
                                                                                                hist,
                                                                                                plot_config.dist,
                                                                                                cut_string=cut_string,
                                                                                                tdirectory="Nominal"))
                                                       for file_handle in self.file_handles)
        return self.denominators[plot_config.dist]
    def get_efficiency(self, trigger_name, plot_config):
        """Build and return the efficiency canvas for one trigger.

        The numerator distribution/cut are derived from the plot config by
        substituting the placeholder "replace" with the trigger name; 1D
        efficiencies are overlaid per process, 2D ones plotted singly, and
        #Delta R efficiencies additionally get a fermi-function fit.
        """
        denominators = self.get_denominators(plot_config)
        numerator_plot_config = copy(plot_config)
        numerator_plot_config.dist = numerator_plot_config.numerator.replace("replace", trigger_name)
        numerator_plot_config.name = numerator_plot_config.numerator.replace("replace", trigger_name).split()[0]
        hist = get_histogram_definition(numerator_plot_config)
        cut_string = ""
        if hasattr(plot_config, "cut"):
            cut_string = plot_config.cut.replace(plot_config.dist, numerator_plot_config.dist)
        numerators = dict((file_handle.process,
                           file_handle.fetch_and_link_hist_to_tree(self.tree_name,
                                                                   hist,
                                                                   numerator_plot_config.dist,
                                                                   cut_string=cut_string,
                                                                   tdirectory="Nominal")) for file_handle in
                          self.file_handles)
        if not isinstance(list(numerators.values())[0], ROOT.TH2F):
            dependency = plot_config.name.split("_")[-1]
        else:
            dependency = "QetavsPt"
        efficiencies = dict((process,
                             self.calculator.calculate_efficiency(numerators[process],
                                                                  denominators[process],
                                                                  name="eff_{:s}_{:s}_{:s}".format(process,
                                                                                                   trigger_name,
                                                                                                   dependency)))
                            for process in list(numerators.keys()))
        if not isinstance(list(numerators.values())[0], ROOT.TH2F):
            canvas = PT.plot_objects(efficiencies, plot_config)
        else:
            plot_config.name = list(efficiencies.values())[0].GetName()
            canvas = PT.plot_obj(list(efficiencies.values())[0], plot_config)
        if "dr" in plot_config.name:
            self.fit_efficiency(canvas)
        return canvas
    def fit_efficiency(self, canvas):
        """Fit a fermi turn-on function to each TEfficiency on the canvas
        and draw the fitted curve on top."""
        efficiency_graphs = get_objects_from_canvas_by_type(canvas, "TEfficiency")
        fit_func = ROOT.TF1("fermi", "[3] / ([0] + [3] *[4]) * ( [0] / (exp(-[1] * x + [2]) + [3])) + [4]", 0., 0.3)
        fit_func.SetParameters(1, 10, 0., 0., 0.)
        fit_func.SetParLimits(0, -5., 20.)
        fit_func.SetParLimits(1, -100., 100.)
        fit_func.SetParLimits(2, -10., 10.)
        fit_func.SetParLimits(3, -10., 10.)
        fit_func.SetParLimits(4, -10., 10.)
        # print efficiency_graphs
        # a = ROOT.RooRealVar("a", "", 1, 5., 20, )
        # b = ROOT.RooRealVar("b", "", 10., -100., 100.)
        # c = ROOT.RooRealVar("c", "", 0., -10., 10.)
        # d = ROOT.RooRealVar("d", "", 0., -10., 10.)
        # e = ROOT.RooRealVar("e", "", 0., -10., 10.)
        # dr = ROOT.RooRealVar("dr", "#Delta R", 0., 0.3)
        # fermi = ROOT.RooGenericPdf("fermi", "fermi function", "d / (a + d *e) * ( a / (exp(-B * dr + c) + d)) + e",
        #                            #ROOT.RooArgSet(dr, a, b, c, d, e))
        # data = ROOT.RooDataHist("efficiency", "", )
        for teff in efficiency_graphs:
            teff.Fit(fit_func)
            fit_func.Draw("L same")
    def get_efficiencies(self):
        """Produce, register and write out all configured efficiency plots."""
        for plot_config in self.plot_configs:
            for trigger_name in self.config["triggers"]:
                canvas = self.get_efficiency(trigger_name, plot_config)
                self.efficiencies[trigger_name] = canvas
                self.output_handle.register_object(canvas)
        self.output_handle.write_and_close()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import codecs
import sys, time, json, traceback
from apiclient.discovery import build
from apiclient.errors import HttpError
#from oauth2client.tools import argparser
# Set DEVELOPER_KEY to the API key value from the APIs & auth > Registered apps
# tab of
# https://cloud.google.com/console
# Please ensure that you have enabled the YouTube Data API for your project.
#DEVELOPER_KEY = "REPLACE_ME"
# Load DEVELOPER_KEY from a local file so the API key is not committed to
# source control.  NOTE(review): exec'ing the file runs arbitrary code;
# reading just the key string from the file would be safer.
exec file("YOUTUBE_DEV_KEY.txt").read()
YOUTUBE_API_SERVICE_NAME = "youtube"
#YOUTUBE_API_SERVICE_NAME = "WVVidWatch"
YOUTUBE_API_VERSION = "v3"
class YouTubeScraper:
def __init__(self, useUTF8=True):
if useUTF8:
UTF8Writer = codecs.getwriter('utf8')
sys.stdout = UTF8Writer(sys.stdout)
self.query = ""
self.recs = {}
self.VIDNUM = 0
self.youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
def getChannelId(self, username):
# Retrieve the contentDetails part of the channel resource for the
# authenticated user's channel.
channels_response = self.youtube.channels().list(
forUsername="enockglidden",
part="contentDetails"
).execute()
#print json.dumps(channels_response, indent=True)
try:
return channels_response["items"][0]["id"]
except:
return None
def getChannelVideosForUser(self, username="enockglidden", fname=None):
"""
Find videos for all channels of a given user
that have location information available.
"""
self.query = "channelSearch: username=%s" % username
if fname == None:
fname = "%s_data.json" % username
videoIds = []
channels_response = self.youtube.channels().list(
forUsername=username,
part="contentDetails"
).execute()
print json.dumps(channels_response, indent=True)
for channel in channels_response["items"]:
# From the API response, extract the playlist ID that identifies the list
# of videos uploaded to the authenticated user's channel.
uploads_list_id = channel["contentDetails"]["relatedPlaylists"]["uploads"]
print "Videos in list %s" % uploads_list_id
# Retrieve the list of videos uploaded to the authenticated user's channel.
playlistitems_list_request = self.youtube.playlistItems().list(
playlistId=uploads_list_id,
#part="snippet,recordingDetails",
part="snippet",
maxResults=50
)
while playlistitems_list_request:
playlistitems_list_response = playlistitems_list_request.execute()
# Print information about each video.
for playlist_item in playlistitems_list_response["items"]:
title = playlist_item["snippet"]["title"]
video_id = playlist_item["snippet"]["resourceId"]["videoId"]
print "%s (%s)" % (title, video_id)
if 0:
print json.dumps(playlist_item, indent=4, sort_keys=True)
print
videoIds.append(video_id)
playlistitems_list_request = self.youtube.playlistItems().list_next(
playlistitems_list_request, playlistitems_list_response)
video_ids = ",".join(videoIds)
print "video_ids:", video_ids
self.processIds(video_ids)
self.saveRecs(fname)
def getLocs(self, latMin, lonMin, latMax, lonMax, dlat, dlon):
print "getting locs from %s to %s lat in steps of %s and %s to %s lon in steps of %s" % \
(latMin, latMax, dlat, lonMin, lonMax, dlon)
locs = []
for lat in range(latMin,latMax+dlat,dlat):
for lon in range(lonMin,lonMax+dlon,dlon):
if lat==0 and lon==0:
continue
if (lat==-90 or lat==90) and lon != 0:
continue
locs.append("%.1f,%.1f" % (lat,lon))
print "Got %d specific points" % len(locs)
return locs
def fetch(self, name, query=None, locs=None, dimension="any", username=None, channelId=None):
if query == None:
query = name
if username != None:
channelId = self.getChannelId(username)
fname = "%s_data.json" % name
if locs == None:
locs = ["37.42307,-122.08427",
"15.0465951,-166.3735415"]
"""
These choices are experimental and not very well worked
out yet.
"""
if type(locs) in [type("str"), type(u"str")]:
if locs.lower() == "global":
locs = self.getLocs(-90, -180, 90, 180, 4, 4)
if locs.lower() == "us":
locs = self.getLocs(36, -123, 44, -66, 1, 1)
for loc in locs:
try:
self.search(query=query, location=loc, dimension=dimension, channelId=channelId)
except HttpError, e:
print "An HTTP error %d occurred:\n%s" % (e.resp.status, e.content)
except:
traceback.print_exc()
self.saveRecs(fname)
def search(self, query, location, max_results=50, location_radius="1000km", dimension="any", channelId=None):
print "query:", query
print "location:", location
print "location_radius:", location_radius
self.query = query
"""
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
"""
# Call the search.list method to retrieve results matching the specified
# query term.
search_response = self.youtube.search().list(
q=query,
type="video",
location=location,
videoDimension=dimension,
locationRadius=location_radius,
channelId=channelId,
part="id,snippet",
maxResults=max_results
).execute()
search_videos = []
# Merge video ids
for search_result in search_response.get("items", []):
search_videos.append(search_result["id"]["videoId"])
video_ids = ",".join(search_videos)
self.processIds(video_ids)
def processIds(self, video_ids):
print "processIds video_ids:", video_ids
# Call the videos.list method to retrieve location details for each video.
video_response = self.youtube.videos().list(
id=video_ids,
part='snippet, recordingDetails'
).execute()
# Add each result to the list, and then display the list of matching videos.
items = video_response.get("items", [])
print "Got %d items" % len(items)
for video_result in items:
"""
if "recordingDetails" not in video_result:
print "video_result missing recordingDetails for id", video_result["id"]
continue
if "location" not in video_result['recordingDetails']:
print "no location in recording details for id", video_result["id"]
continue
if 0:
print json.dumps(video_result, indent=4)
print
"""
#print video_result
try:
lat = video_result["recordingDetails"]["location"]["latitude"]
lon = video_result["recordingDetails"]["location"]["longitude"]
title = video_result["snippet"]["title"]
except:
print "Cannot get location data from record for", video_result["id"]
continue
self.VIDNUM += 1
id = video_result["id"]
rec = {'youtubeId': id,
'id': "%d" % self.VIDNUM,
'lat': lat,
'lon': lon,
'title': title}
rec['publishedAt'] = video_result["snippet"]["publishedAt"]
rec['thumbnails'] = video_result["snippet"]["thumbnails"]
self.recs[id] = rec
#print rec
def saveRecs(self, jsonPath):
t0 = time.time()
recs = []
for id in self.recs.keys():
recs.append(self.recs[id])
f = UTF8Writer(file(jsonPath, "w"))
obj = {'query': self.query,
'time': time.time(),
'records': recs}
f.write(json.dumps(obj, indent=4, sort_keys=True))
t1 = time.time()
print "Wrote %d recs to %s in %.3fs" % (len(recs), jsonPath, t1-t0)
#argparser.add_argument("--location-radius", help="Location radius", default="1000km")
#argparser.add_argument("--max-results", help="Max results", default=50)
def fetch(name, query=None, loc="global", dimension="any", username=None):
    """Module-level convenience wrapper: run one scrape with a fresh scraper."""
    scraper = YouTubeScraper()
    scraper.fetch(name, query, loc, dimension, username)
def getMetaData(id=None, opath=None):
    """Fetch metadata for the given comma-separated video id(s).

    Prints the collected records and, when opath is given, saves them there.
    """
    ys = YouTubeScraper()
    ys.processIds(id)
    print ys.recs
    if opath:
        ys.saveRecs(opath)
def testGetMetaData():
    """Smoke test: fetch metadata for two known videos, singly and together."""
    print "-----------------------------------"
    getMetaData("JYk0qa8D4JY")
    print "-----------------------------------"
    getMetaData("cFtySuUNCcQ")
    print "-----------------------------------"
    ids = "JYk0qa8D4JY,cFtySuUNCcQ"
    getMetaData(ids, "testMetaData.json")
def saveEnocksVideoLayer():
    """Download and save the uploaded-videos layer for 'enockglidden'."""
    YouTubeScraper().getChannelVideosForUser("enockglidden")
def testChannels():
    """Smoke test: resolve channel ids and list videos for known usernames."""
    ys = YouTubeScraper()
    usernames = ["enockglidden"]
    for username in usernames:
        print "username:", username
        id = ys.getChannelId(username)
        print "id:", id
        ys.getChannelVideosForUser(username)
        print
        print
if __name__ == "__main__":
    # Alternative entry points kept for manual experimentation:
#    fetch("hiking")
#    fetch("surfing")
#    fetch("boating", query="boating|sailing|surfing|waterski -fishing", loc=None)
#    fetch("waterSports3D", query="360 video", loc=None)
#    fetch("test", username="enockglidden", loc="us")
#    fetch("test", username="enockglidden", loc=["36.98,-122.00"])
#    fetch("test", query="Wilder Ranch State Park", loc=["36.98418,-122.09912"])
#    testChannels()
#    saveEnocksVideoLayer()
    testGetMetaData()
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Build the JSON-RPC proxy; embed the credentials in the URL only when a
# password was configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:7054")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:7054")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras' base preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.engine import base_preprocessing_layer_v1
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
# Define a test-only implementation of CombinerPreprocessingLayer to validate
# its correctness directly.
class AddingPreprocessingLayer(
    base_preprocessing_layer.CombinerPreprocessingLayer):
  """Test-only combiner layer that learns the sum of all adapted values.

  `adapt()` accumulates the total of every value seen; `call()` then adds
  that learned total to each input element.
  """

  _SUM_NAME = "sum"  # state-variable name shared between layer and combiner

  def __init__(self, **kwargs):
    super(AddingPreprocessingLayer, self).__init__(
        combiner=self.AddingCombiner(), **kwargs)

  def build(self, input_shape):
    super(AddingPreprocessingLayer, self).build(input_shape)
    # Single scalar of state, stored as a shape-(1,) float32 variable.
    self._sum = self._add_state_variable(
        name=self._SUM_NAME,
        shape=(1,),
        dtype=dtypes.float32,
        initializer=init_ops.zeros_initializer)

  def set_total(self, sum_value):
    """This is an example of how a subclass would implement a direct setter.

    These methods should generally just create a dict mapping the correct names
    to the relevant passed values, and call self._set_state_variables() with the
    dict of data.

    Args:
      sum_value: The total to set.
    """
    self._set_state_variables({self._SUM_NAME: [sum_value]})

  def call(self, inputs):
    # Broadcast-add the learned total to every input element.
    return inputs + self._sum

  # Define a Combiner for this layer class.
  class AddingCombiner(base_preprocessing_layer.Combiner):
    """Accumulator logic: the running sum of all batch values seen so far."""

    def compute(self, batch_values, accumulator=None):
      """Compute a step in this computation, returning a new accumulator."""
      new_accumulator = 0 if batch_values is None else np.sum(batch_values)
      if accumulator is None:
        return new_accumulator
      else:
        return self.merge([accumulator, new_accumulator])

    def merge(self, accumulators):
      """Merge several accumulators to a single accumulator."""
      # Combine accumulators and return the result.
      result = accumulators[0]
      for accumulator in accumulators[1:]:
        result = np.sum([np.sum(result), np.sum(accumulator)])
      return result

    def extract(self, accumulator):
      """Convert an accumulator into a dict of output values."""
      # We have to add an additional dimension here because the weight shape
      # is (1,) not None.
      return {AddingPreprocessingLayer._SUM_NAME: [accumulator]}

    def restore(self, output):
      """Create an accumulator based on 'output'."""
      # There is no special internal state here, so we just return the relevant
      # internal value. We take the [0] value here because the weight itself
      # is of the shape (1,) and we want the scalar contained inside it.
      return output[AddingPreprocessingLayer._SUM_NAME][0]

    def serialize(self, accumulator):
      """Serialize an accumulator for a remote call."""
      return compat.as_bytes(json.dumps(accumulator))

    def deserialize(self, encoded_accumulator):
      """Deserialize an accumulator received from 'serialize()'."""
      return json.loads(compat.as_text(encoded_accumulator))
class AddingPreprocessingLayerV1(
    AddingPreprocessingLayer,
    base_preprocessing_layer_v1.CombinerPreprocessingLayer):
  """Graph-mode (v1) variant: same behavior, v1 CombinerPreprocessingLayer base."""
  pass
def get_layer(**kwargs):
  """Build the adding layer matching the current execution mode (eager/v1)."""
  layer_cls = (AddingPreprocessingLayer if context.executing_eagerly()
               else AddingPreprocessingLayerV1)
  return layer_cls(**kwargs)
@keras_parameterized.run_all_keras_modes
class PreprocessingLayerTest(keras_parameterized.TestCase):
  """Exercises adapt(), direct state injection and weight transfer on the
  test-only AddingPreprocessingLayer."""

  def test_adapt_bad_input_fails(self):
    """Test that non-Dataset/Numpy inputs cause a reasonable error."""
    input_dataset = {"foo": 0}

    layer = get_layer()
    with self.assertRaisesRegex(ValueError, "requires a"):
      layer.adapt(input_dataset)

  def test_adapt_infinite_dataset_fails(self):
    """Test that preproc layers fail if an infinite dataset is passed."""
    input_dataset = dataset_ops.Dataset.from_tensor_slices(
        np.array([[1], [2], [3], [4], [5], [0]])).repeat()

    layer = get_layer()
    with self.assertRaisesRegex(ValueError, ".*infinite number of elements.*"):
      layer.adapt(input_dataset)

  def test_pre_build_injected_update_with_no_build_fails(self):
    """Test external update injection before build() is called fails."""
    input_dataset = np.array([1, 2, 3, 4, 5])

    layer = get_layer()
    combiner = layer._combiner
    updates = combiner.extract(combiner.compute(input_dataset))

    # Injecting state before the state variables exist must be rejected.
    with self.assertRaisesRegex(RuntimeError, ".*called after build.*"):
      layer._set_state_variables(updates)

  def test_setter_update(self):
    """Test the prototyped setter method."""
    input_data = keras.Input(shape=(1,))
    layer = get_layer()
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()

    layer.set_total(15)

    # sum == 15, so each input is shifted by 15.
    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

  def test_pre_build_adapt_update_numpy(self):
    """Test that preproc layers can adapt() before build() is called."""
    input_dataset = np.array([1, 2, 3, 4, 5])

    layer = get_layer()
    layer.adapt(input_dataset)

    input_data = keras.Input(shape=(1,))
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()

    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

  def test_post_build_adapt_update_numpy(self):
    """Test that preproc layers can adapt() after build() is called."""
    input_dataset = np.array([1, 2, 3, 4, 5])

    input_data = keras.Input(shape=(1,))
    layer = get_layer()
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()

    layer.adapt(input_dataset)

    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

  def test_pre_build_injected_update(self):
    """Test external update injection before build() is called."""
    input_dataset = np.array([1, 2, 3, 4, 5])

    layer = get_layer()
    combiner = layer._combiner
    updates = combiner.extract(combiner.compute(input_dataset))

    layer.build((1,))
    layer._set_state_variables(updates)

    input_data = keras.Input(shape=(1,))
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()

    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

  def test_post_build_injected_update(self):
    """Test external update injection after build() is called."""
    input_dataset = np.array([1, 2, 3, 4, 5])
    input_data = keras.Input(shape=(1,))
    layer = get_layer()
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()

    combiner = layer._combiner
    updates = combiner.extract(combiner.compute(input_dataset))
    layer._set_state_variables(updates)

    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

  def test_pre_build_adapt_update_dataset(self):
    """Test that preproc layers can adapt() before build() is called."""
    input_dataset = dataset_ops.Dataset.from_tensor_slices(
        np.array([[1], [2], [3], [4], [5], [0]]))

    layer = get_layer()
    layer.adapt(input_dataset)

    input_data = keras.Input(shape=(1,))
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()

    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

  def test_adapt_dataset_of_tuples_fails(self):
    """Test that preproc layers can adapt() before build() is called."""
    # Datasets of (data, label) tuples are not single-Tensor and are rejected.
    input_dataset = dataset_ops.Dataset.from_tensor_slices((
        np.array([[1], [2], [3], [4], [5], [0]]),
        np.array([[1], [2], [3], [4], [5], [0]])))

    layer = get_layer()
    with self.assertRaisesRegex(TypeError, "single-Tensor elements"):
      layer.adapt(input_dataset)

  def test_post_build_adapt_update_dataset(self):
    """Test that preproc layers can adapt() after build() is called."""
    input_dataset = dataset_ops.Dataset.from_tensor_slices(
        np.array([[1], [2], [3], [4], [5], [0]]))

    input_data = keras.Input(shape=(1,))
    layer = get_layer()
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()

    layer.adapt(input_dataset)

    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

  def test_further_tuning(self):
    """Test that models can be tuned with multiple calls to 'adapt'."""
    input_dataset = np.array([1, 2, 3, 4, 5])

    layer = get_layer()
    layer.adapt(input_dataset)

    input_data = keras.Input(shape=(1,))
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()

    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

    # reset_state=False accumulates on top of the previous sum (15 + 3 = 18).
    layer.adapt(np.array([1, 2]), reset_state=False)
    self.assertAllEqual([[19], [20], [21]], model.predict([1., 2., 3.]))

  def test_further_tuning_post_injection(self):
    """Test that models can be tuned with multiple calls to 'adapt'."""
    input_dataset = np.array([1, 2, 3, 4, 5])

    layer = get_layer()
    input_data = keras.Input(shape=(1,))
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = testing_utils.should_run_eagerly()

    combiner = layer._combiner
    updates = combiner.extract(combiner.compute(input_dataset))
    layer._set_state_variables(updates)
    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

    layer.adapt(np.array([1, 2]), reset_state=False)
    self.assertAllEqual([[19], [20], [21]], model.predict([1., 2., 3.]))

  def test_weight_based_state_transfer(self):
    """Test that preproc layers can transfer state via get/set weights.."""

    def get_model():
      input_data = keras.Input(shape=(1,))
      layer = get_layer()
      output = layer(input_data)
      model = keras.Model(input_data, output)
      model._run_eagerly = testing_utils.should_run_eagerly()
      return (model, layer)

    input_dataset = np.array([1, 2, 3, 4, 5])
    model, layer = get_model()
    layer.adapt(input_dataset)
    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

    # Create a new model and verify it has no state carryover.
    weights = model.get_weights()
    model_2, _ = get_model()
    self.assertAllEqual([[1], [2], [3]], model_2.predict([1., 2., 3.]))

    # Transfer state from model to model_2 via get/set weights.
    model_2.set_weights(weights)
    self.assertAllEqual([[16], [17], [18]], model_2.predict([1., 2., 3.]))

  def test_weight_based_state_transfer_with_further_tuning(self):
    """Test that transferred state can be used to further tune a model.."""

    def get_model():
      input_data = keras.Input(shape=(1,))
      layer = get_layer()
      output = layer(input_data)
      model = keras.Model(input_data, output)
      model._run_eagerly = testing_utils.should_run_eagerly()
      return (model, layer)

    input_dataset = np.array([1, 2, 3, 4, 5])
    model, layer = get_model()
    layer.adapt(input_dataset)
    self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))

    # Transfer state from model to model_2 via get/set weights.
    weights = model.get_weights()
    model_2, layer_2 = get_model()
    model_2.set_weights(weights)

    # Further adapt this layer based on the transferred weights.
    layer_2.adapt(np.array([1, 2]), reset_state=False)
    self.assertAllEqual([[19], [20], [21]], model_2.predict([1., 2., 3.]))

  def test_loading_without_providing_class_fails(self):
    """Loading a SavedModel with a custom layer requires the layer class."""
    input_data = keras.Input(shape=(1,))
    layer = get_layer()
    output = layer(input_data)
    model = keras.Model(input_data, output)

    if not context.executing_eagerly():
      self.evaluate(variables.variables_initializer(model.variables))

    output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
    model.save(output_path, save_format="tf")

    # load_model without custom_objects cannot reconstruct the custom layer.
    with self.assertRaisesRegex(RuntimeError, "Unable to restore a layer of"):
      _ = keras.models.load_model(output_path)

  def test_adapt_sets_input_shape_rank(self):
    """Check that `.adapt()` sets the `input_shape`'s rank."""
    # Shape: (3,1,2)
    adapt_dataset = np.array([[[1., 2.]],
                              [[3., 4.]],
                              [[5., 6.]]], dtype=np.float32)

    layer = get_layer()
    layer.adapt(adapt_dataset)

    # Only the rank (3) is fixed by adapt; the dims themselves stay unknown.
    input_dataset = np.array([[[1., 2.], [3., 4.]],
                              [[3., 4.], [5., 6.]]], dtype=np.float32)
    layer(input_dataset)

    model = keras.Sequential([layer])
    self.assertTrue(model.built)
    self.assertEqual(model.input_shape, (None, None, None))

  def test_adapt_doesnt_overwrite_input_shape(self):
    """Check that `.adapt()` doesn't change the `input_shape`."""
    # Shape: (3, 1, 2)
    adapt_dataset = np.array([[[1., 2.]],
                              [[3., 4.]],
                              [[5., 6.]]], dtype=np.float32)

    # An explicitly supplied input_shape must survive adapt().
    layer = get_layer(input_shape=[1, 2])
    layer.adapt(adapt_dataset)

    model = keras.Sequential([layer])
    self.assertTrue(model.built)
    self.assertEqual(model.input_shape, (None, 1, 2))
@keras_parameterized.run_all_keras_modes
class ConvertToListTest(keras_parameterized.TestCase):
  """Checks base_preprocessing_layer.convert_to_list on every tensor kind."""

  # Note: We need the inputs to be lambdas below to avoid some strangeness with
  # TF1.x graph mode - specifically, if the inputs are created outside the test
  # function body, the graph inside the test body will not contain the tensors
  # that were created in the parameters.

  @parameterized.named_parameters(
      {
          "testcase_name": "ndarray",
          "inputs": lambda: np.array([[1, 2, 3], [4, 5, 6]]),
          "expected": [[1, 2, 3], [4, 5, 6]]
      }, {
          "testcase_name": "list",
          "inputs": lambda: [[1, 2, 3], [4, 5, 6]],
          "expected": [[1, 2, 3], [4, 5, 6]]
      }, {
          "testcase_name": "tensor",
          "inputs": lambda: constant_op.constant([[1, 2, 3], [4, 5, 6]]),
          "expected": [[1, 2, 3], [4, 5, 6]]
      }, {
          "testcase_name":
              "ragged_tensor",
          "inputs":
              lambda: ragged_factory_ops.constant([[1, 2, 3, 4], [4, 5, 6]]),
          "expected": [[1, 2, 3, 4], [4, 5, 6]]
      }, {
          # Missing entries of the sparse tensor come back as -1.
          "testcase_name": "sparse_tensor",
          "inputs": lambda: sparse_ops.from_dense([[1, 2, 0, 4], [4, 5, 6, 0]]),
          "expected": [[1, 2, -1, 4], [4, 5, 6, -1]]
      })
  def test_conversion(self, inputs, expected):
    values = base_preprocessing_layer.convert_to_list(inputs())
    self.assertAllEqual(expected, values)
# Script entry point: run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| |
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Full CI solver for spin-free Hamiltonian. This solver can be used to compute
doublet, triplet,...
The CI wfn are stored as a 2D array [alpha,beta], where each row corresponds
to an alpha string. For each row (alpha string), there are
total-num-beta-strings of columns. Each column corresponds to a beta string.
Different FCI solvers are implemented to support different type of symmetry.
Symmetry
File Point group Spin singlet Real hermitian* Alpha/beta degeneracy
direct_spin0_symm Yes Yes Yes Yes
direct_spin1_symm Yes No Yes Yes
direct_spin0 No Yes Yes Yes
direct_spin1 No No Yes Yes
direct_uhf No No Yes No
direct_nosym No No No** Yes
* Real hermitian Hamiltonian implies (ij|kl) = (ji|kl) = (ij|lk) = (ji|lk)
** Hamiltonian is real but not hermitian, (ij|kl) != (ji|kl) ...
'''
import sys
import ctypes
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.fci import cistring
from pyscf.fci import rdm
from pyscf.fci import spin_op
from pyscf.fci import addons
from pyscf.fci.spin_op import contract_ss
from pyscf.fci.addons import _unpack_nelec
from pyscf import __config__
libfci = lib.load_library('libfci')
def contract_1e(f1e, fcivec, norb, nelec, link_index=None):
    '''Contract the 1-electron Hamiltonian with a FCI vector to get a new FCI
    vector.

    Args:
        f1e: 1-electron Hamiltonian matrix (norb x norb).
        fcivec: FCI coefficient array, size na*nb (alpha strings x beta strings).
        norb: number of orbitals.
        nelec: number of electrons (int or (neleca, nelecb)).
        link_index: optional precomputed string-linkage tables; generated
            from (norb, nelec) when None.

    Returns:
        A new array of the same shape as fcivec: f1e|CI>.
    '''
    # The C kernels require a C-contiguous buffer.
    fcivec = numpy.asarray(fcivec, order='C')
    link_indexa, link_indexb = _unpack(norb, nelec, link_index)
    na, nlinka = link_indexa.shape[:2]
    nb, nlinkb = link_indexb.shape[:2]
    assert(fcivec.size == na*nb)
    # C routine consumes f1e packed in lower-triangular form.
    f1e_tril = lib.pack_tril(f1e)
    ci1 = numpy.zeros_like(fcivec)
    # Alpha-string contraction, then beta-string contraction, both
    # accumulated into ci1.
    libfci.FCIcontract_a_1e(f1e_tril.ctypes.data_as(ctypes.c_void_p),
                            fcivec.ctypes.data_as(ctypes.c_void_p),
                            ci1.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb),
                            ctypes.c_int(na), ctypes.c_int(nb),
                            ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
                            link_indexa.ctypes.data_as(ctypes.c_void_p),
                            link_indexb.ctypes.data_as(ctypes.c_void_p))
    libfci.FCIcontract_b_1e(f1e_tril.ctypes.data_as(ctypes.c_void_p),
                            fcivec.ctypes.data_as(ctypes.c_void_p),
                            ci1.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb),
                            ctypes.c_int(na), ctypes.c_int(nb),
                            ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
                            link_indexa.ctypes.data_as(ctypes.c_void_p),
                            link_indexb.ctypes.data_as(ctypes.c_void_p))
    return ci1
def contract_2e(eri, fcivec, norb, nelec, link_index=None):
    r'''Contract the 4-index tensor eri[pqrs] with a FCI vector

    .. math::

        |output\rangle = E_{pq} E_{rs} eri_{pq,rs} |CI\rangle \\

        E_{pq}E_{rs} = E_{pr,qs} + \delta_{qr} E_{ps} \\

        E_{pq} = p^+ q + \bar{p}^+ \bar{q}

        E_{pr,qs} = p^+ r^+ s q + \bar{p}^+ r^+ s \bar{q} + ...

    :math:`p,q,...` means spin-up orbitals and :math:`\bar{p}, \bar{q}` means
    spin-down orbitals.

    Note the input argument eri is NOT the 2e hamiltonian tensor. 2e hamiltonian is

    .. math::

        h2e &= (pq|rs) E_{pr,qs} \\
            &= (pq|rs) (E_{pq}E_{rs} - \delta_{qr} E_{ps}) \\
            &= eri_{pq,rs} E_{pq}E_{rs} \\

    So the relation between eri and hamiltonian (the 2e-integral tensor) is

    .. math::

        eri_{pq,rs} = (pq|rs) - (1/Nelec) \sum_q (pq|qs)

    to restore the symmetry between pq and rs,

    .. math::

        eri_{pq,rs} = (pq|rs) - (.5/Nelec) [\sum_q (pq|qs) + \sum_p (pq|rp)]

    See also :func:`direct_spin1.absorb_h1e`
    '''
    # C kernels require a contiguous buffer.
    fcivec = numpy.asarray(fcivec, order='C')
    # 4-fold permutation-symmetric packed storage expected by the C routine.
    eri = ao2mo.restore(4, eri, norb)
    link_indexa, link_indexb = _unpack(norb, nelec, link_index)
    na, nlinka = link_indexa.shape[:2]
    nb, nlinkb = link_indexb.shape[:2]
    assert(fcivec.size == na*nb)
    # `empty` (not zeros) is safe: the C routine overwrites every element.
    ci1 = numpy.empty_like(fcivec)

    libfci.FCIcontract_2e_spin1(eri.ctypes.data_as(ctypes.c_void_p),
                                fcivec.ctypes.data_as(ctypes.c_void_p),
                                ci1.ctypes.data_as(ctypes.c_void_p),
                                ctypes.c_int(norb),
                                ctypes.c_int(na), ctypes.c_int(nb),
                                ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
                                link_indexa.ctypes.data_as(ctypes.c_void_p),
                                link_indexb.ctypes.data_as(ctypes.c_void_p))
    return ci1
def make_hdiag(h1e, eri, norb, nelec):
    '''Diagonal Hamiltonian for Davidson preconditioner

    Returns a 1D array of length na*nb with the diagonal elements
    <D|H|D> for each determinant D.
    '''
    if h1e.dtype == numpy.complex128 or eri.dtype == numpy.complex128:
        raise NotImplementedError('Complex Hamiltonian')

    neleca, nelecb = _unpack_nelec(nelec)
    h1e = numpy.asarray(h1e, order='C')
    eri = ao2mo.restore(1, eri, norb)
    # Occupation lists for alpha strings; beta reuses the alpha list unless
    # the electron counts differ.
    occslsta = occslstb = cistring._gen_occslst(range(norb), neleca)
    if neleca != nelecb:
        occslstb = cistring._gen_occslst(range(norb), nelecb)
    na = len(occslsta)
    nb = len(occslstb)

    hdiag = numpy.empty(na*nb)
    # Coulomb (ii|jj) and exchange (ij|ji) diagonals of the 2e integrals.
    jdiag = numpy.asarray(numpy.einsum('iijj->ij',eri), order='C')
    kdiag = numpy.asarray(numpy.einsum('ijji->ij',eri), order='C')
    c_h1e = h1e.ctypes.data_as(ctypes.c_void_p)
    c_jdiag = jdiag.ctypes.data_as(ctypes.c_void_p)
    c_kdiag = kdiag.ctypes.data_as(ctypes.c_void_p)
    # The UHF kernel is reused for the spin-free case by passing identical
    # alpha/beta integral pointers.
    libfci.FCImake_hdiag_uhf(hdiag.ctypes.data_as(ctypes.c_void_p),
                             c_h1e, c_h1e, c_jdiag, c_jdiag, c_jdiag, c_kdiag, c_kdiag,
                             ctypes.c_int(norb),
                             ctypes.c_int(na), ctypes.c_int(nb),
                             ctypes.c_int(neleca), ctypes.c_int(nelecb),
                             occslsta.ctypes.data_as(ctypes.c_void_p),
                             occslstb.ctypes.data_as(ctypes.c_void_p))
    return hdiag
def absorb_h1e(h1e, eri, norb, nelec, fac=1):
    '''Modify 2e Hamiltonian to include 1e Hamiltonian contribution.

    Folds the one-electron part onto the diagonal blocks of the
    two-electron tensor and returns the result in 4-fold packed form,
    scaled by `fac`.
    '''
    if h1e.dtype == numpy.complex128 or eri.dtype == numpy.complex128:
        raise NotImplementedError('Complex Hamiltonian')

    # Total electron count; nelec may arrive as an int or (neleca, nelecb).
    if not isinstance(nelec, (int, numpy.number)):
        nelec = sum(nelec)
    v2e = ao2mo.restore(1, eri.copy(), norb)
    # Effective 1e part; the 1e-100 guard avoids division by zero electrons.
    core = h1e - numpy.einsum('jiik->jk', v2e) * .5
    core = core * (1./(nelec+1e-100))
    for p in range(norb):
        v2e[p, p, :, :] += core
        v2e[:, :, p, p] += core
    return ao2mo.restore(4, v2e, norb) * fac
def pspace(h1e, eri, norb, nelec, hdiag=None, np=400):
    '''pspace Hamiltonian to improve Davidson preconditioner. See, CPL, 169, 463

    Selects the `np` determinants with the lowest diagonal energy and builds
    the dense Hamiltonian block over them.

    Returns:
        addr: indices of the selected determinants in the full CI space.
        h0:   (np, np) dense Hamiltonian over those determinants.
    '''
    # 64-bit occupation strings limit the orbital count.
    if norb > 63:
        raise NotImplementedError('norb > 63')
    if h1e.dtype == numpy.complex128 or eri.dtype == numpy.complex128:
        raise NotImplementedError('Complex Hamiltonian')

    neleca, nelecb = _unpack_nelec(nelec)
    h1e = numpy.ascontiguousarray(h1e)
    eri = ao2mo.restore(1, eri, norb)
    nb = cistring.num_strings(norb, nelecb)
    if hdiag is None:
        hdiag = make_hdiag(h1e, eri, norb, nelec)
    if hdiag.size < np:
        addr = numpy.arange(hdiag.size)
    else:
        # Partial sort for the np lowest diagonals; full argsort fallback
        # for numpy versions without argpartition.
        try:
            addr = numpy.argpartition(hdiag, np-1)[:np].copy()
        except AttributeError:
            addr = numpy.argsort(hdiag)[:np].copy()
    # Flat CI address -> (alpha string address, beta string address).
    addra, addrb = divmod(addr, nb)
    stra = cistring.addrs2str(norb, neleca, addra)
    strb = cistring.addrs2str(norb, nelecb, addrb)
    np = len(addr)
    h0 = numpy.zeros((np,np))
    # Fills only the lower triangle of h0.
    libfci.FCIpspace_h0tril(h0.ctypes.data_as(ctypes.c_void_p),
                            h1e.ctypes.data_as(ctypes.c_void_p),
                            eri.ctypes.data_as(ctypes.c_void_p),
                            stra.ctypes.data_as(ctypes.c_void_p),
                            strb.ctypes.data_as(ctypes.c_void_p),
                            ctypes.c_int(norb), ctypes.c_int(np))

    HERMITIAN_THRESHOLD = 1e-10
    if (abs(h1e - h1e.T).max() < HERMITIAN_THRESHOLD and
        abs(eri - eri.transpose(1,0,3,2)).max() < HERMITIAN_THRESHOLD):
        # symmetric Hamiltonian
        h0 = lib.hermi_triu(h0)
    else:
        # Fill the upper triangular part
        # (rerun the C kernel on the transposed integrals; Fortran order
        # makes the second lower-triangle fill land in the upper triangle).
        h0 = numpy.asarray(h0, order='F')
        h1e = numpy.asarray(h1e.T, order='C')
        eri = numpy.asarray(eri.transpose(1,0,3,2), order='C')
        libfci.FCIpspace_h0tril(h0.ctypes.data_as(ctypes.c_void_p),
                                h1e.ctypes.data_as(ctypes.c_void_p),
                                eri.ctypes.data_as(ctypes.c_void_p),
                                stra.ctypes.data_as(ctypes.c_void_p),
                                strb.ctypes.data_as(ctypes.c_void_p),
                                ctypes.c_int(norb), ctypes.c_int(np))

    idx = numpy.arange(np)
    h0[idx,idx] = hdiag[addr]
    return addr, h0
# be careful with single determinant initial guess. It may diverge the
# preconditioner when the eigvalue of first davidson iter equals to hdiag
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
           lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
           davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
           ecore=0, **kwargs):
    '''Module-level FCI driver: delegate to _kfactory with the default
    FCISolver class. orbsym/wfnsym are accepted for interface compatibility
    and are not forwarded.'''
    return _kfactory(FCISolver, h1e, eri, norb, nelec, ci0=ci0,
                     level_shift=level_shift, tol=tol, lindep=lindep,
                     max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                     davidson_only=davidson_only, pspace_size=pspace_size,
                     ecore=ecore, **kwargs)
def _kfactory(Solver, h1e, eri, norb, nelec, ci0=None, level_shift=1e-3,
tol=1e-10, lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, ecore=0, **kwargs):
cis = Solver(None)
cis.level_shift = level_shift
cis.conv_tol = tol
cis.lindep = lindep
cis.max_cycle = max_cycle
cis.max_space = max_space
cis.nroots = nroots
cis.davidson_only = davidson_only
cis.pspace_size = pspace_size
unknown = {}
for k in kwargs:
if not hasattr(cis, k):
unknown[k] = kwargs[k]
setattr(cis, k, kwargs[k])
if unknown:
sys.stderr.write('Unknown keys %s for FCI kernel %s\n' %
(str(unknown.keys()), __name__))
e, c = cis.kernel(h1e, eri, norb, nelec, ci0, ecore=ecore, **unknown)
return e, c
def energy(h1e, eri, fcivec, norb, nelec, link_index=None):
    '''Compute the FCI electronic energy for given Hamiltonian and FCI vector.
    '''
    # Fold h1e into the 2e tensor, then evaluate <c|H|c> as a flat dot product.
    h2e_eff = absorb_h1e(h1e, eri, norb, nelec, .5)
    hc = contract_2e(h2e_eff, fcivec, norb, nelec, link_index)
    return numpy.dot(fcivec.reshape(-1), hc.reshape(-1))
def make_rdm1s(fcivec, norb, nelec, link_index=None):
    r'''Spin separated 1-particle density matrices.
    The return values include two density matrices: (alpha,alpha), (beta,beta)

    dm1[p,q] = <q^\dagger p>

    The convention is based on McWeeney's book, Eq (5.4.20).
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)
    '''
    if link_index is None:
        n_alpha, n_beta = _unpack_nelec(nelec)
        # Build alpha/beta string-linkage tables on demand.
        link_index = (cistring.gen_linkstr_index(range(norb), n_alpha),
                      cistring.gen_linkstr_index(range(norb), n_beta))
    dm_a = rdm.make_rdm1_spin1('FCImake_rdm1a', fcivec, fcivec,
                               norb, nelec, link_index)
    dm_b = rdm.make_rdm1_spin1('FCImake_rdm1b', fcivec, fcivec,
                               norb, nelec, link_index)
    return dm_a, dm_b
def make_rdm1(fcivec, norb, nelec, link_index=None):
    r'''Spin-traced one-particle density matrix

    dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>

    The convention is based on McWeeney's book, Eq (5.4.20)
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)
    '''
    # Sum of the spin-separated matrices.
    dm_a, dm_b = make_rdm1s(fcivec, norb, nelec, link_index)
    return dm_a + dm_b
def make_rdm12s(fcivec, norb, nelec, link_index=None, reorder=True):
    r'''Spin separated 1- and 2-particle density matrices.
    The return values include two lists, a list of 1-particle density matrices
    and a list of 2-particle density matrices.  The density matrices are:
    (alpha,alpha), (beta,beta) for 1-particle density matrices;
    (alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta),
    (beta,beta,beta,beta) for 2-particle density matrices.

    1pdm[p,q] = :math:`\langle q^\dagger p\rangle`;
    2pdm[p,q,r,s] = :math:`\langle p^\dagger r^\dagger s q\rangle`.

    Energy should be computed as
    E = einsum('pq,qp', h1, 1pdm) + 1/2 * einsum('pqrs,pqrs', eri, 2pdm)
    where h1[p,q] = <p|h|q> and eri[p,q,r,s] = (pq|rs)
    '''
    # Same-spin blocks (symmetry flag 1).
    dm1a, dm2aa = rdm.make_rdm12_spin1('FCIrdm12kern_a', fcivec, fcivec,
                                       norb, nelec, link_index, 1)
    dm1b, dm2bb = rdm.make_rdm12_spin1('FCIrdm12kern_b', fcivec, fcivec,
                                       norb, nelec, link_index, 1)
    # Mixed-spin block (symmetry flag 0); its 1pdm output is discarded.
    _, dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', fcivec, fcivec,
                                    norb, nelec, link_index, 0)
    if reorder:
        # Normal-order the same-spin blocks (1pdm/2pdm consistency).
        dm1a, dm2aa = rdm.reorder_rdm(dm1a, dm2aa, inplace=True)
        dm1b, dm2bb = rdm.reorder_rdm(dm1b, dm2bb, inplace=True)
    return (dm1a, dm1b), (dm2aa, dm2ab, dm2bb)
def make_rdm12(fcivec, norb, nelec, link_index=None, reorder=True):
    r'''Spin traced 1- and 2-particle density matrices.

    1pdm[p,q] = :math:`\langle q_\alpha^\dagger p_\alpha \rangle +
                       \langle q_\beta^\dagger p_\beta \rangle`;
    2pdm[p,q,r,s] = :math:`\langle p_\alpha^\dagger r_\alpha^\dagger s_\alpha q_\alpha\rangle +
                           \langle p_\beta^\dagger r_\alpha^\dagger s_\alpha q_\beta\rangle +
                           \langle p_\alpha^\dagger r_\beta^\dagger s_\beta q_\alpha\rangle +
                           \langle p_\beta^\dagger r_\beta^\dagger s_\beta q_\beta\rangle`.

    Energy should be computed as
    E = einsum('pq,qp', h1, 1pdm) + 1/2 * einsum('pqrs,pqrs', eri, 2pdm)
    where h1[p,q] = <p|h|q> and eri[p,q,r,s] = (pq|rs)
    '''
    # Equivalent (but slower) spin-separated formulation, kept for reference:
    #(dm1a, dm1b), (dm2aa, dm2ab, dm2bb) = \
    #        make_rdm12s(fcivec, norb, nelec, link_index, reorder)
    #return dm1a+dm1b, dm2aa+dm2ab+dm2ab.transpose(2,3,0,1)+dm2bb
    # Spin-free kernel computes the traced matrices in one pass.
    dm1, dm2 = rdm.make_rdm12_spin1('FCIrdm12kern_sf', fcivec, fcivec,
                                    norb, nelec, link_index, 1)
    if reorder:
        dm1, dm2 = rdm.reorder_rdm(dm1, dm2, inplace=True)
    return dm1, dm2
def trans_rdm1s(cibra, ciket, norb, nelec, link_index=None):
    r'''Spin separated transition 1-particle density matrices.
    The return values include two density matrices: (alpha,alpha), (beta,beta).
    See also function :func:`make_rdm1s`

    1pdm[p,q] = :math:`\langle q^\dagger p \rangle`
    '''
    # <bra| ... |ket> versions of the alpha and beta 1pdm kernels.
    tdm_a = rdm.make_rdm1_spin1('FCItrans_rdm1a', cibra, ciket,
                                norb, nelec, link_index)
    tdm_b = rdm.make_rdm1_spin1('FCItrans_rdm1b', cibra, ciket,
                                norb, nelec, link_index)
    return tdm_a, tdm_b
def trans_rdm1(cibra, ciket, norb, nelec, link_index=None):
    r'''Spin traced transition 1-particle transition density matrices.

    1pdm[p,q] = :math:`\langle q_\alpha^\dagger p_\alpha \rangle
                       + \langle q_\beta^\dagger p_\beta \rangle`
    '''
    # Trace out spin by summing the spin-separated transition matrices.
    tdm_a, tdm_b = trans_rdm1s(cibra, ciket, norb, nelec, link_index)
    return tdm_a + tdm_b
def trans_rdm12s(cibra, ciket, norb, nelec, link_index=None, reorder=True):
    r'''Spin separated 1- and 2-particle transition density matrices.
    The return values include two lists, a list of 1-particle transition
    density matrices and a list of 2-particle transition density matrices.
    The density matrices are:
    (alpha,alpha), (beta,beta) for 1-particle transition density matrices;
    (alpha,alpha,alpha,alpha), (alpha,alpha,beta,beta),
    (beta,beta,alpha,alpha), (beta,beta,beta,beta) for 2-particle transition
    density matrices.

    1pdm[p,q] = :math:`\langle q^\dagger p\rangle`;
    2pdm[p,q,r,s] = :math:`\langle p^\dagger r^\dagger s q\rangle`.
    '''
    dm1a, dm2aa = rdm.make_rdm12_spin1('FCItdm12kern_a', cibra, ciket,
                                       norb, nelec, link_index, 2)
    dm1b, dm2bb = rdm.make_rdm12_spin1('FCItdm12kern_b', cibra, ciket,
                                       norb, nelec, link_index, 2)
    _, dm2ab = rdm.make_rdm12_spin1('FCItdm12kern_ab', cibra, ciket,
                                    norb, nelec, link_index, 0)
    # The beta-alpha block is obtained from the same ab kernel with bra/ket
    # swapped, then transposed back.
    _, dm2ba = rdm.make_rdm12_spin1('FCItdm12kern_ab', ciket, cibra,
                                    norb, nelec, link_index, 0)
    dm2ba = dm2ba.transpose(3,2,1,0)
    if reorder:
        dm1a, dm2aa = rdm.reorder_rdm(dm1a, dm2aa, inplace=True)
        dm1b, dm2bb = rdm.reorder_rdm(dm1b, dm2bb, inplace=True)
    return (dm1a, dm1b), (dm2aa, dm2ab, dm2ba, dm2bb)
def trans_rdm12(cibra, ciket, norb, nelec, link_index=None, reorder=True):
    r'''Spin traced transition 1- and 2-particle transition density matrices.

    1pdm[p,q] = :math:`\langle q^\dagger p\rangle`;
    2pdm[p,q,r,s] = :math:`\langle p^\dagger r^\dagger s q\rangle`.
    '''
    # Equivalent (but slower) spin-separated formulation, kept for reference:
    #(dm1a, dm1b), (dm2aa, dm2ab, dm2ba, dm2bb) = \
    #        trans_rdm12s(cibra, ciket, norb, nelec, link_index, reorder)
    #return dm1a+dm1b, dm2aa+dm2ab+dm2ba+dm2bb
    # Spin-free kernel computes the traced matrices in one pass.
    dm1, dm2 = rdm.make_rdm12_spin1('FCItdm12kern_sf', cibra, ciket,
                                    norb, nelec, link_index, 2)
    if reorder:
        dm1, dm2 = rdm.reorder_rdm(dm1, dm2, inplace=True)
    return dm1, dm2
def _get_init_guess(na, nb, nroots, hdiag):
'''Initial guess is the single Slater determinant
'''
# The "nroots" lowest determinats based on energy expectation value.
ci0 = []
try:
addrs = numpy.argpartition(hdiag, nroots-1)[:nroots]
except AttributeError:
addrs = numpy.argsort(hdiag)[:nroots]
for addr in addrs:
x = numpy.zeros((na*nb))
x[addr] = 1
ci0.append(x.ravel())
# Add noise
ci0[0][0 ] += 1e-5
ci0[0][-1] -= 1e-5
return ci0
def get_init_guess(norb, nelec, nroots, hdiag):
    '''Initial guess is the single Slater determinant
    '''
    n_alpha, n_beta = _unpack_nelec(nelec)
    # CI-space dimensions for the alpha and beta string lists.
    dims = (cistring.num_strings(norb, n_alpha),
            cistring.num_strings(norb, n_beta))
    return _get_init_guess(dims[0], dims[1], nroots, hdiag)
###############################################################
# direct-CI driver
###############################################################
def kernel_ms1(fci, h1e, eri, norb, nelec, ci0=None, link_index=None,
               tol=None, lindep=None, max_cycle=None, max_space=None,
               nroots=None, davidson_only=None, pspace_size=None,
               max_memory=None, verbose=None, ecore=0, **kwargs):
    '''Direct-CI driver: diagonalize the FCI Hamiltonian for the given
    solver object `fci`.

    Unset keyword options default to the corresponding `fci` attributes.
    Returns (energy + ecore, civec); with nroots > 1 the second element is a
    list of CI vectors.
    '''
    if nroots is None: nroots = fci.nroots
    if davidson_only is None: davidson_only = fci.davidson_only
    if pspace_size is None: pspace_size = fci.pspace_size
    if max_memory is None:
        max_memory = fci.max_memory - lib.current_memory()[0]
    log = logger.new_logger(fci, verbose)

    nelec = _unpack_nelec(nelec, fci.spin)
    assert(0 <= nelec[0] <= norb and 0 <= nelec[1] <= norb)
    link_indexa, link_indexb = _unpack(norb, nelec, link_index)
    na = link_indexa.shape[0]
    nb = link_indexb.shape[0]

    # Rough footprint check (6 CI-sized float64 buffers, in MB).
    if max_memory < na*nb*6*8e-6:
        log.warn('Not enough memory for FCI solver. '
                 'The minimal requirement is %.0f MB', na*nb*60e-6)

    hdiag = fci.make_hdiag(h1e, eri, norb, nelec)
    nroots = min(hdiag.size, nroots)

    try:
        # Dense Hamiltonian over the lowest-diagonal determinants (pspace).
        addr, h0 = fci.pspace(h1e, eri, norb, nelec, hdiag, max(pspace_size,nroots))
        if pspace_size > 0:
            pw, pv = fci.eig(h0)
        else:
            pw = pv = None

        # If the pspace covers the whole CI space, its exact eigenvectors can
        # be returned directly without running Davidson.
        if pspace_size >= na*nb and ci0 is None and not davidson_only:
            # The degenerated wfn can break symmetry. The davidson iteration with proper
            # initial guess doesn't have this issue
            if na*nb == 1:
                return pw[0]+ecore, pv[:,0].reshape(1,1)
            elif nroots > 1:
                civec = numpy.empty((nroots,na*nb))
                civec[:,addr] = pv[:,:nroots].T
                return pw[:nroots]+ecore, [c.reshape(na,nb) for c in civec]
            elif abs(pw[0]-pw[1]) > 1e-12:
                # Non-degenerate ground state: safe to take it directly.
                civec = numpy.empty((na*nb))
                civec[addr] = pv[:,0]
                return pw[0]+ecore, civec.reshape(na,nb)
    except NotImplementedError:
        # Solver without pspace support: fall through to plain Davidson.
        addr = [0]
        pw = pv = None

    precond = fci.make_precond(hdiag, pw, pv, addr)

    h2e = fci.absorb_h1e(h1e, eri, norb, nelec, .5)
    def hop(c):
        # Matrix-vector product H|c> for the Davidson solver.
        hc = fci.contract_2e(h2e, c, norb, nelec, (link_indexa,link_indexb))
        return hc.ravel()

    if ci0 is None:
        if callable(getattr(fci, 'get_init_guess', None)):
            ci0 = lambda: fci.get_init_guess(norb, nelec, nroots, hdiag)
        else:
            def ci0():  # lazy initialization to reduce memory footprint
                x0 = []
                for i in range(nroots):
                    x = numpy.zeros(na*nb)
                    x[addr[i]] = 1
                    x0.append(x)
                return x0
    elif not callable(ci0):
        # Normalize user-supplied guesses to a flat list of 1D vectors.
        if isinstance(ci0, numpy.ndarray) and ci0.size == na*nb:
            ci0 = [ci0.ravel()]
        else:
            ci0 = [x.ravel() for x in ci0]
        # Add vectors if not enough initial guess is given
        if len(ci0) < nroots:
            if callable(getattr(fci, 'get_init_guess', None)):
                ci0.extend(fci.get_init_guess(norb, nelec, nroots, hdiag)[len(ci0):])
            else:
                for i in range(len(ci0), nroots):
                    x = numpy.zeros(na*nb)
                    x[addr[i]] = 1
                    ci0.append(x)

    if tol is None: tol = fci.conv_tol
    if lindep is None: lindep = fci.lindep
    if max_cycle is None: max_cycle = fci.max_cycle
    if max_space is None: max_space = fci.max_space
    tol_residual = getattr(fci, 'conv_tol_residual', None)

    with lib.with_omp_threads(fci.threads):
        #e, c = lib.davidson(hop, ci0, precond, tol=fci.conv_tol, lindep=fci.lindep)
        e, c = fci.eig(hop, ci0, precond, tol=tol, lindep=lindep,
                       max_cycle=max_cycle, max_space=max_space, nroots=nroots,
                       max_memory=max_memory, verbose=log, follow_state=True,
                       tol_residual=tol_residual, **kwargs)
    if nroots > 1:
        return e+ecore, [ci.reshape(na,nb) for ci in c]
    else:
        return e+ecore, c.reshape(na,nb)
def make_pspace_precond(hdiag, pspaceig, pspaceci, addr, level_shift=0):
    '''Build a Davidson preconditioner based on the pspace Hamiltonian
    (CPL, 169, 463).

    On the determinants listed in ``addr`` the inverse of (H0 - e0) is
    applied exactly through the pspace eigen-decomposition (``pspaceig``,
    ``pspaceci``); everywhere else the diagonal approximation
    1/(hdiag - e0) is used.  ``level_shift`` offsets e0 to avoid
    singularities near an eigenvalue.
    '''
    def pspace_precond(resid, e0, xvec, *args):
        shifted = e0 - level_shift
        # Exact inverse of (H0 - e0) on the pspace block, assembled from
        # the pspace eigenpairs.
        h0e0inv = numpy.dot(pspaceci/(pspaceig - shifted), pspaceci.T)
        hdiaginv = 1/(hdiag - shifted)
        # Clamp near-singular diagonal entries.
        hdiaginv[abs(hdiaginv) > 1e8] = 1e8
        h0x0 = xvec * hdiaginv
        h0x0[addr] = numpy.dot(h0e0inv, xvec[addr])
        h0r = resid * hdiaginv
        h0r[addr] = numpy.dot(h0e0inv, resid[addr])
        # Scalar correction from the pspace preconditioning scheme (see the
        # reference above), then apply the diagonal inverse.
        e1 = numpy.dot(xvec, h0r) / numpy.dot(xvec, h0x0)
        return (resid - e1*xvec) * hdiaginv
    return pspace_precond
def make_diag_precond(hdiag, pspaceig, pspaceci, addr, level_shift=0):
    # Plain diagonal preconditioner.  pspaceig/pspaceci/addr are accepted but
    # ignored so the signature matches make_pspace_precond (callers can pick
    # either factory interchangeably).
    return lib.make_diag_precond(hdiag, level_shift)
class FCIBase(lib.StreamObject):
    '''Full CI solver

    Attributes:
        verbose : int
            Print level.  Default value equals to :class:`Mole.verbose`.
        max_cycle : int
            Total number of iterations. Default is 100
        max_space : int
            Davidson iteration space size. Default is 12.
        conv_tol : float
            Energy convergence tolerance. Default is 1e-10.
        level_shift : float
            Level shift applied in the preconditioner to avoid singularity.
            Default is 1e-3
        davidson_only : bool
            By default, the entire Hamiltonian matrix will be constructed and
            diagonalized if the system is small (see attribute pspace_size).
            Setting this parameter to True will enforce the eigenvalue
            problems being solved by Davidson subspace algorithm.  This flag
            should be enabled when initial guess is given or particular spin
            symmetry or point-group symmetry is required because the initial
            guess or symmetry are completely ignored in the direct diagonlization.
        pspace_size : int
            The dimension of Hamiltonian matrix over which Davidson iteration
            algorithm will be used for the eigenvalue problem.  Default is 400.
            This is roughly corresponding to a (6e,6o) system.
        nroots : int
            Number of states to be solved.  Default is 1, the ground state.
        spin : int or None
            Spin (2S = nalpha-nbeta) of the system.  If this attribute is None,
            spin will be determined by the argument nelec (number of electrons)
            of the kernel function.
        wfnsym : str or int
            Symmetry of wavefunction.  It is used only in direct_spin1_symm
            and direct_spin0_symm solver.

    Saved results

        eci : float or a list of float
            FCI energy(ies)
        ci : nparray
            FCI wfn vector(s)
        converged : bool (or a list of bool for multiple roots)
            Whether davidson iteration is converged

    Examples:

    >>> from pyscf import gto, scf, ao2mo, fci
    >>> mol = gto.M(atom='Li 0 0 0; Li 0 0 1', basis='sto-3g')
    >>> mf = scf.RHF(mol).run()
    >>> h1 = mf.mo_coeff.T.dot(mf.get_hcore()).dot(mf.mo_coeff)
    >>> eri = ao2mo.kernel(mol, mf.mo_coeff)
    >>> cisolver = fci.direct_spin1.FCI(mol)
    >>> e, ci = cisolver.kernel(h1, eri, h1.shape[1], mol.nelec, ecore=mol.energy_nuc())
    >>> print(e)
    -14.4197890826
    '''

    # Default settings, overridable through the pyscf __config__ mechanism.
    max_cycle = getattr(__config__, 'fci_direct_spin1_FCI_max_cycle', 100)
    max_space = getattr(__config__, 'fci_direct_spin1_FCI_max_space', 12)
    conv_tol = getattr(__config__, 'fci_direct_spin1_FCI_conv_tol', 1e-10)
    conv_tol_residual = getattr(__config__, 'fci_direct_spin1_FCI_conv_tol_residual', None)
    lindep = getattr(__config__, 'fci_direct_spin1_FCI_lindep', 1e-14)
    # level shift in precond
    level_shift = getattr(__config__, 'fci_direct_spin1_FCI_level_shift', 1e-3)
    # force the diagonlization use davidson iteration.  When the CI space
    # is small, the solver exactly diagonlizes the Hamiltonian.  But this
    # solution will ignore the initial guess.  Setting davidson_only can
    # enforce the solution on the initial guess state
    davidson_only = getattr(__config__, 'fci_direct_spin1_FCI_davidson_only', False)
    pspace_size = getattr(__config__, 'fci_direct_spin1_FCI_pspace_size', 400)
    threads = getattr(__config__, 'fci_direct_spin1_FCI_threads', None)
    lessio = getattr(__config__, 'fci_direct_spin1_FCI_lessio', False)

    def __init__(self, mol=None):
        # Without a molecule, fall back to global defaults for output stream,
        # verbosity and the memory allowance.
        if mol is None:
            self.stdout = sys.stdout
            self.verbose = logger.NOTE
            self.max_memory = lib.param.MAX_MEMORY
        else:
            self.stdout = mol.stdout
            self.verbose = mol.verbose
            self.max_memory = mol.max_memory
        self.mol = mol
        self.nroots = 1
        self.spin = None
        # Initialize symmetry attributes for the compatibility with direct_spin1_symm
        # solver. They are not used by direct_spin1 solver.
        self.orbsym = None
        self.wfnsym = None

        # Result attributes, populated by kernel().
        self.converged = False
        self.norb = None
        self.nelec = None
        self.eci = None
        self.ci = None

        keys = set(('max_cycle', 'max_space', 'conv_tol', 'lindep',
                    'level_shift', 'davidson_only', 'pspace_size', 'threads',
                    'lessio'))
        self._keys = set(self.__dict__.keys()).union(keys)

    @property
    def e_tot(self):
        # Alias of eci, for interface consistency with other solver objects.
        return self.eci

    @property
    def nstates(self):
        # Alias of nroots.
        return self.nroots
    @nstates.setter
    def nstates(self, x):
        self.nroots = x

    def dump_flags(self, verbose=None):
        '''Log the current solver settings.'''
        log = logger.new_logger(self, verbose)
        log.info('******** %s ********', self.__class__)
        log.info('max. cycles = %d', self.max_cycle)
        log.info('conv_tol = %g', self.conv_tol)
        log.info('davidson only = %s', self.davidson_only)
        log.info('linear dependence = %g', self.lindep)
        log.info('level shift = %g', self.level_shift)
        log.info('max iter space = %d', self.max_space)
        log.info('max_memory %d MB', self.max_memory)
        log.info('nroots = %d', self.nroots)
        log.info('pspace_size = %d', self.pspace_size)
        log.info('spin = %s', self.spin)
        return self

    # The following methods are thin wrappers over the module-level functions;
    # each resolves the nelec/spin specification first via _unpack_nelec.
    @lib.with_doc(absorb_h1e.__doc__)
    def absorb_h1e(self, h1e, eri, norb, nelec, fac=1):
        nelec = _unpack_nelec(nelec, self.spin)
        return absorb_h1e(h1e, eri, norb, nelec, fac)

    @lib.with_doc(make_hdiag.__doc__)
    def make_hdiag(self, h1e, eri, norb, nelec):
        nelec = _unpack_nelec(nelec, self.spin)
        return make_hdiag(h1e, eri, norb, nelec)

    @lib.with_doc(pspace.__doc__)
    def pspace(self, h1e, eri, norb, nelec, hdiag=None, np=400):
        nelec = _unpack_nelec(nelec, self.spin)
        return pspace(h1e, eri, norb, nelec, hdiag, np)

    @lib.with_doc(contract_1e.__doc__)
    def contract_1e(self, f1e, fcivec, norb, nelec, link_index=None, **kwargs):
        nelec = _unpack_nelec(nelec, self.spin)
        return contract_1e(f1e, fcivec, norb, nelec, link_index, **kwargs)

    @lib.with_doc(contract_2e.__doc__)
    def contract_2e(self, eri, fcivec, norb, nelec, link_index=None, **kwargs):
        nelec = _unpack_nelec(nelec, self.spin)
        return contract_2e(eri, fcivec, norb, nelec, link_index, **kwargs)

    def eig(self, op, x0=None, precond=None, **kwargs):
        # Dense-matrix input: direct (exact) diagonalization.
        if isinstance(op, numpy.ndarray):
            self.converged = True
            return scipy.linalg.eigh(op)

        # Otherwise run the iterative Davidson solver.
        # NOTE(review): 'nroots' must be present in kwargs (kernel_ms1 always
        # passes it); a single-root solve unwraps the one-element result lists.
        self.converged, e, ci = \
                lib.davidson1(lambda xs: [op(x) for x in xs],
                              x0, precond, lessio=self.lessio, **kwargs)
        if kwargs['nroots'] == 1:
            self.converged = self.converged[0]
            e = e[0]
            ci = ci[0]
        return e, ci

    def make_precond(self, hdiag, pspaceig, pspaceci, addr):
        # Use the plain diagonal preconditioner when no pspace
        # eigen-decomposition is available.
        if pspaceig is None:
            return make_diag_precond(hdiag, pspaceig, pspaceci, addr,
                                     self.level_shift)
        else:
            return make_pspace_precond(hdiag, pspaceig, pspaceci, addr,
                                       self.level_shift)

    @lib.with_doc(get_init_guess.__doc__)
    def get_init_guess(self, norb, nelec, nroots, hdiag):
        return get_init_guess(norb, nelec, nroots, hdiag)

    def kernel(self, h1e, eri, norb, nelec, ci0=None,
               tol=None, lindep=None, max_cycle=None, max_space=None,
               nroots=None, davidson_only=None, pspace_size=None,
               orbsym=None, wfnsym=None, ecore=0, **kwargs):
        '''Solve the FCI eigenvalue problem (see kernel_ms1).  The energy and
        CI vector are also stored on self.eci / self.ci.

        orbsym/wfnsym are accepted for interface compatibility with the
        symmetry-adapted solvers; they are not used here.
        '''
        if self.verbose >= logger.WARN:
            self.check_sanity()
        self.norb = norb
        self.nelec = nelec
        self.eci, self.ci = \
                kernel_ms1(self, h1e, eri, norb, nelec, ci0, None,
                           tol, lindep, max_cycle, max_space, nroots,
                           davidson_only, pspace_size, ecore=ecore, **kwargs)
        return self.eci, self.ci

    @lib.with_doc(energy.__doc__)
    def energy(self, h1e, eri, fcivec, norb, nelec, link_index=None):
        # <c|H|c>, evaluated with one contraction against the folded
        # Hamiltonian h2e.
        nelec = _unpack_nelec(nelec, self.spin)
        h2e = self.absorb_h1e(h1e, eri, norb, nelec, .5)
        ci1 = self.contract_2e(h2e, fcivec, norb, nelec, link_index)
        return numpy.dot(fcivec.reshape(-1), ci1.reshape(-1))

    def spin_square(self, fcivec, norb, nelec):
        nelec = _unpack_nelec(nelec, self.spin)
        return spin_op.spin_square0(fcivec, norb, nelec)
    spin_square.__doc__ = spin_op.spin_square0.__doc__

    @lib.with_doc(make_rdm1s.__doc__)
    def make_rdm1s(self, fcivec, norb, nelec, link_index=None):
        nelec = _unpack_nelec(nelec, self.spin)
        return make_rdm1s(fcivec, norb, nelec, link_index)

    @lib.with_doc(make_rdm1.__doc__)
    def make_rdm1(self, fcivec, norb, nelec, link_index=None):
        nelec = _unpack_nelec(nelec, self.spin)
        return make_rdm1(fcivec, norb, nelec, link_index)

    @lib.with_doc(make_rdm12s.__doc__)
    def make_rdm12s(self, fcivec, norb, nelec, link_index=None, reorder=True):
        nelec = _unpack_nelec(nelec, self.spin)
        return make_rdm12s(fcivec, norb, nelec, link_index, reorder)

    @lib.with_doc(make_rdm12.__doc__)
    def make_rdm12(self, fcivec, norb, nelec, link_index=None, reorder=True):
        nelec = _unpack_nelec(nelec, self.spin)
        return make_rdm12(fcivec, norb, nelec, link_index, reorder)

    def make_rdm2(self, fcivec, norb, nelec, link_index=None, reorder=True):
        r'''Spin traced 2-particle density matrix.

        NOTE the 2pdm is :math:`\langle p^\dagger q^\dagger s r\rangle` but
        stored as [p,r,q,s]
        '''
        nelec = _unpack_nelec(nelec, self.spin)
        return self.make_rdm12(fcivec, norb, nelec, link_index, reorder)[1]

    @lib.with_doc(trans_rdm1s.__doc__)
    def trans_rdm1s(self, cibra, ciket, norb, nelec, link_index=None):
        nelec = _unpack_nelec(nelec, self.spin)
        return trans_rdm1s(cibra, ciket, norb, nelec, link_index)

    @lib.with_doc(trans_rdm1.__doc__)
    def trans_rdm1(self, cibra, ciket, norb, nelec, link_index=None):
        nelec = _unpack_nelec(nelec, self.spin)
        return trans_rdm1(cibra, ciket, norb, nelec, link_index)

    @lib.with_doc(trans_rdm12s.__doc__)
    def trans_rdm12s(self, cibra, ciket, norb, nelec, link_index=None,
                     reorder=True):
        nelec = _unpack_nelec(nelec, self.spin)
        return trans_rdm12s(cibra, ciket, norb, nelec, link_index, reorder)

    @lib.with_doc(trans_rdm12.__doc__)
    def trans_rdm12(self, cibra, ciket, norb, nelec, link_index=None,
                    reorder=True):
        nelec = _unpack_nelec(nelec, self.spin)
        return trans_rdm12(cibra, ciket, norb, nelec, link_index, reorder)

    def large_ci(self, fcivec, norb, nelec,
                 tol=getattr(__config__, 'fci_addons_large_ci_tol', .1),
                 return_strs=getattr(__config__, 'fci_addons_large_ci_return_strs', True)):
        '''List the dominant determinants (|coefficient| > tol) of a CI vector.'''
        nelec = _unpack_nelec(nelec, self.spin)
        return addons.large_ci(fcivec, norb, nelec, tol, return_strs)

    def contract_ss(self, fcivec, norb, nelec):  # noqa: F811
        '''Apply the S^2 operator to a CI vector.'''
        from pyscf.fci import spin_op
        nelec = _unpack_nelec(nelec, self.spin)
        return spin_op.contract_ss(fcivec, norb, nelec)

    def gen_linkstr(self, norb, nelec, tril=True, spin=None):
        '''Generate the (alpha, beta) string-excitation lookup tables.
        tril=True returns the compressed lower-triangular index form used by
        the 2e contraction routines.'''
        if spin is None:
            spin = self.spin
        neleca, nelecb = _unpack_nelec(nelec, spin)
        if tril:
            link_indexa = cistring.gen_linkstr_index_trilidx(range(norb), neleca)
            link_indexb = cistring.gen_linkstr_index_trilidx(range(norb), nelecb)
        else:
            link_indexa = cistring.gen_linkstr_index(range(norb), neleca)
            link_indexb = cistring.gen_linkstr_index(range(norb), nelecb)
        return link_indexa, link_indexb
class FCISolver(FCIBase):
    '''Concrete FCI solver.

    transform_ci_for_orbital_rotation is only available for true FCI
    wavefunctions; some approximate FCI solvers lack this functionality,
    which is why it lives here rather than on FCIBase.
    '''
    def transform_ci_for_orbital_rotation(self, fcivec, norb, nelec, u):
        '''Transform the CI vector consistently with the orbital rotation u.'''
        return addons.transform_ci_for_orbital_rotation(
            fcivec, norb, _unpack_nelec(nelec, self.spin), u)

FCI = FCISolver
def _unpack(norb, nelec, link_index, spin=None):
    '''Return (link_indexa, link_indexb), generating the string-excitation
    lookup tables when the caller did not supply them.  The beta table
    aliases the alpha table when both spins have the same electron count.
    '''
    if link_index is not None:
        return link_index
    neleca, nelecb = _unpack_nelec(nelec, spin)
    link_indexa = cistring.gen_linkstr_index_trilidx(range(norb), neleca)
    if neleca == nelecb:
        link_indexb = link_indexa
    else:
        link_indexb = cistring.gen_linkstr_index_trilidx(range(norb), nelecb)
    return link_indexa, link_indexb
if __name__ == '__main__':
    # Self-test: a 6-H cluster in STO-3G, solved as a spin-polarized system
    # with Davidson iteration enforced; prints the energy and its deviation
    # from the stored reference value.
    from functools import reduce
    from pyscf import gto
    from pyscf import scf

    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None#"out_h2o"
    mol.atom = [
        ['H', ( 1.,-1. , 0. )],
        ['H', ( 0.,-1. ,-1. )],
        ['H', ( 1.,-0.5 ,-1. )],
        #['H', ( 0.,-0.5 ,-1. )],
        #['H', ( 0.,-0.5 ,-0. )],
        ['H', ( 0.,-0. ,-1. )],
        ['H', ( 1.,-0.5 , 0. )],
        ['H', ( 0., 1. , 1. )],
    ]
    mol.basis = {'H': 'sto-3g'}
    mol.build()

    m = scf.RHF(mol)
    ehf = m.scf()

    cis = FCISolver(mol)
    norb = m.mo_coeff.shape[1]
    nelec = mol.nelectron - 2
    # One- and two-electron integrals transformed to the MO basis.
    h1e = reduce(numpy.dot, (m.mo_coeff.T, m.get_hcore(), m.mo_coeff))
    eri = ao2mo.incore.general(m._eri, (m.mo_coeff,)*4, compact=False)
    eri = eri.reshape(norb,norb,norb,norb)
    # Split the electrons unevenly between the spins (nea > neb) to exercise
    # the ms != 0 code path.
    nea = nelec//2 + 1
    neb = nelec//2 - 1
    nelec = (nea, neb)

    e1 = cis.kernel(h1e, eri, norb, nelec, davidson_only=True)[0]
    print(e1, e1 - -7.7466756526056004)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class FeaturesOperations(object):
    """FeaturesOperations operations.

    Auto-generated client for the Azure Resource Manager preview-features
    (AFEC) endpoints.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version to use for this operation. Constant value: "2015-12-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2015-12-01"

        self.config = config

    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all the preview features that are available through AFEC for the
        subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of FeatureResult
        :rtype:
         ~azure.mgmt.resource.features.v2015_12_01.models.FeatureResultPaged[~azure.mgmt.resource.features.v2015_12_01.models.FeatureResult]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # internal_paging is invoked lazily by FeatureResultPaged: once with
        # next_link=None for the first page, then with each service-provided
        # next_link for subsequent pages.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Features/features'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                # next_link already carries its own query string.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.FeatureResultPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.FeatureResultPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def list(
            self, resource_provider_namespace, custom_headers=None, raw=False, **operation_config):
        """Gets all the preview features in a provider namespace that are
        available through AFEC for the subscription.

        :param resource_provider_namespace: The namespace of the resource
         provider for getting features.
        :type resource_provider_namespace: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of FeatureResult
        :rtype:
         ~azure.mgmt.resource.features.v2015_12_01.models.FeatureResultPaged[~azure.mgmt.resource.features.v2015_12_01.models.FeatureResult]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Same lazy paging pattern as list_all, scoped to one provider
        # namespace.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features'
                path_format_arguments = {
                    'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.FeatureResultPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.FeatureResultPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def get(
            self, resource_provider_namespace, feature_name, custom_headers=None, raw=False, **operation_config):
        """Gets the preview feature with the specified name.

        :param resource_provider_namespace: The resource provider namespace
         for the feature.
        :type resource_provider_namespace: str
        :param feature_name: The name of the feature to get.
        :type feature_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: FeatureResult or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.resource.features.v2015_12_01.models.FeatureResult
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}'
        path_format_arguments = {
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'featureName': self._serialize.url("feature_name", feature_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('FeatureResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def register(
            self, resource_provider_namespace, feature_name, custom_headers=None, raw=False, **operation_config):
        """Registers the preview feature for the subscription.

        :param resource_provider_namespace: The namespace of the resource
         provider.
        :type resource_provider_namespace: str
        :param feature_name: The name of the feature to register.
        :type feature_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: FeatureResult or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.resource.features.v2015_12_01.models.FeatureResult
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}/register'
        path_format_arguments = {
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'featureName': self._serialize.url("feature_name", feature_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        # Registration is a POST (mutating) call, unlike get/list above.
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('FeatureResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
| |
# google sheets
import logging
import json
import datetime
import httplib2
import googleapiclient.discovery
from google.oauth2 import service_account
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils import timezone
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import user_passes_test
from django.views.decorators.debug import sensitive_variables, sensitive_post_parameters
from dojo.models import Finding, System_Settings, Test, Dojo_User, Note_Type, NoteHistory, Notes, Sonarqube_Issue
from dojo.forms import GoogleSheetFieldsForm
from dojo.utils import add_breadcrumb, Product_Tab
logger = logging.getLogger(__name__)
@sensitive_post_parameters()
@user_passes_test(lambda u: u.is_superuser)
def configure_google_sheets(request):
    """Superuser-only view that configures the Google Sheets sync.

    GET renders the configuration form, pre-filled from System_Settings when
    credentials are already stored.  POST either revokes the stored
    configuration ('revoke' button) or validates and saves a new one
    ('update' button): per-Finding-field column widths and protection flags,
    the service-account credentials JSON, the Drive folder ID and the email
    address.
    """
    fields = Finding._meta.fields
    # System_Settings is used as a singleton row (id=1).
    system_settings = get_object_or_404(System_Settings, id=1)
    revoke_access = False
    if system_settings.credentials:
        revoke_access = True
        # column_widths is stored as the str() of a dict; swap quotes so it
        # parses as JSON.
        column_details = json.loads(system_settings.column_widths.replace("'", '"'))
        initial = {}
        for field in fields:
            # Per field: [width, protected-flag] where the flag is 0 or 1.
            initial[field.name] = column_details[field.name][0]
            if column_details[field.name][1] == 0:
                initial['Protect ' + field.name] = False
            else:
                initial['Protect ' + field.name] = True
        initial['drive_folder_ID'] = system_settings.drive_folder_ID
        initial['email_address'] = system_settings.email_address
        initial['enable_service'] = system_settings.enable_google_sheets
        form = GoogleSheetFieldsForm(all_fields=fields, initial=initial, credentials_required=False)
    else:
        form = GoogleSheetFieldsForm(all_fields=fields, credentials_required=True)
    if request.method == 'POST':
        # A credentials upload is only mandatory when none are stored yet.
        if system_settings.credentials:
            form = GoogleSheetFieldsForm(request.POST, request.FILES, all_fields=fields, credentials_required=False)
        else:
            form = GoogleSheetFieldsForm(request.POST, request.FILES, all_fields=fields, credentials_required=True)
        if request.POST.get('revoke'):
            # Wipe every stored Google setting and disable the integration.
            system_settings.column_widths = ""
            system_settings.credentials = ""
            system_settings.drive_folder_ID = ""
            system_settings.email_address = ""
            system_settings.enable_google_sheets = False
            system_settings.save()
            messages.add_message(
                request,
                messages.SUCCESS,
                "Access revoked",
                extra_tags="alert-success",)
            return HttpResponseRedirect(reverse('dashboard'))
        if request.POST.get('update'):
            if form.is_valid():
                # Create a dictionary object from the uploaded credentials file
                if len(request.FILES) != 0:
                    cred_file = request.FILES['cred_file']
                    cred_byte = cred_file.read()  # read data from the temporary uploaded file
                    cred_str = cred_byte.decode('utf8')  # convert bytes object to string
                    # NOTE: 'initial' is reused here as a bool flag -- True
                    # means a fresh credentials file was uploaded in this
                    # request (drives the success message wording below).
                    initial = True
                else:
                    cred_str = system_settings.credentials
                    initial = False
                # Get the drive folder ID
                drive_folder_ID = form.cleaned_data['drive_folder_ID']
                validate_inputs = validate_drive_authentication(request, cred_str, drive_folder_ID)
                if validate_inputs:
                    # Create a dictionary of column names and widths
                    column_widths = {}
                    for i in fields:
                        column_widths[i.name] = []
                        column_widths[i.name].append(form.cleaned_data[i.name])
                        # Second element: 1 = column protected, 0 = editable.
                        if form.cleaned_data['Protect ' + i.name]:
                            column_widths[i.name].append(1)
                        else:
                            column_widths[i.name].append(0)
                    system_settings.column_widths = column_widths
                    system_settings.credentials = cred_str
                    system_settings.drive_folder_ID = drive_folder_ID
                    system_settings.email_address = form.cleaned_data['email_address']
                    system_settings.enable_google_sheets = form.cleaned_data['enable_service']
                    system_settings.save()
                    if initial:
                        messages.add_message(
                            request,
                            messages.SUCCESS,
                            "Google Drive configuration saved successfully.",
                            extra_tags="alert-success",
                        )
                    else:
                        messages.add_message(
                            request,
                            messages.SUCCESS,
                            "Google Drive configuration updated successfully.",
                            extra_tags="alert-success",
                        )
                    return HttpResponseRedirect(reverse('dashboard'))
                else:
                    # Validation failed: make sure the sync stays disabled.
                    system_settings.enable_google_sheets = False
                    system_settings.save()
    add_breadcrumb(title="Google Sheet Sync Configuration", top_level=True, request=request)
    return render(request, 'dojo/google_sheet_configuration.html', {
        'name': 'Google Sheet Sync Configuration',
        'metric': False,
        'form': form,
        'revoke_access': revoke_access,
    })
@sensitive_variables('cred_str', 'drive_folder_ID', 'service_account_info')
def validate_drive_authentication(request, cred_str, drive_folder_ID):
    """Validate the service-account credentials and Drive folder access.

    Performs a full round trip: creates a temporary 'Test spreadsheet' via
    the Sheets API, moves it into the target folder via the Drive API, then
    deletes it.  Each failure mode (invalid credentials JSON, Sheets API
    disabled, Drive API disabled, folder not writable / not found) pushes an
    error message onto ``request`` and returns False; returns True only when
    the whole round trip succeeds.
    """
    SCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/spreadsheets']
    service_account_info = json.loads(cred_str)
    try:
        # Validate the uploaded credentials file
        credentials = service_account.Credentials.from_service_account_info(service_account_info, scopes=SCOPES)
    except ValueError:
        messages.add_message(
            request,
            messages.ERROR,
            'Invalid credentials file.',
            extra_tags='alert-danger')
        return False
    else:
        sheets_service = googleapiclient.discovery.build('sheets', 'v4', credentials=credentials, cache_discovery=False)
        drive_service = googleapiclient.discovery.build('drive', 'v3', credentials=credentials, cache_discovery=False)
        spreadsheet = {
            'properties': {
                'title': 'Test spreadsheet'
            }
        }
        try:
            # Check the sheets API is enabled or not
            spreadsheet = sheets_service.spreadsheets().create(body=spreadsheet, fields='spreadsheetId').execute()
        except googleapiclient.errors.HttpError:
            messages.add_message(
                request,
                messages.ERROR,
                'Enable the Google Sheets API from the Google Developer Console.',
                extra_tags='alert-danger')
            return False
        else:
            spreadsheetId = spreadsheet.get('spreadsheetId')
            try:
                # Check the drive API is enabled or not
                file = drive_service.files().get(fileId=spreadsheetId, fields='parents').execute()  # Retrieve the existing parents to remove
            except googleapiclient.errors.HttpError:
                messages.add_message(
                    request,
                    messages.ERROR,
                    'Enable the Google Drive API from the Google Developer Console.',
                    extra_tags='alert-danger')
                return False
            else:
                previous_parents = ",".join(file.get('parents'))
                folder_id = drive_folder_ID
                try:
                    # Validate the drive folder id and it's permissions
                    file = drive_service.files().update(fileId=spreadsheetId,  # Move the file to the new folder
                                                        addParents=folder_id,
                                                        removeParents=previous_parents,
                                                        fields='id, parents').execute()
                except googleapiclient.errors.HttpError as error:
                    # 403: folder exists but the service account cannot write
                    # to it; 404: folder ID does not exist.
                    if error.resp.status == 403:
                        messages.add_message(
                            request,
                            messages.ERROR,
                            'Unable to write to the given Google Drive folder',
                            extra_tags='alert-danger')
                    if error.resp.status == 404:
                        messages.add_message(
                            request,
                            messages.ERROR,
                            'Invalid Google Drive folder ID',
                            extra_tags='alert-danger')
                    return False
                else:
                    drive_service.files().delete(fileId=spreadsheetId).execute()  # Delete 'test spreadsheet'
                    return True
@user_passes_test(lambda u: u.is_staff)
def export_to_sheet(request, tid):
    """Export or re-sync the findings of a test to a Google Sheet.

    Looks up a spreadsheet named "<product>-<engagement>-<test id>" in the
    configured Drive folder:
      * exactly one match  -> sync it with the database (sync_findings) and
        either redirect back to the test view or render the sync errors page;
      * no match           -> create a fresh spreadsheet (create_googlesheet)
        and redirect back to the test view;
      * multiple matches   -> report an error and redirect back.

    Raises PermissionDenied when the Google Sheets integration is disabled.
    API/connectivity failures render 'google_sheet_error.html' instead.
    """
    system_settings = get_object_or_404(System_Settings, id=1)
    google_sheets_enabled = system_settings.enable_google_sheets
    if google_sheets_enabled is False:
        raise PermissionDenied
    test = Test.objects.get(id=tid)
    # Spreadsheet naming convention shared with create_googlesheet().
    spreadsheet_name = test.engagement.product.name + "-" + test.engagement.name + "-" + str(test.id)
    service_account_info = json.loads(system_settings.credentials)
    SCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/spreadsheets']
    credentials = service_account.Credentials.from_service_account_info(service_account_info, scopes=SCOPES)
    try:
        drive_service = googleapiclient.discovery.build('drive', 'v3', credentials=credentials, cache_discovery=False)
        folder_id = system_settings.drive_folder_ID
        # Search the configured folder for a spreadsheet with the expected name.
        gs_files = drive_service.files().list(q="mimeType='application/vnd.google-apps.spreadsheet' and parents in '%s' and name='%s'" % (folder_id, spreadsheet_name),
                                              spaces='drive',
                                              pageSize=10,
                                              fields='files(id, name)').execute()
        spreadsheets = gs_files.get('files')
        if len(spreadsheets) == 1:
            spreadsheetId = spreadsheets[0].get('id')
            sync = sync_findings(request, tid, spreadsheetId)
            errors = sync['errors']
            sheet_title = sync['sheet_title']
            if len(errors) > 0:
                # Row-level validation problems: show them on a dedicated page.
                product_tab = Product_Tab(test.engagement.product.id, title="Syncing Errors", tab="engagements")
                product_tab.setEngagement(test.engagement)
                spreadsheet_url = 'https://docs.google.com/spreadsheets/d/' + spreadsheetId
                return render(
                    request, 'dojo/syncing_errors.html', {
                        'test': test,
                        'errors': errors,
                        'name': 'Google Drive Sync Errors',
                        'product_tab': product_tab,
                        'sheet_title': sheet_title,
                        'spreadsheet_name': spreadsheet_name,
                        'spreadsheet_url': spreadsheet_url
                    })
            else:
                messages.add_message(
                    request,
                    messages.SUCCESS,
                    "Synched Google Sheet with database.",
                    extra_tags="alert-success",
                )
                return HttpResponseRedirect(reverse('view_test', args=(tid, )))
        elif len(spreadsheets) == 0:
            create_googlesheet(request, tid)
            messages.add_message(
                request,
                messages.SUCCESS,
                "Successfully exported finding details to Google Sheet.",
                extra_tags="alert-success",
            )
            return HttpResponseRedirect(reverse('view_test', args=(tid, )))
        else:
            messages.add_message(
                request,
                messages.ERROR,
                "More than one Google Sheet exists for this test. Please contact your system admin to solve the issue.",
                extra_tags="alert-danger",
            )
            return HttpResponseRedirect(reverse('view_test', args=(tid, )))
    except httplib2.ServerNotFoundError:
        error_message = 'Unable to reach the Google Sheet API.'
        return render(request, 'google_sheet_error.html', {'error_message': error_message})
    except googleapiclient.errors.HttpError as error:  # NOTE(review): 'error' is bound but unused
        error_message = 'There is a problem with the Google Sheets Sync Configuration. Contact your system admin to solve the issue.'
        return render(request, 'google_sheet_error.html', {'error_message': error_message})
    except Exception as e:
        # NOTE(review): the exception object itself is passed to the template;
        # presumably the template stringifies it — consider str(e).
        error_message = e
        return render(request, 'google_sheet_error.html', {'error_message': error_message})
def create_googlesheet(request, tid):
    """Create a Google Sheet for a test, file it in the configured Drive
    folder, share it with the requesting user, and populate it with findings.

    The spreadsheet is named "<product>-<engagement>-<test id>", matching the
    lookup convention used by export_to_sheet.

    Raises:
        Exception: if the requesting user has no email address (the sheet is
            shared with that address).
    """
    user_email = request.user.email
    if not user_email:
        raise Exception('User must have an email address to use this feature.')
    test = Test.objects.get(id=tid)
    system_settings = get_object_or_404(System_Settings, id=1)
    # Build authorized Sheets and Drive clients from the stored service account.
    service_account_info = json.loads(system_settings.credentials)
    SCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/spreadsheets']
    credentials = service_account.Credentials.from_service_account_info(service_account_info, scopes=SCOPES)
    sheets_service = googleapiclient.discovery.build('sheets', 'v4', credentials=credentials, cache_discovery=False)
    drive_service = googleapiclient.discovery.build('drive', 'v3', credentials=credentials, cache_discovery=False)
    # Create the (empty) spreadsheet.
    spreadsheet_name = test.engagement.product.name + "-" + test.engagement.name + "-" + str(test.id)
    created = sheets_service.spreadsheets().create(
        body={'properties': {'title': spreadsheet_name}},
        fields='spreadsheetId').execute()
    spreadsheetId = created.get('spreadsheetId')
    # Relocate the spreadsheet from its default location into the drive folder.
    folder_id = system_settings.drive_folder_ID
    parents_info = drive_service.files().get(fileId=spreadsheetId, fields='parents').execute()
    previous_parents = ",".join(parents_info.get('parents'))
    drive_service.files().update(fileId=spreadsheetId,
                                 addParents=folder_id,
                                 removeParents=previous_parents,
                                 fields='id, parents').execute()
    # Grant the current user write access to the new spreadsheet.
    drive_service.permissions().create(body={'type': 'user', 'role': 'writer', 'emailAddress': user_email}, fileId=spreadsheetId).execute()
    populate_sheet(tid, spreadsheetId)
def sync_findings(request, tid, spreadsheetId):
    """Pull edits from the newest worksheet of a spreadsheet back into the
    test's findings, then rewrite the spreadsheet (populate_sheet).

    Worksheets are named with a "%Y-%m-%d %H:%M:%S" timestamp; the most recent
    one is treated as the current data. Each data row is validated (duplicate
    vs active/verified, false_p vs verified, row id must belong to the test);
    valid rows update finding attributes and add/edit notes.

    Returns a dict with:
        'sheet_title': the worksheet title that was synced,
        'errors': list of {'finding_id', 'column_names', 'error'} dicts.

    Raises Exception when no worksheet title parses as a timestamp.
    """
    test = Test.objects.get(id=tid)
    system_settings = get_object_or_404(System_Settings, id=1)
    service_account_info = json.loads(system_settings.credentials)
    SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
    credentials = service_account.Credentials.from_service_account_info(service_account_info, scopes=SCOPES)
    sheets_service = googleapiclient.discovery.build('sheets', 'v4', credentials=credentials, cache_discovery=False)
    res = {}
    spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=spreadsheetId).execute()
    # Collect every worksheet whose title is a timestamp; non-matching titles
    # are ignored (NOTE(review): bare except also hides unrelated errors).
    sheet_names = []
    for sheet in spreadsheet['sheets']:
        date = (sheet['properties']['title'])
        try:
            date = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
            sheet_names.append(date)
        except:
            pass
    try:
        # The latest timestamped worksheet holds the current data.
        sheet_title = str(max(sheet_names))
    except:
        raise Exception('Existing Google Spreadsheet has errors. Delete the speadsheet and export again.')
    res['sheet_title'] = sheet_title
    result = sheets_service.spreadsheets().values().get(spreadsheetId=spreadsheetId, range=sheet_title).execute()
    rows = result.get('values', [])
    header_raw = rows[0]
    findings_sheet = rows[1:]
    findings_db = Finding.objects.filter(test=test).order_by('numerical_severity')
    column_details = json.loads(system_settings.column_widths.replace("'", '"'))
    active_note_types = Note_Type.objects.filter(is_active=True)
    note_type_activation = len(active_note_types)
    errors = []
    # Column positions of the status flags used for row validation.
    index_of_active = header_raw.index('active')
    index_of_verified = header_raw.index('verified')
    index_of_duplicate = header_raw.index('duplicate')
    index_of_false_p = header_raw.index('false_p')
    index_of_id = header_raw.index('id')
    for finding_sheet in findings_sheet:
        finding_id = finding_sheet[index_of_id]
        active = finding_sheet[index_of_active]
        verified = finding_sheet[index_of_verified]
        duplicate = finding_sheet[index_of_duplicate]
        false_p = finding_sheet[index_of_false_p]
        if (active == 'TRUE' or verified == 'TRUE') and duplicate == 'TRUE': # Check update finding conditions
            error = 'Duplicate findings cannot be verified or active'
            errors.append({'finding_id': finding_id, 'column_names': 'active, verified, duplicate', 'error': error})
        elif false_p == 'TRUE' and verified == 'TRUE':
            error = 'False positive findings cannot be verified.'
            errors.append({'finding_id': finding_id, 'column_names': 'false_p, verified', 'error': error})
        else:
            try:
                finding_db = findings_db.get(id=finding_id) # Update finding attributes
            except:
                if finding_id is None:
                    finding_id = 'Null'
                error = 'Finding does not belong to the Test'
                errors.append({'finding_id': finding_id, 'column_names': 'id', 'error': error})
            else:
                finding_notes = finding_db.notes.all()
                for column_name in header_raw:
                    if column_name in column_details:
                        # column_details[name][1] == 0 means the column is
                        # editable in the sheet, so copy its value back.
                        if int(column_details[column_name][1]) == 0:
                            index_of_column = header_raw.index(column_name)
                            # Sheets represent booleans as 'TRUE'/'FALSE' strings.
                            if finding_sheet[index_of_column] == 'TRUE':
                                setattr(finding_db, column_name, True)
                            elif finding_sheet[index_of_column] == 'FALSE':
                                setattr(finding_db, column_name, False)
                            else:
                                setattr(finding_db, column_name, finding_sheet[index_of_column])
                    elif column_name[:6] == '[note]' and column_name[-3:] == '_id': # Updating notes
                        # '[note] X_n_id' holds the note pk; '[note] X_n' holds the text.
                        note_column_name = column_name[:-3]
                        try:
                            index_of_note_column = header_raw.index(note_column_name)
                        except ValueError:
                            pass
                        else:
                            index_of_id_column = header_raw.index(column_name)
                            note_id = finding_sheet[index_of_id_column]
                            note_entry = finding_sheet[index_of_note_column].rstrip()
                            if note_entry != '':
                                if note_id != '': # If the note is an existing one
                                    note_db = finding_notes.get(id=note_id)
                                    if note_entry != note_db.entry.rstrip():
                                        note_db.entry = note_entry
                                        note_db.edited = True
                                        note_db.editor = request.user
                                        note_db.edit_time = timezone.now()
                                        history = NoteHistory(data=note_db.entry,
                                                              time=note_db.edit_time,
                                                              current_editor=note_db.editor)
                                        history.save()
                                        note_db.history.add(history)
                                        note_db.save()
                                else: # If the note is a newly added one
                                    if note_type_activation:
                                        # Note types enforced: plain 'Note_n' columns are rejected.
                                        if note_column_name[7:12] == 'Note_':
                                            error = 'Can not add new notes without a note-type. Add your note under the correct note-type column'
                                            errors.append({'finding_id': finding_id, 'column_names': note_column_name, 'error': error})
                                        else:
                                            # Strip the '[note] ' prefix and '_n' suffix to get the type name.
                                            note_type_name = note_column_name[7:][:-2]
                                            try:
                                                note_type = active_note_types.get(name=note_type_name)
                                            except:
                                                try:
                                                    note_type = Note_Type.objects.get(name=note_type_name)
                                                except:
                                                    pass
                                                else:
                                                    error = '"' + note_type_name + '" Note-type is disabled. Cannot add new notes of "' + note_type_name + '" type'
                                                    errors.append({'finding_id': finding_id, 'column_names': note_column_name, 'error': error})
                                            else:
                                                new_note = Notes(note_type=note_type,
                                                                 entry=note_entry,
                                                                 date=timezone.now(),
                                                                 author=request.user)
                                                new_note.save()
                                                history = NoteHistory(data=new_note.entry,
                                                                      time=new_note.date,
                                                                      current_editor=new_note.author,
                                                                      note_type=new_note.note_type)
                                                history.save()
                                                new_note.history.add(history)
                                                finding_db.notes.add(new_note)
                                    else:
                                        # Note types disabled: only plain 'Note_n' columns may add notes.
                                        if note_column_name[7:12] == 'Note_':
                                            new_note = Notes(entry=note_entry,
                                                             date=timezone.now(),
                                                             author=request.user)
                                            new_note.save()
                                            history = NoteHistory(data=new_note.entry,
                                                                  time=new_note.date,
                                                                  current_editor=new_note.author)
                                            history.save()
                                            new_note.history.add(history)
                                            finding_db.notes.add(new_note)
                                        else:
                                            error_location = finding_id + ' ' + note_column_name
                                            error = 'Note-types are not enabled. Notes cannot have a note-type.'
                                            errors.append({'finding_id': finding_id, 'column_names': note_column_name, 'error': error})
                finding_db.save()
    res['errors'] = errors
    # Rewrite the spreadsheet so it reflects the post-sync database state.
    populate_sheet(tid, spreadsheetId)
    return res
def populate_sheet(tid, spreadsheetId):
    """Write the current findings of a test into a new worksheet of the
    given spreadsheet.

    A worksheet titled with the current "%Y-%m-%d %H:%M:%S" timestamp is
    added, moved to the first tab position and filled with the rows from
    get_findings_list(tid). The header row is styled, frozen and protected;
    columns are then sized/hidden/protected per the configured column_widths,
    and data validation (boolean/integer/date/severity dropdown) is attached
    based on the Finding model field types.
    """
    system_settings = get_object_or_404(System_Settings, id=1)
    service_account_info = json.loads(system_settings.credentials)
    service_account_email = service_account_info['client_email']
    email_address = system_settings.email_address
    SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
    credentials = service_account.Credentials.from_service_account_info(service_account_info, scopes=SCOPES)
    sheets_service = googleapiclient.discovery.build('sheets', 'v4', credentials=credentials, cache_discovery=False)
    findings_list = get_findings_list(tid)
    row_count = len(findings_list)
    column_count = len(findings_list[0])
    # Create new sheet in the spreadsheet
    now = datetime.datetime.now()
    sheet_title = now.strftime("%Y-%m-%d %H:%M:%S")
    new_sheet = {
        "requests": [{
            "addSheet": {
                "properties": {
                    "title": sheet_title,
                    "gridProperties": {
                        "rowCount": row_count,
                        "columnCount": column_count
                    }
                }
            }
        }]
    }
    sheets_service.spreadsheets().batchUpdate(spreadsheetId=spreadsheetId, body=new_sheet).execute()
    # Move new sheet to the left most corner
    spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=spreadsheetId).execute()
    for sheet in spreadsheet['sheets']:
        if sheet['properties']['title'] == sheet_title:
            sheet_id = sheet['properties']['sheetId']
            break
    reqs = {
        'requests': [
            {'updateSheetProperties': {
                'properties': {
                    'sheetId': sheet_id,
                    'index': 0
                },
                "fields": "index"
            }}
        ]}
    sheets_service.spreadsheets().batchUpdate(spreadsheetId=spreadsheetId, body=reqs).execute()
    # Update created sheet with finding details
    result = sheets_service.spreadsheets().values().update(spreadsheetId=spreadsheetId,
                                                           range=sheet_title,
                                                           valueInputOption='RAW',
                                                           body={'values': findings_list}).execute()
    # Format the header row: black background, white bold text, frozen and
    # protected so only the service account / configured email can edit it.
    body = {
        "requests": [
            {
                "repeatCell": {
                    "range": {
                        "sheetId": sheet_id,
                        "startRowIndex": 0,
                        "endRowIndex": 1
                    },
                    "cell": {
                        "userEnteredFormat": {
                            "backgroundColor": {
                                "red": 0.0,
                                "green": 0.0,
                                "blue": 0.0
                            },
                            "horizontalAlignment": "CENTER",
                            "textFormat": {
                                "foregroundColor": {
                                    "red": 1.0,
                                    "green": 1.0,
                                    "blue": 1.0
                                },
                                "fontSize": 12,
                                "bold": True
                            }
                        }
                    },
                    "fields": "userEnteredFormat(backgroundColor,textFormat,horizontalAlignment)"
                }
            },
            {
                "updateSheetProperties": {
                    "properties": {
                        "sheetId": sheet_id,
                        "gridProperties": {
                            "frozenRowCount": 1
                        }
                    },
                    "fields": "gridProperties.frozenRowCount"
                }
            },
            {
                "addProtectedRange": {
                    "protectedRange": {
                        "range": {
                            "sheetId": sheet_id,
                            "startRowIndex": 0,
                            "endRowIndex": 1,
                            "startColumnIndex": 0,
                            "endColumnIndex": column_count,
                        },
                        "editors": {
                            "users": [
                                service_account_email,
                                email_address
                            ]
                        },
                        # "description": "Protecting total row",
                        "warningOnly": False
                    }
                }
            }
        ]
    }
    sheets_service.spreadsheets().batchUpdate(spreadsheetId=spreadsheetId, body=body).execute()
    # Format columns with input field widths and protect columns
    range = sheet_title + '!1:1'  # NOTE(review): shadows the 'range' builtin
    result = sheets_service.spreadsheets().values().get(spreadsheetId=spreadsheetId, range=range).execute()
    rows = result.get('values', [])
    header_raw = rows[0]
    fields = Finding._meta.fields
    column_details = json.loads(system_settings.column_widths.replace("'", '"'))
    body = {}
    body["requests"] = []
    for column_name in header_raw:
        index_of_column = header_raw.index(column_name)
        if column_name in column_details:
            # If column width is 0 hide column
            if int(column_details[column_name][0]) == 0:
                body["requests"].append({
                    "updateDimensionProperties": {
                        "range": {
                            "sheetId": sheet_id,
                            "dimension": "COLUMNS",
                            "startIndex": index_of_column,
                            "endIndex": index_of_column + 1
                        },
                        "properties": {
                            "hiddenByUser": True,
                        },
                        "fields": "hiddenByUser"
                    }
                })
            else:
                # If column width is not 0 adjust column to given width
                body["requests"].append({
                    "updateDimensionProperties": {
                        "range": {
                            "sheetId": sheet_id,
                            "dimension": "COLUMNS",
                            "startIndex": index_of_column,
                            "endIndex": index_of_column + 1
                        },
                        "properties": {
                            "pixelSize": column_details[column_name][0]
                        },
                        "fields": "pixelSize"
                    }
                })
            # If protect column is true, protect in sheet
            if column_details[column_name][1] == 1:
                body["requests"].append({
                    "addProtectedRange": {
                        "protectedRange": {
                            "range": {
                                "sheetId": sheet_id,
                                "startRowIndex": 1,
                                "endRowIndex": row_count,
                                "startColumnIndex": index_of_column,
                                "endColumnIndex": index_of_column + 1,
                            },
                            "editors": {
                                "users": [
                                    service_account_email,
                                    email_address
                                ]
                            },
                            "warningOnly": False
                        }
                    }
                })
            # Format boolean fields in the google sheet
            if (fields[index_of_column].get_internal_type()) == "BooleanField":
                body["requests"].append({
                    "setDataValidation": {
                        "range": {
                            "sheetId": sheet_id,
                            "startRowIndex": 1,
                            "endRowIndex": row_count,
                            "startColumnIndex": index_of_column,
                            "endColumnIndex": index_of_column + 1,
                        },
                        "rule": {
                            "condition": {
                                "type": "BOOLEAN",
                            },
                            "inputMessage": "Value must be BOOLEAN",
                            "strict": True
                        }
                    }
                })
            # Format integer fields in the google sheet
            elif (fields[index_of_column].get_internal_type()) == "IntegerField":
                body["requests"].append({
                    "setDataValidation": {
                        "range": {
                            "sheetId": sheet_id,
                            "startRowIndex": 1,
                            "endRowIndex": row_count,
                            "startColumnIndex": index_of_column,
                            "endColumnIndex": index_of_column + 1,
                        },
                        "rule": {
                            "condition": {
                                "type": "NUMBER_GREATER",
                                "values": [
                                    {
                                        "userEnteredValue": "-1"
                                    }
                                ]
                            },
                            "inputMessage": "Value must be an integer",
                            "strict": True
                        }
                    }
                })
            # Format date fields in the google sheet
            elif (fields[index_of_column].get_internal_type()) == "DateField":
                body["requests"].append({
                    "setDataValidation": {
                        "range": {
                            "sheetId": sheet_id,
                            "startRowIndex": 1,
                            "endRowIndex": row_count,
                            "startColumnIndex": index_of_column,
                            "endColumnIndex": index_of_column + 1,
                        },
                        "rule": {
                            "condition": {
                                "type": "DATE_IS_VALID",
                            },
                            "inputMessage": "Value must be a valid date",
                            "strict": True
                        }
                    }
                })
            # Make severity column a dropdown
            elif column_name == "severity":
                body["requests"].append({
                    "setDataValidation": {
                        "range": {
                            "sheetId": sheet_id,
                            "startRowIndex": 1,
                            "endRowIndex": row_count,
                            "startColumnIndex": index_of_column,
                            "endColumnIndex": index_of_column + 1,
                        },
                        "rule": {
                            "condition": {
                                "type": "ONE_OF_LIST",
                                "values": [
                                    {"userEnteredValue": "Info"},
                                    {"userEnteredValue": "Low"},
                                    {"userEnteredValue": "Medium"},
                                    {"userEnteredValue": "High"},
                                    {"userEnteredValue": "Critical"},
                                ]
                            },
                            "inputMessage": "Value must be an one of list",
                            "strict": True
                        }
                    }
                })
        # Hide and protect note id columns and last column
        elif (column_name[:6] == '[note]' and column_name[-3:] == '_id') or column_name == 'Last column':
            body["requests"].append({
                "updateDimensionProperties": {
                    "range": {
                        "sheetId": sheet_id,
                        "dimension": "COLUMNS",
                        "startIndex": index_of_column,
                        "endIndex": index_of_column + 1
                    },
                    "properties": {
                        "hiddenByUser": True,
                    },
                    "fields": "hiddenByUser"
                }
            })
            body["requests"].append({
                "addProtectedRange": {
                    "protectedRange": {
                        "range": {
                            "sheetId": sheet_id,
                            "startRowIndex": 1,
                            "endRowIndex": row_count,
                            "startColumnIndex": index_of_column,
                            "endColumnIndex": index_of_column + 1,
                        },
                        "editors": {
                            "users": [
                                service_account_email,
                                email_address
                            ]
                        },
                        "warningOnly": False
                    }
                }
            })
        # Auto-size note text and duplicate-note columns to their contents.
        elif column_name[:6] == '[note]' or column_name[:11] == '[duplicate]':
            body["requests"].append({
                "autoResizeDimensions": {
                    "dimensions": {
                        "sheetId": sheet_id,
                        "dimension": "COLUMNS",
                        "startIndex": index_of_column,
                        "endIndex": index_of_column + 1
                    }
                }
            })
    sheets_service.spreadsheets().batchUpdate(spreadsheetId=spreadsheetId, body=body).execute()
def get_findings_list(tid):
    """Build the 2D list of values exported to the Google Sheet for a test.

    Row 0 is the header (Finding model field names, then note columns, then
    optional '[duplicate]' columns, then 'Last column'); each following row is
    one finding ordered by numerical_severity. Note columns come in pairs:
    '[note] <name>_n_id' (note pk, hidden in the sheet) and '[note] <name>_n'
    (note text); rows are padded with '' so all rows have equal length.

    Returns the list of rows (list of lists).
    """
    test = Test.objects.get(id=tid)
    system_settings = get_object_or_404(System_Settings, id=1)
    findings = Finding.objects.filter(test=test).order_by('numerical_severity')
    active_note_types = Note_Type.objects.filter(is_active=True).order_by('id')
    note_type_activation = active_note_types.count()
    # Create the header row
    fields = Finding._meta.fields
    findings_list = []
    headings = []
    for i in fields:
        headings.append(i.name)
    findings_list.append(headings)
    # Create finding rows
    for finding in findings:
        finding_details = []
        for field in fields:
            value = getattr(finding, field.name)
            # Coerce related objects / dates to sheet-friendly scalars.
            if type(value) == datetime.date or type(value) == Test or type(value) == datetime.datetime:
                var = str(value)
            elif type(value) == User or type(value) == Dojo_User:
                var = value.username
            elif type(value) == Finding:
                var = value.id
            elif type(value) == Sonarqube_Issue:
                var = value.key
            else:
                var = value
            finding_details.append(var)
        findings_list.append(finding_details)
    # Add notes into the findings_list
    if note_type_activation:
        # One group of note columns per active note type; width of each group
        # is the maximum note count of that type across findings (1 minimum,
        # always 1 for single-instance types).
        for note_type in active_note_types:
            max_note_count = 1
            if note_type.is_single:
                findings_list[0].append('[note] ' + note_type.name + '_1_id')
                findings_list[0].append('[note] ' + note_type.name + '_1')
            else:
                for finding in findings:
                    note_count = finding.notes.filter(note_type=note_type).count()
                    if max_note_count < note_count:
                        max_note_count = note_count
                for n in range(max_note_count):
                    findings_list[0].append('[note] ' + note_type.name + '_' + str(n + 1) + '_id')
                    findings_list[0].append('[note] ' + note_type.name + '_' + str(n + 1))
            for f in range(findings.count()):
                finding = findings[f]
                notes = finding.notes.filter(note_type=note_type).order_by('id')
                for note in notes:
                    findings_list[f + 1].append(note.id)
                    findings_list[f + 1].append(note.entry)
                # Pad so every row has max_note_count id/entry pairs.
                missing_notes_count = max_note_count - notes.count()
                for i in range(missing_notes_count):
                    findings_list[f + 1].append('')
                    findings_list[f + 1].append('')
        # Untyped (or inactive-typed) notes get their own 'Note_n' columns.
        max_note_count = 0
        for finding in findings:
            note_count = finding.notes.exclude(note_type__in=active_note_types).count()
            if max_note_count < note_count:
                max_note_count = note_count
        if max_note_count > 0:
            for i in range(max_note_count):
                findings_list[0].append('[note] ' + "Note_" + str(i + 1) + '_id')
                findings_list[0].append('[note] ' + "Note_" + str(i + 1))
            for f in range(findings.count()):
                finding = findings[f]
                notes = finding.notes.exclude(note_type__in=active_note_types).order_by('id')
                for note in notes:
                    findings_list[f + 1].append(note.id)
                    findings_list[f + 1].append(note.entry)
                missing_notes_count = max_note_count - notes.count()
                for i in range(missing_notes_count):
                    findings_list[f + 1].append('')
                    findings_list[f + 1].append('')
    else:
        # Note types disabled: all notes go into plain 'Note_n' columns.
        max_note_count = 1
        for finding in findings:
            note_count = len(finding.notes.all())
            if note_count > max_note_count:
                max_note_count = note_count
        for i in range(max_note_count):
            findings_list[0].append('[note] ' + "Note_" + str(i + 1) + '_id')
            findings_list[0].append('[note] ' + "Note_" + str(i + 1))
        for f in range(findings.count()):
            finding = findings[f]
            notes = finding.notes.all().order_by('id')
            for note in notes:
                findings_list[f + 1].append(note.id)
                findings_list[f + 1].append(note.entry)
            missing_notes_count = max_note_count - notes.count()
            for i in range(missing_notes_count):
                findings_list[f + 1].append('')
                findings_list[f + 1].append('')
    if system_settings.enable_deduplication:
        # Show the latest note(s) of each finding's original (duplicate_finding);
        # bare except covers findings with no original or no matching note.
        if note_type_activation:
            for note_type in active_note_types:
                findings_list[0].append('[duplicate] ' + note_type.name)
            for f in range(findings.count()):
                original_finding = findings[f].duplicate_finding
                for note_type in active_note_types:
                    try:
                        note = original_finding.notes.filter(note_type=note_type).latest('date')
                        findings_list[f + 1].append(note.entry)
                    except:
                        findings_list[f + 1].append('')
        else:
            findings_list[0].append('[duplicate] note')
            for f in range(findings.count()):
                original_finding = findings[f].duplicate_finding
                try:
                    note = original_finding.notes.latest('date')
                    findings_list[f + 1].append(note.entry)
                except:
                    findings_list[f + 1].append('')
    # Sentinel column so populate_sheet can hide/protect everything after the data.
    findings_list[0].append('Last column')
    for f in range(findings.count()):
        findings_list[f + 1].append('-')
    return findings_list
| |
# -*- coding: utf-8 -*-
"""
Display upcoming Google Calendar events.
This module will display information about upcoming Google Calendar events
in one of two formats which can be toggled with a button press. The event
URL may also be opened in a web browser with a button press.
Configuration parameters:
auth_token: The path to where the access/refresh token will be saved
after successful credential authorization.
(default '~/.config/py3status/google_calendar.auth_token')
blacklist_events: Event names in this list will not be shown in the module
(case insensitive).
(default [])
button_open: Opens the event URL in the default web browser.
(default 3)
button_refresh: Refreshes the module and updates the list of events.
(default 2)
button_toggle: Toggles a boolean to hide/show the data for each event.
(default 1)
cache_timeout: How often the module is refreshed in seconds
(default 60)
client_secret: the path to your client_secret file which
contains your OAuth 2.0 credentials.
(default '~/.config/py3status/google_calendar.client_secret')
events_within_hours: Select events within the next given hours.
(default 12)
force_lowercase: Sets whether to force all event output to lower case.
(default False)
format: The format for module output.
(default '{events}|\?color=event \u2687')
format_date: The format for date related format placeholders.
May be any Python strftime directives for dates.
(default '%a %d-%m')
format_event: The format for each event. The information can be toggled
with 'button_toggle' based on the value of 'is_toggled'.
(default '[\?color=event {summary}][\?if=is_toggled ({start_time}
- {end_time}, {start_date})|[\?if=location ({location})]
[{format_timer}]]'
format_notification: The format for event warning notifications.
(default '{summary} {start_time} - {end_time}')
format_separator: The string used to separate individual events.
(default ' \| ')
format_time: The format for time-related placeholders except `{format_timer}`.
May use any Python strftime directives for times.
(default '%I:%M %p')
format_timer: The format used for the {format_timer} placeholder to display
time until an event starts or time until an event in progress is over.
(default '\?color=time ([\?if=days {days}d ][\?if=hours {hours}h ]
[\?if=minutes {minutes}m]) [\?if=is_current left]')
ignore_all_day_events: Sets whether to display all day events or not.
(default False)
num_events: The maximum number of events to display.
(default 3)
response: Only display events for which the response status is
on the list. (default ['accepted'])
thresholds: Thresholds for events. The first entry is the color for event 1,
the second for event 2, and so on.
(default [])
time_to_max: Threshold (in minutes) for when to display the `{format_timer}`
string; e.g. if time_to_max is 60, `{format_timer}` will only be
displayed for events starting in 60 minutes or less.
(default 180)
warn_threshold: The number of minutes until an event starts before a
warning is displayed to notify the user; e.g. if warn_threshold is 30
and an event is starting in 30 minutes or less, a notification will be
displayed. disabled by default.
(default 0)
warn_timeout: The number of seconds before a warning should be issued again.
(default 300)
Control placeholders:
{is_toggled} a boolean toggled by button_toggle
Format placeholders:
{events} All the events to display.
format_event and format_notification placeholders:
{description} The description for the calendar event.
{end_date} The end date for the event.
{end_time} The end time for the event.
{location} The location for the event.
{start_date} The start date for the event.
{start_time} The start time for the event.
{summary} The summary (i.e. title) for the event.
{format_timer} The time until the event starts (or until it is over
if already in progress).
format_timer placeholders:
{days} The number of days until the event.
{hours} The number of hours until the event.
{minutes} The number of minutes until the event.
Color options:
color_event: Color for a single event.
color_time: Color for the time associated with each event.
Requires:
1. Python library google-api-python-client.
2. Python library python-dateutil.
3. OAuth 2.0 credentials for the Google Calendar api.
Follow Step 1 of the guide here to obtain your OAuth 2.0 credentials:
https://developers.google.com/google-apps/calendar/quickstart/python
Download the client_secret.json file which contains your client ID and
client secret. In your i3status config file, set configuration parameter
client_secret to the path to your client_secret.json file.
The first time you run the module, a browser window will open asking you
to authorize access to your calendar. After authorization is complete,
an access/refresh token will be saved to the path configured in
auth_token, and i3status will be restarted. This restart will
occur only once after the first time you successfully authorize.
Examples:
```
# add color gradients for events and dates/times
google_calendar {
thresholds = {
'event': [(1, '#d0e6ff'), (2, '#bbdaff'), (3, '#99c7ff'),
(4, '#86bcff'), (5, '#62a9ff'), (6, '#8c8cff'), (7, '#7979ff')],
'time': [(1, '#ffcece'), (2, '#ffbfbf'), (3, '#ff9f9f'),
(4, '#ff7f7f'), (5, '#ff5f5f'), (6, '#ff3f3f'), (7, '#ff1f1f')]
}
}
```
@author Igor Grebenkov
@license BSD
SAMPLE OUTPUT
[
{'full_text':'Homer's Birthday (742 Evergreen Terrace) (1h 23m) | '},
{'full_text':'Doctor's Appointment | Lunch with John'},
]
"""
import httplib2
import os
import datetime
from apiclient import discovery
from oauth2client import client
from oauth2client import clientsecrets
from oauth2client import tools
from oauth2client.file import Storage
from httplib2 import ServerNotFoundError
from dateutil import parser
from dateutil.tz import tzlocal
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
APPLICATION_NAME = 'py3status google_calendar module'
class Py3status:
    """
    """
    # Configuration parameters -- defaults are documented in the module
    # docstring above; py3status overrides these from the user's config.
    auth_token = '~/.config/py3status/google_calendar.auth_token'
    blacklist_events = []
    button_open = 3
    button_refresh = 2
    button_toggle = 1
    cache_timeout = 60
    client_secret = '~/.config/py3status/google_calendar.client_secret'
    events_within_hours = 12
    force_lowercase = False
    format = '{events}|\?color=event \u2687'
    format_date = '%a %d-%m'
    format_event = '[\?color=event {summary}][\?if=is_toggled ({start_time}' +\
        ' - {end_time}, {start_date})|[ ({location})][ {format_timer}]]'
    format_notification = '{summary} {start_time} - {end_time}'
    format_separator = ' \| '
    format_time = '%I:%M %p'
    format_timer = '\?color=time ([\?if=days {days}d ][\?if=hours {hours}h ]' +\
        '[\?if=minutes {minutes}m])[\?if=is_current left]'
    ignore_all_day_events = False
    num_events = 3
    response = ['accepted']
    thresholds = []
    time_to_max = 180
    warn_threshold = 0
    warn_timeout = 300
    def post_config_hook(self):
        """Initialize runtime state and load the OAuth 2.0 credentials."""
        # One show/hide toggle flag per displayed event (see button_toggle).
        self.button_states = [False] * self.num_events
        self.events = None
        self.no_update = False
        self.client_secret = os.path.expanduser(self.client_secret)
        self.auth_token = os.path.expanduser(self.auth_token)
        # May run the interactive OAuth flow (and restart i3) on first use.
        self.credentials = self._get_credentials()
        self.is_authorized = False
        self.first_run = True
    def _get_credentials(self):
        """
        Gets valid user credentials from storage.

        If nothing has been stored, or if the stored credentials are invalid,
        the OAuth2 flow is completed to obtain the new credentials.

        Returns: Credentials, the obtained credential.
        """
        # Make sure the directories for the token and client secret exist.
        client_secret_path = os.path.dirname(self.client_secret)
        auth_token_path = os.path.dirname(self.auth_token)
        if not os.path.exists(auth_token_path):
            os.makedirs(auth_token_path)
        if not os.path.exists(client_secret_path):
            os.makedirs(client_secret_path)
        flags = tools.argparser.parse_args(args=[])
        store = Storage(self.auth_token)
        credentials = store.get()
        if not credentials or credentials.invalid:
            try:
                flow = client.flow_from_clientsecrets(self.client_secret,
                                                      SCOPES)
                flow.user_agent = APPLICATION_NAME
                if flags:
                    credentials = tools.run_flow(flow, store, flags)
                else: # Needed only for compatibility with Python 2.6
                    credentials = tools.run(flow, store)
            except clientsecrets.InvalidClientSecretsError:
                raise Exception('missing client_secret')
            """
            Have to restart i3 after getting credentials to prevent bad output.
            This only has to be done once on the first run of the module.
            """
            self.py3.command_run('i3-msg restart')
        return credentials
def _authorize_credentials(self):
"""
Fetches an access/refresh token by authorizing OAuth 2.0 credentials.
Returns: True, if the authorization was successful.
False, if a ServerNotFoundError is thrown.
"""
try:
http = self.credentials.authorize(httplib2.Http())
self.service = discovery.build('calendar', 'v3', http=http)
return True
except ServerNotFoundError:
return False
    def _get_events(self):
        """
        Fetches events from the calendar into a list.

        Events are fetched for the next events_within_hours hours, filtered
        by RSVP status, all-day setting and the blacklist, and truncated to
        num_events entries.

        Returns: The list of events.
        """
        self.last_update = datetime.datetime.now()
        time_min = datetime.datetime.utcnow()
        time_max = time_min + datetime.timedelta(hours=self.events_within_hours)
        events = []
        try:
            eventsResult = self.service.events().list(
                calendarId='primary',
                timeMax=time_max.isoformat() + 'Z',  # 'Z' indicates UTC time
                timeMin=time_min.isoformat() + 'Z',  # 'Z' indicates UTC time
                singleEvents=True,
                orderBy='startTime').execute(num_retries=5)
        except Exception:
            # On API failure fall back to the previously fetched list (or []).
            return self.events or events
        else:
            for event in eventsResult.get('items', []):
                # filter out events that we did not accept (default)
                # unless we organized them with no attendees
                i_organized = event.get('organizer', {}).get('self', False)
                has_attendees = event.get('attendees', [])
                # for/else: 'break' means our own RSVP matched self.response;
                # the 'else' branch runs only when no such RSVP was found.
                for attendee in event.get('attendees', []):
                    if attendee.get('self') is True:
                        if attendee[
                                'responseStatus'] in self.response:
                            break
                else:
                    # we did not organize the event or we did not accept it
                    if not i_organized or has_attendees:
                        continue
                # strip and lower case output if needed
                for key in ['description', 'location', 'summary']:
                    event[key] = event.get(key, '').strip()
                    if self.force_lowercase is True:
                        event[key] = event[key].lower()
                # ignore all day events if configured
                # (all-day events carry a 'date' instead of a 'dateTime')
                if event['start'].get('date') is not None:
                    if self.ignore_all_day_events:
                        continue
                # filter out blacklisted event names (case insensitive)
                if event['summary'] is not None:
                    if event['summary'].lower() \
                            in map(lambda e: e.lower(), self.blacklist_events):
                        continue
                events.append(event)
            return events[:self.num_events]
def _check_warn_threshold(self, time_to, event_dict):
"""
Checks if the time until an event starts is less than or equal to the
warn_threshold. If True, issue a warning with self.py3.notify_user.
"""
if time_to['total_minutes'] <= self.warn_threshold:
warn_message = self.py3.safe_format(self.format_notification,
event_dict)
self.py3.notify_user(warn_message, 'warning', self.warn_timeout)
def _gstr_to_date(self, date_str):
""" Returns a dateime object from calendar date string."""
return parser.parse(date_str).replace(tzinfo=tzlocal())
def _gstr_to_datetime(self, date_time_str):
""" Returns a datetime object from calendar date/time string."""
return parser.parse(date_time_str)
def _datetime_to_str(self, date_time, dt_format):
""" Returns a strftime formatted string from a datetime object."""
return date_time.strftime(dt_format)
def _delta_time(self, date_time):
"""
Returns in a dict the number of days/hours/minutes and total minutes
until date_time.
"""
now = datetime.datetime.now(tzlocal())
diff = date_time - now
days = int(diff.days)
hours = int(diff.seconds / 3600)
minutes = int((diff.seconds / 60) - (hours * 60)) + 1
total_minutes = int((diff.seconds / 60) + (days * 24 * 60)) + 1
return {
'days': days,
'hours': hours,
'minutes': minutes,
'total_minutes': total_minutes
}
def _format_timedelta(self, index, time_delta, is_current):
"""
Formats the dict time_to containg days/hours/minutes until an
event starts into a composite according to time_to_formatted.
Returns: A formatted composite.
"""
time_delta_formatted = ''
if time_delta['total_minutes'] <= self.time_to_max:
time_delta_formatted = self.py3.safe_format(
self.format_timer, {
'days': time_delta['days'],
'hours': time_delta['hours'],
'minutes': time_delta['minutes'],
'is_current': is_current,
}
)
return time_delta_formatted
def _build_response(self):
"""
Builds the composite reponse to be output by the module by looping
through all events and formatting the necessary strings.
Returns: A composite containing the individual response for each event.
"""
responses = []
self.event_urls = []
for index, event in enumerate(self.events):
self.py3.threshold_get_color(index + 1, 'event')
self.py3.threshold_get_color(index + 1, 'time')
event_dict = {}
event_dict['summary'] = event.get('summary')
event_dict['location'] = event.get('location')
event_dict['description'] = event.get('description')
self.event_urls.append(event['htmlLink'])
if event['start'].get('date') is not None:
start_dt = self._gstr_to_date(event['start'].get('date'))
end_dt = self._gstr_to_date(event['end'].get('date'))
else:
start_dt = self._gstr_to_datetime(
event['start'].get('dateTime'))
end_dt = self._gstr_to_datetime(event['end'].get('dateTime'))
if end_dt < datetime.datetime.now(tzlocal()):
continue
event_dict['start_time'] = self._datetime_to_str(start_dt,
self.format_time)
event_dict['end_time'] = self._datetime_to_str(end_dt,
self.format_time)
event_dict['start_date'] = self._datetime_to_str(start_dt,
self.format_date)
event_dict['end_date'] = self._datetime_to_str(end_dt,
self.format_date)
time_delta = self._delta_time(start_dt)
if time_delta['days'] < 0:
time_delta = self._delta_time(end_dt)
is_current = True
else:
is_current = False
event_dict['format_timer'] = self._format_timedelta(
index, time_delta, is_current)
if self.warn_threshold > 0:
self._check_warn_threshold(time_delta, event_dict)
event_formatted = self.py3.safe_format(
self.format_event, {
'is_toggled': self.button_states[index],
'summary': event_dict['summary'],
'location': event_dict['location'],
'description': event_dict['description'],
'start_time': event_dict['start_time'],
'end_time': event_dict['end_time'],
'start_date': event_dict['start_date'],
'end_date': event_dict['end_date'],
'format_timer': event_dict['format_timer'],
}
)
self.py3.composite_update(event_formatted, {'index': index})
responses.append(event_formatted)
self.no_update = False
format_separator = self.py3.safe_format(self.format_separator)
self.py3.composite_update(format_separator, {'index': 'sep'})
responses = self.py3.composite_join(format_separator, responses)
return {'events': responses}
def google_calendar(self):
"""
The method that outputs the response.
First, we check credential authorization. If no authorization, we
display an error message, and try authorizing again in 5 seconds.
Otherwise, we fetch the events, build the response, and output
the resulting composite.
"""
composite = {}
if not self.is_authorized:
cached_until = 0
self.is_authorized = self._authorize_credentials()
else:
if not self.no_update:
self.events = self._get_events()
composite = self._build_response()
cached_until = self.cache_timeout
return {
'cached_until': self.py3.time_in(cached_until),
'composite': self.py3.safe_format(self.format, composite)
}
def on_click(self, event):
if self.is_authorized and self.events is not None:
"""
If button_refresh is clicked, we allow the events to be updated
if the last event update occured at least 1 second ago. This
prevents a bug that can crash py3status since refreshing the
module too fast results in incomplete event information being
fetched as _get_events() is called repeatedly.
Otherwise, we disable event updates.
"""
self.no_update = True
button = event['button']
button_index = event['index']
if button_index == 'sep':
self.py3.prevent_refresh()
elif button == self.button_refresh:
now = datetime.datetime.now()
diff = (now - self.last_update).seconds
if diff > 1:
self.no_update = False
elif button == self.button_toggle:
self.button_states[button_index] = \
not self.button_states[button_index]
elif button == self.button_open:
self.py3.command_run('xdg-open ' + self.event_urls[
button_index])
self.py3.prevent_refresh()
else:
self.py3.prevent_refresh()
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Mikhail Yohman (@FragmentedPacket) <mikhail.yohman@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: netbox_ip_address
short_description: Creates or removes IP addresses from Netbox
description:
- Creates or removes IP addresses from Netbox
notes:
- Tags should be defined as a YAML list
- This should be ran with connection C(local) and hosts C(localhost)
author:
- Mikhail Yohman (@FragmentedPacket)
- Anthony Ruhier (@Anthony25)
requirements:
- pynetbox
version_added: '2.8'
options:
netbox_url:
description:
- URL of the Netbox instance resolvable by Ansible control host
required: true
netbox_token:
description:
- The token created within Netbox to authorize API access
required: true
data:
description:
- Defines the IP address configuration
suboptions:
family:
description:
- Specifies with address family the IP address belongs to
choices:
- 4
- 6
address:
description:
- Required if state is C(present)
prefix:
description:
- |
With state C(present), if an interface is given, it will ensure
that an IP inside this prefix (and vrf, if given) is attached
to this interface. Otherwise, it will get the next available IP
of this prefix and attach it.
With state C(new), it will force to get the next available IP in
this prefix. If an interface is given, it will also force to attach
it.
Required if state is C(present) or C(new) when no address is given.
Unused if an address is specified.
vrf:
description:
- VRF that IP address is associated with
tenant:
description:
- The tenant that the device will be assigned to
status:
description:
- The status of the IP address
choices:
- Active
- Reserved
- Deprecated
- DHCP
role:
description:
- The role of the IP address
choices:
- Loopback
- Secondary
- Anycast
- VIP
- VRRP
- HSRP
- GLBP
- CARP
interface:
description:
- |
The name and device of the interface that the IP address should be assigned to
Required if state is C(present) and a prefix specified.
description:
description:
- The description of the interface
nat_inside:
description:
- The inside IP address this IP is assigned to
tags:
description:
- Any tags that the IP address may need to be associated with
custom_fields:
description:
- must exist in Netbox
required: true
state:
description:
- |
Use C(present), C(new) or C(absent) for adding, force adding or removing.
C(present) will check if the IP is already created, and return it if
true. C(new) will force to create it anyway (useful for anycasts, for
example).
choices: [ absent, new, present ]
default: present
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
default: 'yes'
type: bool
'''
EXAMPLES = r'''
- name: "Test Netbox IP address module"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create IP address within Netbox with only required information
netbox_ip_address:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
address: 192.168.1.10
state: present
- name: Force to create (even if it already exists) the IP
netbox_ip_address:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
address: 192.168.1.10
state: new
- name: Get a new available IP inside 192.168.1.0/24
netbox_ip_address:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
prefix: 192.168.1.0/24
state: new
- name: Delete IP address within netbox
netbox_ip_address:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
address: 192.168.1.10
state: absent
- name: Create IP address with several specified options
netbox_ip_address:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
family: 4
address: 192.168.1.20
vrf: Test
tenant: Test Tenant
status: Reserved
role: Loopback
description: Test description
tags:
- Schnozzberry
state: present
- name: Create IP address and assign a nat_inside IP
netbox_ip_address:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
family: 4
address: 192.168.1.30
vrf: Test
nat_inside:
address: 192.168.1.20
vrf: Test
interface:
name: GigabitEthernet1
device: test100
- name: Ensure that an IP inside 192.168.1.0/24 is attached to GigabitEthernet1
netbox_ip_address:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
prefix: 192.168.1.0/24
vrf: Test
interface:
name: GigabitEthernet1
device: test100
state: present
- name: Attach a new available IP of 192.168.1.0/24 to GigabitEthernet1
netbox_ip_address:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
prefix: 192.168.1.0/24
vrf: Test
interface:
name: GigabitEthernet1
device: test100
state: new
'''
RETURN = r'''
ip_address:
description: Serialized object as created or already existent within Netbox
returned: on creation
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
'''
import json
import traceback
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.net_tools.netbox.netbox_utils import (
find_ids,
normalize_data,
create_netbox_object,
delete_netbox_object,
update_netbox_object,
IP_ADDRESS_ROLE,
IP_ADDRESS_STATUS
)
from ansible.module_utils.compat import ipaddress
from ansible.module_utils._text import to_text
PYNETBOX_IMP_ERR = None
try:
import pynetbox
HAS_PYNETBOX = True
except ImportError:
PYNETBOX_IMP_ERR = traceback.format_exc()
HAS_PYNETBOX = False
def main():
'''
Main entry point for module execution
'''
argument_spec = dict(
netbox_url=dict(type="str", required=True),
netbox_token=dict(type="str", required=True, no_log=True),
data=dict(type="dict", required=True),
state=dict(required=False, default='present', choices=['present', 'absent', 'new']),
validate_certs=dict(type="bool", default=True)
)
global module
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Fail module if pynetbox is not installed
if not HAS_PYNETBOX:
module.fail_json(msg=missing_required_lib('pynetbox'), exception=PYNETBOX_IMP_ERR)
# Assign variables to be used with module
changed = False
app = 'ipam'
endpoint = 'ip_addresses'
url = module.params["netbox_url"]
token = module.params["netbox_token"]
data = module.params["data"]
state = module.params["state"]
validate_certs = module.params["validate_certs"]
# Attempt to create Netbox API object
try:
nb = pynetbox.api(url, token=token, ssl_verify=validate_certs)
except Exception:
module.fail_json(msg="Failed to establish connection to Netbox API")
try:
nb_app = getattr(nb, app)
except AttributeError:
module.fail_json(msg="Incorrect application specified: %s" % (app))
nb_endpoint = getattr(nb_app, endpoint)
norm_data = normalize_data(data)
try:
norm_data = _check_and_adapt_data(nb, norm_data)
if state in ("new", "present"):
return _handle_state_new_present(
module, state, nb_app, nb_endpoint, norm_data
)
elif state == "absent":
return module.exit_json(
**ensure_ip_address_absent(nb_endpoint, norm_data)
)
else:
return module.fail_json(msg="Invalid state %s" % state)
except pynetbox.RequestError as e:
return module.fail_json(msg=json.loads(e.error))
except ValueError as e:
return module.fail_json(msg=str(e))
def _check_and_adapt_data(nb, data):
data = find_ids(nb, data)
if data.get("vrf") and not isinstance(data["vrf"], int):
raise ValueError(
"%s does not exist - Please create VRF" % (data["vrf"])
)
if data.get("status"):
data["status"] = IP_ADDRESS_STATUS.get(data["status"].lower())
if data.get("role"):
data["role"] = IP_ADDRESS_ROLE.get(data["role"].lower())
return data
def _handle_state_new_present(module, state, nb_app, nb_endpoint, data):
if data.get("address"):
if state == "present":
return module.exit_json(
**ensure_ip_address_present(nb_endpoint, data)
)
elif state == "new":
return module.exit_json(
**create_ip_address(nb_endpoint, data)
)
else:
if state == "present":
return module.exit_json(
**ensure_ip_in_prefix_present_on_netif(
nb_app, nb_endpoint, data
)
)
elif state == "new":
return module.exit_json(
**get_new_available_ip_address(nb_app, data)
)
def ensure_ip_address_present(nb_endpoint, data):
"""
:returns dict(ip_address, msg, changed): dictionary resulting of the request,
where 'ip_address' is the serialized ip fetched or newly created in Netbox
"""
if not isinstance(data, dict):
changed = False
return {"msg": data, "changed": changed}
try:
nb_addr = _search_ip(nb_endpoint, data)
except ValueError:
return _error_multiple_ip_results(data)
result = {}
if not nb_addr:
return create_ip_address(nb_endpoint, data)
else:
ip_addr, diff = update_netbox_object(nb_addr, data, module.check_mode)
if ip_addr is False:
module.fail_json(
msg="Request failed, couldn't update IP: %s" % (data["address"])
)
if diff:
msg = "IP Address %s updated" % (data["address"])
changed = True
result["diff"] = diff
else:
ip_addr = nb_addr.serialize()
changed = False
msg = "IP Address %s already exists" % (data["address"])
return {"ip_address": ip_addr, "msg": msg, "changed": changed}
def _search_ip(nb_endpoint, data):
get_query_params = {"address": data["address"]}
if data.get("vrf"):
get_query_params["vrf_id"] = data["vrf"]
ip_addr = nb_endpoint.get(**get_query_params)
return ip_addr
def _error_multiple_ip_results(data):
changed = False
if "vrf" in data:
return {"msg": "Returned more than result", "changed": changed}
else:
return {
"msg": "Returned more than one result - Try specifying VRF.",
"changed": changed
}
def create_ip_address(nb_endpoint, data):
if not isinstance(data, dict):
changed = False
return {"msg": data, "changed": changed}
ip_addr, diff = create_netbox_object(nb_endpoint, data, module.check_mode)
changed = True
msg = "IP Addresses %s created" % (data["address"])
return {"ip_address": ip_addr, "msg": msg, "changed": changed, "diff": diff}
def ensure_ip_in_prefix_present_on_netif(nb_app, nb_endpoint, data):
"""
:returns dict(ip_address, msg, changed): dictionary resulting of the request,
where 'ip_address' is the serialized ip fetched or newly created in Netbox
"""
if not isinstance(data, dict):
changed = False
return {"msg": data, "changed": changed}
if not data.get("interface") or not data.get("prefix"):
raise ValueError("A prefix and interface are required")
get_query_params = {
"interface_id": data["interface"], "parent": data["prefix"],
}
if data.get("vrf"):
get_query_params["vrf_id"] = data["vrf"]
attached_ips = nb_endpoint.filter(**get_query_params)
if attached_ips:
ip_addr = attached_ips[-1].serialize()
changed = False
msg = "IP Address %s already attached" % (ip_addr["address"])
return {"ip_address": ip_addr, "msg": msg, "changed": changed}
else:
return get_new_available_ip_address(nb_app, data)
def get_new_available_ip_address(nb_app, data):
prefix_query = {"prefix": data["prefix"]}
if data.get("vrf"):
prefix_query["vrf_id"] = data["vrf"]
result = {}
prefix = nb_app.prefixes.get(**prefix_query)
if not prefix:
changed = False
msg = "%s does not exist - please create first" % (data["prefix"])
return {"msg": msg, "changed": changed}
elif prefix.available_ips.list():
ip_addr, diff = create_netbox_object(prefix.available_ips, data, module.check_mode)
changed = True
msg = "IP Addresses %s created" % (ip_addr["address"])
result["diff"] = diff
else:
changed = False
msg = "No available IPs available within %s" % (data['prefix'])
return {"msg": msg, "changed": changed}
result.update({"ip_address": ip_addr, "msg": msg, "changed": changed})
return result
def _get_prefix_id(nb_app, prefix, vrf_id=None):
ipaddr_prefix = ipaddress.ip_network(prefix)
network = to_text(ipaddr_prefix.network_address)
mask = ipaddr_prefix.prefixlen
prefix_query_params = {
"prefix": network,
"mask_length": mask
}
if vrf_id:
prefix_query_params["vrf_id"] = vrf_id
prefix_id = nb_app.prefixes.get(prefix_query_params)
if not prefix_id:
if vrf_id:
raise ValueError("Prefix %s does not exist in VRF %s - Please create it" % (prefix, vrf_id))
else:
raise ValueError("Prefix %s does not exist - Please create it" % (prefix))
return prefix_id
def ensure_ip_address_absent(nb_endpoint, data):
"""
:returns dict(msg, changed)
"""
if not isinstance(data, dict):
changed = False
return {"msg": data, "changed": changed}
try:
ip_addr = _search_ip(nb_endpoint, data)
except ValueError:
return _error_multiple_ip_results(data)
result = {}
if ip_addr:
dummy, diff = delete_netbox_object(ip_addr, module.check_mode)
changed = True
msg = "IP Address %s deleted" % (data["address"])
result["diff"] = diff
else:
changed = False
msg = "IP Address %s already absent" % (data["address"])
result.update({"msg": msg, "changed": changed})
return result
if __name__ == "__main__":
main()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, IO, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceOperations:
"""ServiceOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.storage.blob.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def set_properties(
self,
storage_service_properties: "_models.StorageServiceProperties",
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
**kwargs: Any
) -> None:
"""Sets properties for a storage account's Blob service endpoint, including properties for Storage
Analytics and CORS (Cross-Origin Resource Sharing) rules.
:param storage_service_properties: The StorageService properties.
:type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "service"
comp = "properties"
content_type = kwargs.pop("content_type", "application/xml")
accept = "application/xml"
# Construct URL
url = self.set_properties.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(storage_service_properties, 'StorageServiceProperties', is_xml=True)
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
if cls:
return cls(pipeline_response, None, response_headers)
set_properties.metadata = {'url': '/'} # type: ignore
async def get_properties(
self,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
**kwargs: Any
) -> "_models.StorageServiceProperties":
"""gets the properties of a storage account's Blob service, including properties for Storage
Analytics and CORS (Cross-Origin Resource Sharing) rules.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageServiceProperties, or the result of cls(response)
:rtype: ~azure.storage.blob.models.StorageServiceProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "service"
comp = "properties"
accept = "application/xml"
# Construct URL
url = self.get_properties.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
deserialized = self._deserialize('StorageServiceProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_properties.metadata = {'url': '/'} # type: ignore
async def get_statistics(
self,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
**kwargs: Any
) -> "_models.StorageServiceStats":
"""Retrieves statistics related to replication for the Blob service. It is only available on the
secondary location endpoint when read-access geo-redundant replication is enabled for the
storage account.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageServiceStats, or the result of cls(response)
:rtype: ~azure.storage.blob.models.StorageServiceStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageServiceStats"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "service"
comp = "stats"
accept = "application/xml"
# Construct URL
url = self.get_statistics.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('StorageServiceStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_statistics.metadata = {'url': '/'} # type: ignore
    async def list_containers_segment(
        self,
        prefix: Optional[str] = None,
        marker: Optional[str] = None,
        maxresults: Optional[int] = None,
        include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None,
        timeout: Optional[int] = None,
        request_id_parameter: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.ListContainersSegmentResponse":
        """The List Containers Segment operation returns a list of the containers under the specified
        account.
        :param prefix: Filters the results to return only containers whose name begins with the
        specified prefix.
        :type prefix: str
        :param marker: A string value that identifies the portion of the list of containers to be
        returned with the next listing operation. The operation returns the NextMarker value within the
        response body if the listing operation did not return all containers remaining to be listed
        with the current page. The NextMarker value can be used as the value for the marker parameter
        in a subsequent call to request the next page of list items. The marker value is opaque to the
        client.
        :type marker: str
        :param maxresults: Specifies the maximum number of containers to return. If the request does
        not specify maxresults, or specifies a value greater than 5000, the server will return up to
        5000 items. Note that if the listing operation crosses a partition boundary, then the service
        will return a continuation token for retrieving the remainder of the results. For this reason,
        it is possible that the service will return fewer results than specified by maxresults, or than
        the default of 5000.
        :type maxresults: int
        :param include: Include this parameter to specify that the container's metadata be returned as
        part of the response body.
        :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType]
        :param timeout: The timeout parameter is expressed in seconds. For more information, see
        :code:`<a
        href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
        Timeouts for Blob Service Operations.</a>`.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
        limit that is recorded in the analytics logs when storage analytics logging is enabled.
        :type request_id_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ListContainersSegmentResponse, or the result of cls(response)
        :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListContainersSegmentResponse"]
        # Map well-known HTTP failure codes onto typed azure-core exceptions;
        # callers may extend/override the mapping through the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Fixed query value selecting the "list containers" service operation.
        comp = "list"
        accept = "application/xml"
        # Construct URL
        url = self.list_containers_segment.metadata['url']  # type: ignore
        path_format_arguments = {
            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters (optional ones are only sent when supplied)
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
        if prefix is not None:
            query_parameters['prefix'] = self._serialize.query("prefix", prefix, 'str')
        if marker is not None:
            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
        if maxresults is not None:
            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
        if include is not None:
            # 'include' is serialized as a single comma-delimited query value.
            query_parameters['include'] = self._serialize.query("include", include, '[str]', div=',')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
        if request_id_parameter is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
            raise HttpResponseError(response=response, model=error)
        # Selected response headers are handed back to the caller alongside the model.
        response_headers = {}
        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        deserialized = self._deserialize('ListContainersSegmentResponse', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    list_containers_segment.metadata = {'url': '/'}  # type: ignore
    async def get_user_delegation_key(
        self,
        key_info: "_models.KeyInfo",
        timeout: Optional[int] = None,
        request_id_parameter: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.UserDelegationKey":
        """Retrieves a user delegation key for the Blob service. This is only a valid operation when using
        bearer token authentication.
        :param key_info: Key information.
        :type key_info: ~azure.storage.blob.models.KeyInfo
        :param timeout: The timeout parameter is expressed in seconds. For more information, see
        :code:`<a
        href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
        Timeouts for Blob Service Operations.</a>`.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
        limit that is recorded in the analytics logs when storage analytics logging is enabled.
        :type request_id_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: UserDelegationKey, or the result of cls(response)
        :rtype: ~azure.storage.blob.models.UserDelegationKey
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.UserDelegationKey"]
        # Map well-known HTTP failure codes onto typed azure-core exceptions;
        # callers may extend/override the mapping through the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Fixed query values selecting the "get user delegation key" operation.
        restype = "service"
        comp = "userdelegationkey"
        content_type = kwargs.pop("content_type", "application/xml")
        accept = "application/xml"
        # Construct URL
        url = self.get_user_delegation_key.metadata['url']  # type: ignore
        path_format_arguments = {
            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
        if request_id_parameter is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the KeyInfo payload as an XML body and POST it.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(key_info, 'KeyInfo', is_xml=True)
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
            raise HttpResponseError(response=response, model=error)
        # Selected response headers are handed back to the caller alongside the model.
        response_headers = {}
        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
        deserialized = self._deserialize('UserDelegationKey', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    get_user_delegation_key.metadata = {'url': '/'}  # type: ignore
async def get_account_info(
self,
**kwargs: Any
) -> None:
"""Returns the sku name and account kind.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "account"
comp = "properties"
accept = "application/xml"
# Construct URL
url = self.get_account_info.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name'))
response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind'))
response_headers['x-ms-is-hns-enabled']=self._deserialize('bool', response.headers.get('x-ms-is-hns-enabled'))
if cls:
return cls(pipeline_response, None, response_headers)
get_account_info.metadata = {'url': '/'} # type: ignore
async def submit_batch(
self,
content_length: int,
multipart_content_type: str,
body: IO,
timeout: Optional[int] = None,
request_id_parameter: Optional[str] = None,
**kwargs: Any
) -> IO:
"""The Batch operation allows multiple API calls to be embedded into a single HTTP request.
:param content_length: The length of the request.
:type content_length: long
:param multipart_content_type: Required. The value of this header must be multipart/mixed with
a batch boundary. Example header value: multipart/mixed; boundary=batch_:code:`<GUID>`.
:type multipart_content_type: str
:param body: Initial data.
:type body: IO
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
Timeouts for Blob Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IO, or the result of cls(response)
:rtype: IO
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[IO]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = "batch"
content_type = kwargs.pop("content_type", "application/xml")
accept = "application/xml"
# Construct URL
url = self.submit_batch.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long')
header_parameters['Content-Type'] = self._serialize.header("multipart_content_type", multipart_content_type, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'IO', is_xml=True)
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Content-Type']=self._deserialize('str', response.headers.get('Content-Type'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
deserialized = response.stream_download(self._client._pipeline)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
submit_batch.metadata = {'url': '/'} # type: ignore
    async def filter_blobs(
        self,
        timeout: Optional[int] = None,
        request_id_parameter: Optional[str] = None,
        where: Optional[str] = None,
        marker: Optional[str] = None,
        maxresults: Optional[int] = None,
        **kwargs: Any
    ) -> "_models.FilterBlobSegment":
        """The Filter Blobs operation enables callers to list blobs across all containers whose tags match
        a given search expression. Filter blobs searches across all containers within a storage
        account but can be scoped within the expression to a single container.
        :param timeout: The timeout parameter is expressed in seconds. For more information, see
        :code:`<a
        href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
        Timeouts for Blob Service Operations.</a>`.
        :type timeout: int
        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
        limit that is recorded in the analytics logs when storage analytics logging is enabled.
        :type request_id_parameter: str
        :param where: Filters the results to return only blobs whose tags match the
        specified expression.
        :type where: str
        :param marker: A string value that identifies the portion of the list of containers to be
        returned with the next listing operation. The operation returns the NextMarker value within the
        response body if the listing operation did not return all containers remaining to be listed
        with the current page. The NextMarker value can be used as the value for the marker parameter
        in a subsequent call to request the next page of list items. The marker value is opaque to the
        client.
        :type marker: str
        :param maxresults: Specifies the maximum number of containers to return. If the request does
        not specify maxresults, or specifies a value greater than 5000, the server will return up to
        5000 items. Note that if the listing operation crosses a partition boundary, then the service
        will return a continuation token for retrieving the remainder of the results. For this reason,
        it is possible that the service will return fewer results than specified by maxresults, or than
        the default of 5000.
        :type maxresults: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FilterBlobSegment, or the result of cls(response)
        :rtype: ~azure.storage.blob.models.FilterBlobSegment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FilterBlobSegment"]
        # Map well-known HTTP failure codes onto typed azure-core exceptions;
        # callers may extend/override the mapping through the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Fixed query value selecting the "find blobs by tags" operation.
        comp = "blobs"
        accept = "application/xml"
        # Construct URL
        url = self.filter_blobs.metadata['url']  # type: ignore
        path_format_arguments = {
            'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters (optional ones are only sent when supplied)
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
        if where is not None:
            query_parameters['where'] = self._serialize.query("where", where, 'str')
        if marker is not None:
            query_parameters['marker'] = self._serialize.query("marker", marker, 'str')
        if maxresults is not None:
            query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1)
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
        if request_id_parameter is not None:
            header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
            raise HttpResponseError(response=response, model=error)
        # Selected response headers are handed back to the caller alongside the model.
        response_headers = {}
        response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
        response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
        response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
        response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
        deserialized = self._deserialize('FilterBlobSegment', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, response_headers)
        return deserialized
    filter_blobs.metadata = {'url': '/'}  # type: ignore
| |
import cStringIO
import mock
import os
import pytest
import re
import shutil
import stat
import tempfile
import time
from collections import OrderedDict
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from awx.main.expect import run, isolated_manager
from django.conf import settings
# Directory containing this test module and the module's own filename; the
# tests below use HERE as the child process cwd and expect FILENAME in output.
HERE, FILENAME = os.path.split(__file__)
@pytest.fixture(scope='function')
def rsa_key(request):
    """Yield a (PEM-encoded encrypted private key, passphrase) pair.

    A fresh 1024-bit RSA key is generated per test; the small size keeps
    generation fast and these keys never leave the test run.
    """
    secret = 'passme'
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=1024,
        backend=default_backend()
    )
    pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.BestAvailableEncryption(secret)
    )
    return (pem, secret)
@pytest.fixture(scope='function')
def private_data_dir(request):
    """Yield a per-test scratch directory, removed when the test finishes."""
    scratch = tempfile.mkdtemp(prefix='ansible_awx_unit_test')
    request.addfinalizer(lambda: shutil.rmtree(scratch))
    return scratch
@pytest.fixture(autouse=True)
def mock_sleep(request):
    # the process teardown mechanism uses `time.sleep` to wait on processes to
    # respond to SIGTERM; these are tests and don't care about being nice
    patcher = mock.patch('time.sleep')
    patcher.start()
    request.addfinalizer(patcher.stop)
def test_simple_spawn():
    """`ls -la` run in this test's directory succeeds and lists this file."""
    buff = cStringIO.StringIO()
    status, rc = run.run_pexpect(
        ['ls', '-la'],
        HERE,
        {},
        buff,
        cancelled_callback=lambda: False,
    )
    assert status == 'successful'
    assert rc == 0
    assert FILENAME in buff.getvalue()
def test_error_rc():
    """An invalid flag makes `ls` exit non-zero and be reported as failed."""
    buff = cStringIO.StringIO()
    status, rc = run.run_pexpect(
        ['ls', '-nonsense'],
        HERE,
        {},
        buff,
        cancelled_callback=lambda: False,
    )
    assert status == 'failed'
    # I'd expect 2, but we shouldn't risk making this test platform-dependent
    assert rc > 0
def test_cancel_callback_error():
    """A raising cancelled_callback yields status 'error' and an explanation."""
    buff = cStringIO.StringIO()

    def exploding_callback():
        raise Exception('unique exception')

    fields = {}
    status, rc = run.run_pexpect(
        ['ls', '-la'],
        HERE,
        {},
        buff,
        cancelled_callback=exploding_callback,
        extra_update_fields=fields
    )
    assert status == 'error'
    assert rc == 0
    assert fields['job_explanation'] == "System error during job execution, check system logs"
def test_env_vars():
    """Environment passed to run_pexpect is visible to the child process."""
    buff = cStringIO.StringIO()
    status, rc = run.run_pexpect(
        ['python', '-c', 'import os; print os.getenv("X_MY_ENV")'],
        HERE,
        {'X_MY_ENV': 'abc123'},
        buff,
        cancelled_callback=lambda: False,
    )
    assert status == 'successful'
    assert rc == 0
    assert 'abc123' in buff.getvalue()
def test_password_prompt():
    """A stdout line matching a prompt pattern gets its password written to stdin."""
    buff = cStringIO.StringIO()
    prompts = OrderedDict()
    prompts[re.compile(r'Password:\s*?$', re.M)] = 'secret123'
    status, rc = run.run_pexpect(
        ['python', '-c', 'import time; print raw_input("Password: "); time.sleep(.05)'],
        HERE,
        {},
        buff,
        cancelled_callback=lambda: False,
        expect_passwords=prompts
    )
    assert status == 'successful'
    assert rc == 0
    # the child echoes the password it received back to stdout
    assert 'secret123' in buff.getvalue()
def test_job_timeout():
    """A job outliving job_timeout is terminated and flagged with an explanation."""
    buff = cStringIO.StringIO()
    fields = {}
    status, rc = run.run_pexpect(
        ['python', '-c', 'import time; time.sleep(5)'],
        HERE,
        {},
        buff,
        cancelled_callback=lambda: False,
        extra_update_fields=fields,
        job_timeout=.01,
        pexpect_timeout=0,
    )
    assert status == 'failed'
    assert fields == {'job_explanation': 'Job terminated due to timeout'}
def test_manual_cancellation():
    """A truthy cancelled_callback cancels a job stuck waiting on stdin."""
    buff = cStringIO.StringIO()
    status, rc = run.run_pexpect(
        ['python', '-c', 'print raw_input("Password: ")'],
        HERE,
        {},
        buff,
        cancelled_callback=lambda: True,  # this callable will cause cancellation
        # the lack of password inputs will cause stdin to hang
        pexpect_timeout=0,
    )
    assert status == 'canceled'
def test_build_isolated_job_data(private_data_dir, rsa_key):
    # build_isolated_job_data() should populate the private data dir with:
    # a 'project' link to the job cwd, a user-only 'artifacts' dir, a
    # read-only 'args' file with the JSON-serialized argv, and an
    # '.rsync-filter' excluding VCS metadata and the env directory.
    pem, passphrase = rsa_key
    mgr = isolated_manager.IsolatedManager(
        ['ls', '-la'], HERE, {}, cStringIO.StringIO(), ''
    )
    mgr.private_data_dir = private_data_dir
    mgr.build_isolated_job_data()
    path = os.path.join(private_data_dir, 'project')
    assert os.path.isdir(path)
    # <private_data_dir>/project is a soft link to HERE, which is the directory
    # _this_ test file lives in
    assert os.path.exists(os.path.join(path, FILENAME))
    path = os.path.join(private_data_dir, 'artifacts')
    assert os.path.isdir(path)
    assert stat.S_IMODE(os.stat(path).st_mode) == stat.S_IXUSR + stat.S_IWUSR + stat.S_IRUSR  # user rwx
    path = os.path.join(private_data_dir, 'args')
    with open(path, 'r') as f:
        assert stat.S_IMODE(os.stat(path).st_mode) == stat.S_IRUSR  # user r/o
        assert f.read() == '["ls", "-la"]'
    path = os.path.join(private_data_dir, '.rsync-filter')
    with open(path, 'r') as f:
        data = f.read()
        assert data == '\n'.join([
            '- /project/.git',
            '- /project/.svn',
            '- /project/.hg',
            '- /artifacts/job_events/*-partial.json.tmp',
            '- /env'
        ])
def test_run_isolated_job(private_data_dir, rsa_key):
    # run_isolated_job() should execute the job inside the private data dir
    # and inject the AWX callback-plugin environment (PYTHONPATH,
    # ANSIBLE_STDOUT_CALLBACK, etc.) into the job env.  JOB_ID marks this as
    # a playbook-style job, which selects the 'awx_display' callback.
    env = {'JOB_ID': '1'}
    pem, passphrase = rsa_key
    mgr = isolated_manager.IsolatedManager(
        ['ls', '-la'], HERE, env, cStringIO.StringIO(), ''
    )
    mgr.private_data_dir = private_data_dir
    # Secrets bundle handed to the isolated node: job env, an ssh key and the
    # prompt pattern -> passphrase mapping used to unlock it.
    secrets = {
        'env': env,
        'passwords': {
            r'Enter passphrase for .*:\s*?$': passphrase
        },
        'ssh_key_data': pem
    }
    mgr.build_isolated_job_data()
    stdout = cStringIO.StringIO()
    # Mock environment variables for callback module
    with mock.patch('os.getenv') as env_mock:
        env_mock.return_value = '/path/to/awx/lib'
        status, rc = run.run_isolated_job(private_data_dir, secrets, stdout)
    assert status == 'successful'
    assert rc == 0
    assert FILENAME in stdout.getvalue()
    assert '/path/to/awx/lib' in env['PYTHONPATH']
    assert env['ANSIBLE_STDOUT_CALLBACK'] == 'awx_display'
    assert env['ANSIBLE_CALLBACK_PLUGINS'] == '/path/to/awx/lib/isolated_callbacks'
    assert env['AWX_ISOLATED_DATA_DIR'] == private_data_dir
def test_run_isolated_adhoc_command(private_data_dir, rsa_key):
    # Like test_run_isolated_job, but AD_HOC_COMMAND_ID marks this as an
    # ad-hoc command, which selects the 'minimal' stdout callback and runs
    # from the private data dir itself.
    env = {'AD_HOC_COMMAND_ID': '1'}
    pem, passphrase = rsa_key
    mgr = isolated_manager.IsolatedManager(
        ['pwd'], HERE, env, cStringIO.StringIO(), ''
    )
    mgr.private_data_dir = private_data_dir
    # Secrets bundle: job env, ssh key, and the prompt -> passphrase mapping.
    secrets = {
        'env': env,
        'passwords': {
            r'Enter passphrase for .*:\s*?$': passphrase
        },
        'ssh_key_data': pem
    }
    mgr.build_isolated_job_data()
    stdout = cStringIO.StringIO()
    # Mock environment variables for callback module
    with mock.patch('os.getenv') as env_mock:
        env_mock.return_value = '/path/to/awx/lib'
        status, rc = run.run_isolated_job(private_data_dir, secrets, stdout)
    assert status == 'successful'
    assert rc == 0
    # for ad-hoc jobs, `ansible` is invoked from the `private_data_dir`, so
    # an ad-hoc command that runs `pwd` should print `private_data_dir` to stdout
    assert private_data_dir in stdout.getvalue()
    assert '/path/to/awx/lib' in env['PYTHONPATH']
    assert env['ANSIBLE_STDOUT_CALLBACK'] == 'minimal'
    assert env['ANSIBLE_CALLBACK_PLUGINS'] == '/path/to/awx/lib/isolated_callbacks'
    assert env['AWX_ISOLATED_DATA_DIR'] == private_data_dir
def test_check_isolated_job(private_data_dir, rsa_key):
    # check() should sync artifacts from the isolated host via the
    # check_isolated.yml playbook, report the *remote* job's status/rc from
    # the synced artifact files, and replay the remote stdout into the
    # manager's output buffer.
    pem, passphrase = rsa_key
    stdout = cStringIO.StringIO()
    mgr = isolated_manager.IsolatedManager(['ls', '-la'], HERE, {}, stdout, '')
    mgr.private_data_dir = private_data_dir
    mgr.instance = mock.Mock(id=123, pk=123, verbosity=5, spec_set=['id', 'pk', 'verbosity'])
    mgr.started_at = time.time()
    mgr.host = 'isolated-host'
    os.mkdir(os.path.join(private_data_dir, 'artifacts'))
    with mock.patch('awx.main.expect.run.run_pexpect') as run_pexpect:
        def _synchronize_job_artifacts(args, cwd, env, buff, **kw):
            # Stand-in for the artifact-sync playbook run: simulate a remote
            # job that failed with rc 1 and wrote 'KABOOM!' to stdout.
            buff.write('checking job status...')
            for filename, data in (
                ['status', 'failed'],
                ['rc', '1'],
                ['stdout', 'KABOOM!'],
            ):
                with open(os.path.join(private_data_dir, 'artifacts', filename), 'w') as f:
                    f.write(data)
            return ('successful', 0)
        run_pexpect.side_effect = _synchronize_job_artifacts
        with mock.patch.object(mgr, '_missing_artifacts') as missing_artifacts:
            missing_artifacts.return_value = False
            status, rc = mgr.check(interval=0)
    assert status == 'failed'
    assert rc == 1
    assert stdout.getvalue() == 'KABOOM!'
    # Verify the exact ansible-playbook invocation used to poll the isolated node.
    run_pexpect.assert_called_with(
        [
            'ansible-playbook', 'check_isolated.yml',
            '-u', settings.AWX_ISOLATED_USERNAME,
            '-T', str(settings.AWX_ISOLATED_CONNECTION_TIMEOUT),
            '-i', 'isolated-host,',
            '-e', '{"src": "%s"}' % private_data_dir,
            '-vvvvv'
        ],
        '/awx_devel/awx/playbooks', mgr.management_env, mock.ANY,
        cancelled_callback=None,
        idle_timeout=0,
        job_timeout=0,
        pexpect_timeout=5,
        proot_cmd='bwrap'
    )
def test_check_isolated_job_timeout(private_data_dir, rsa_key):
    # With job_timeout=1 and started_at set to "now", the polling loop in
    # check() should give up on timeout and record a job_explanation, even
    # though the sync playbook itself keeps failing.
    pem, passphrase = rsa_key
    stdout = cStringIO.StringIO()
    extra_update_fields = {}
    mgr = isolated_manager.IsolatedManager(['ls', '-la'], HERE, {}, stdout, '',
                                           job_timeout=1,
                                           extra_update_fields=extra_update_fields)
    mgr.private_data_dir = private_data_dir
    mgr.instance = mock.Mock(id=123, pk=123, verbosity=5, spec_set=['id', 'pk', 'verbosity'])
    mgr.started_at = time.time()
    mgr.host = 'isolated-host'
    with mock.patch('awx.main.expect.run.run_pexpect') as run_pexpect:
        def _synchronize_job_artifacts(args, cwd, env, buff, **kw):
            # Simulate a sync attempt that writes progress but never succeeds.
            buff.write('checking job status...')
            return ('failed', 1)
        run_pexpect.side_effect = _synchronize_job_artifacts
        status, rc = mgr.check(interval=0)
    assert status == 'failed'
    assert rc == 1
    assert stdout.getvalue() == 'checking job status...'
    assert extra_update_fields['job_explanation'] == 'Job terminated due to timeout'
| |
#!/usr/bin/env python
import csv
import re
import argparse
import os
import subprocess
import logging
import sys
import getpass
import platform
#import toml
from argparse import RawTextHelpFormatter
from pcgr import pcgr_vars, utils
from pcgr.arg_checker import get_docker_image_version
from pcgr.utils import check_subprocess, getlogger, error_message, warn_message
# Mapping of numeric --panel_id values to human-readable virtual gene panel
# descriptions.  0 is the CPSR exploratory superpanel; 1-42 mirror Genomics
# England PanelApp panels.  Keys must stay stable: they are part of the CLI
# contract (--panel_id accepts comma-separated keys from this dict).
GE_panels = {
      0: "CPSR exploratory cancer predisposition panel (n = 433, Genomics England PanelApp / TCGA Germline Study / Cancer Gene Census / Other)",
      1: "Adult solid tumours cancer susceptibility (Genomics England PanelApp)",
      2: "Adult solid tumours for rare disease (Genomics England PanelApp)",
      3: "Bladder cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      4: "Brain cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      5: "Breast cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      6: "Childhood solid tumours cancer susceptibility (Genomics England PanelApp)",
      7: "Colorectal cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      8: "Endometrial cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      9: "Familial Tumours Syndromes of the central & peripheral Nervous system (Genomics England PanelApp)",
      10: "Familial breast cancer (Genomics England PanelApp)",
      11: "Familial melanoma (Genomics England PanelApp)",
      12: "Familial prostate cancer (Genomics England PanelApp)",
      13: "Familial rhabdomyosarcoma (Genomics England PanelApp)",
      14: "GI tract tumours (Genomics England PanelApp)",
      15: "Genodermatoses with malignancies (Genomics England PanelApp)",
      16: "Haematological malignancies cancer susceptibility (Genomics England PanelApp)",
      17: "Haematological malignancies for rare disease (Genomics England PanelApp)",
      18: "Head and neck cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      19: "Inherited MMR deficiency (Lynch Syndrome) - Genomics England PanelApp",
      20: "Inherited non-medullary thyroid cancer (Genomics England PanelApp)",
      21: "Inherited ovarian cancer (without breast cancer) (Genomics England PanelApp)",
      22: "Inherited pancreatic cancer (Genomics England PanelApp)",
      23: "Inherited polyposis (Genomics England PanelApp)",
      24: "Inherited predisposition to acute myeloid leukaemia (AML) - Genomics England PanelApp",
      25: "Inherited predisposition to GIST (Genomics England PanelApp)",
      26: "Inherited renal cancer (Genomics England PanelApp)",
      27: "Inherited phaeochromocytoma and paraganglioma (Genomics England PanelApp)",
      28: "Melanoma pertinent cancer susceptibility (Genomics England PanelApp)",
      29: "Multiple endocrine tumours (Genomics England PanelApp)",
      30: "Multiple monogenic benign skin tumours (Genomics England PanelApp)",
      31: "Neuroendocrine cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      32: "Neurofibromatosis Type 1 (Genomics England PanelApp)",
      33: "Ovarian cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      34: "Parathyroid Cancer (Genomics England PanelApp)",
      35: "Prostate cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      36: "Renal cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      37: "Rhabdoid tumour predisposition (Genomics England PanelApp)",
      38: "Sarcoma cancer susceptibility (Genomics England PanelApp)",
      39: "Sarcoma susceptbility (Genomics England PanelApp)",  # NOTE(review): "susceptbility" spelling presumably mirrors the upstream PanelApp panel name — verify before "fixing"
      40: "Thyroid cancer pertinent cancer susceptibility (Genomics England PanelApp)",
      41: "Tumour predisposition - childhood onset (Genomics England PanelApp)",
      42: "Upper gastrointestinal cancer pertinent cancer susceptibility (Genomics England PanelApp)"
}
# Pre-rendered "<id> = <description>" listing, embedded in the --panel_id help text.
panels = '\n'.join([f'{k} = {GE_panels[k]}' for k in GE_panels]) # for displaying in help
def get_args():
   """
   Build the CPSR command-line parser and return the parsed arguments as a dict.

   Returns:
      dict: mapping of argument destination names to their parsed values
            (i.e. vars(Namespace)).
   """
   program_description = "Cancer Predisposition Sequencing Reporter - report of " + \
      "clinically significant cancer-predisposing germline variants"
   program_options = " --input_vcf <INPUT_VCF> --pcgr_dir <PCGR_DIR> --output_dir <OUTPUT_DIR> --genome_assembly " + \
      " <GENOME_ASSEMBLY> --sample_id <SAMPLE_ID>"
   parser = argparse.ArgumentParser(description = program_description,
                                    formatter_class=RawTextHelpFormatter, usage="%(prog)s -h [options] " + str(program_options))
   # Drop argparse's built-in positional/optional groups so only the custom
   # groups defined below appear in --help output.
   parser._action_groups.pop()
   required = parser.add_argument_group('Required arguments')
   optional_panel = parser.add_argument_group("Panel options")
   optional_vep = parser.add_argument_group('VEP options')
   optional_vcfanno = parser.add_argument_group('vcfanno options')
   optional_other = parser.add_argument_group('Other options')
   ## Panel options
   optional_panel.add_argument('--panel_id',dest = "virtual_panel_id",type = str, default = "-1", help="Comma-separated string with identifier(s) of predefined virtual cancer predisposition gene panels,\n choose any combination of the following identifiers:\n" + str(panels))
   optional_panel.add_argument('--custom_list',dest = "custom_list",help="Provide custom list of genes from virtual panel 0 (single-column txt file with Ensembl gene identifiers),\n alternative to predefined panels provided with --panel_id)")
   optional_panel.add_argument('--custom_list_name',dest = "custom_list_name", default="Custom_Panel", help="Set name for custom made panel/list (single word - no whitespace), will be displayed in the report")
   optional_panel.add_argument('--diagnostic_grade_only', action="store_true",help="For panel_id's 1-42 (Genomics England PanelApp) - consider genes with a GREEN status only, default: %(default)s")
   ## Other options
   optional_other.add_argument('--force_overwrite', action = "store_true", help='By default, the script will fail with an error if any output file already exists.\n You can force the overwrite of existing result files by using this flag, default: %(default)s')
   #optional_other.add_argument('--version', action='version', version='%(prog)s ' + str(CPSR_VERSION))
   optional_other.add_argument('--basic',action="store_true",help="Run functional variant annotation on VCF through VEP/vcfanno, omit Tier assignment/report generation (STEP 4), default: %(default)s")
   optional_other.add_argument('--no_vcf_validate', action = "store_true",help="Skip validation of input VCF with Ensembl's vcf-validator, default: %(default)s")
   optional_other.add_argument('--docker_uid', dest='docker_user_id', help='Docker user ID. Default is the host system user ID. If you are experiencing permission errors,\n try setting this up to root (`--docker_uid root`), default: %(default)s')
   optional_other.add_argument('--no_docker', action='store_true', dest='no_docker', default=False, help='Run the CPSR workflow in a non-Docker mode, default: %(default)s')
   optional_other.add_argument('--preserved_info_tags', dest ='preserved_info_tags', default='None', help='Comma-separated string of VCF INFO tags from query VCF that should be kept in CPSR output TSV')
   optional_other.add_argument('--report_theme',choices = ['default','cerulean','journal','flatly','readable','spacelab','united','cosmo','lumen','paper','sandstone','simplex','yeti'], default = 'default', help='Visual report theme (rmarkdown), default: %(default)s' )
   optional_other.add_argument('--report_nonfloating_toc', action='store_true', help='Do not float the table of contents (TOC) in output HTML report, default: %(default)s')
   optional_other.add_argument('--report_table_display', choices = ['full','light'], default='light', help="Set the level of detail/comprehensiveness in interactive datables of HTML report, very comprehensive (option 'full') or slim/focused ('light'), default: %(default)s")
   optional_other.add_argument('--ignore_noncoding', action='store_true',dest='ignore_noncoding',default=False,help='Do not list non-coding variants in HTML report, default: %(default)s')
   optional_other.add_argument('--secondary_findings', action='store_true',dest='secondary_findings',default=False, help='Include variants found in ACMG-recommended list for secondary findings (v3.0), default: %(default)s')
   optional_other.add_argument('--gwas_findings', action='store_true',dest='gwas_findings',default=False, help='Report overlap with low to moderate cancer risk variants (tag SNPs) identified from genome-wide association studies, default: %(default)s')
   optional_other.add_argument('--gwas_p_value', type = float, default = 0.000005, dest = 'gwas_p_value',help='Required p-value for variants listed as hits from genome-wide association studies, default: %(default)s')
   optional_other.add_argument('--pop_gnomad',choices = ['afr','amr','eas','sas','asj','nfe','fin','global'], default='nfe', help='Population source in gnomAD used for variant frequency assessment (ACMG classification), default: %(default)s')
   optional_other.add_argument('--maf_upper_threshold', type = float, default = 0.9, dest = 'maf_upper_threshold',help='Upper MAF limit (gnomAD global population frequency) for variants to be included in the report, default: %(default)s')
   # fix: "exising" -> "existing" (typo in help text)
   optional_other.add_argument('--classify_all', action='store_true',dest='classify_all',help='Provide CPSR variant classifications (TIER 1-5) also for variants with existing ClinVar classifications in output TSV, default: %(default)s')
   optional_other.add_argument('--clinvar_ignore_noncancer', action='store_true', help='Ignore (exclude from report) ClinVar-classified variants reported only for phenotypes/conditions NOT related to cancer, default: %(default)s')
   optional_other.add_argument('--debug',action='store_true',default=False, help='Print full docker commands to log, default: %(default)s')
   ## vcfanno options
   optional_vcfanno.add_argument('--vcfanno_n_proc', default = 4, type = int, help="Number of vcfanno processes (option '-p' in vcfanno), default: %(default)s")
   ## VEP options
   optional_vep.add_argument('--vep_n_forks', default = 4, type = int, help="Number of forks (option '--fork' in VEP), default: %(default)s")
   optional_vep.add_argument('--vep_buffer_size', default = 500, type = int, help="Variant buffer size (variants read into memory simultaneously, option '--buffer_size' in VEP) " + \
      "\n- set lower to reduce memory usage, default: %(default)s")
   #optional_vep.add_argument('--vep_regulatory', action='store_true', help = 'Enable Variant Effect Predictor (VEP) to look for overlap with regulatory regions (option --regulatory in VEP).')
   optional_vep.add_argument('--vep_gencode_all', action='store_true', help = "Consider all GENCODE transcripts with Variant Effect Predictor (VEP) (option '--gencode_basic' in VEP is used by default).")
   optional_vep.add_argument('--vep_pick_order', default = "canonical,appris,biotype,ccds,rank,tsl,length,mane", help="Comma-separated string " + \
      "of ordered transcript properties for primary variant pick\n ( option '--pick_order' in VEP), default: %(default)s")
   optional_vep.add_argument('--vep_no_intergenic', action = "store_true", help="Skip intergenic variants during processing (option '--no_intergenic' in VEP), default: %(default)s")
   ## Required arguments
   required.add_argument('--input_vcf', help='VCF input file with germline query variants (SNVs/InDels).', required = True)
   required.add_argument('--pcgr_dir',help=f"Directory that contains the PCGR data bundle directory, e.g. ~/pcgr-{pcgr_vars.PCGR_VERSION}", required = True)
   required.add_argument('--output_dir',help='Output directory', required = True)
   required.add_argument('--genome_assembly',choices = ['grch37','grch38'], help='Genome assembly build: grch37 or grch38', required = True)
   required.add_argument('--sample_id',help="Sample identifier - prefix for output files", required = True)
   args = parser.parse_args()
   return vars(args)
def main():
   """Entry point: parse and validate arguments, then launch the CPSR workflow."""
   args = get_args()
   logger = getlogger('cpsr-validate-input-arguments')
   logger.info("STEP 0: Validate input data")
   ## sanity-check parsed arguments before touching the filesystem
   check_args(args, logger)
   ## resolve the Docker image version (None in --no_docker mode)
   docker_image_version = get_docker_image_version(args, logger)
   ## map host-side input files/directories to container volumes
   host_dirs = verify_input_files(args, logger)
   run_cpsr(args, host_dirs, docker_image_version)
def check_args(arg_dict, logger):
   """
   Validate parsed CPSR command-line arguments: existence of required input
   files/directories, sample identifier length, numeric value ranges, virtual
   panel identifiers and VEP options. Calls error_message() (which aborts the
   run) on the first violation encountered; returns None when all checks pass.

   Side effect: forces arg_dict['vep_regulatory'] to True (regulatory
   annotation is always enabled for CPSR).
   """
   arg_dict['vep_regulatory'] = True
   ## Required arguments
   ## Check that query VCF is set and exists
   if arg_dict['input_vcf'] is None or not os.path.exists(arg_dict['input_vcf']):
      err_msg = "Required argument '--input_vcf' does not exist (" + str(arg_dict['input_vcf']) + "). Type cpsr.py --help to view all options and required arguments"
      error_message(err_msg,logger)
   ## Check that PCGR directory (with data bundle) is provided and exists
   if arg_dict['pcgr_dir'] is None or not os.path.exists(arg_dict['pcgr_dir']):
      err_msg = "Required argument '--pcgr_dir' does not exist (" + str(arg_dict['pcgr_dir']) + "). Type cpsr.py --help to view all options and required arguments"
      error_message(err_msg,logger)
   ## Check that output directory is provided and exists
   if arg_dict['output_dir'] is None or not os.path.exists(arg_dict['output_dir']):
      err_msg = "Required argument '--output_dir' does not exist (" + str(arg_dict['output_dir']) + "). Type cpsr.py --help to view all options and required arguments"
      error_message(err_msg,logger)
   ## Check that genome assembly is set
   if arg_dict['genome_assembly'] is None:
      err_msg = "Required argument '--genome_assembly' has no/undefined value (\'" + str(arg_dict['genome_assembly']) + "'). Type cpsr.py --help to view all options and required arguments"
      error_message(err_msg,logger)
   ## Check that sample identifier is set and is of appropriate length (minimum three characters)
   if arg_dict['sample_id'] is None:
      err_msg = "Required argument '--sample_id' has no/undefined value (" + str(arg_dict['sample_id']) + "). Type cpsr.py --help to view all options and required arguments"
      error_message(err_msg,logger)
   if len(arg_dict['sample_id']) <= 2:
      err_msg = "Sample name identifier ('--sample_id') requires a name with more than two characters. Current sample identifier: " + str(arg_dict['sample_id'])
      error_message(err_msg,logger)
   ### Optional arguments
   ## Either a virtual_panel_id or a custom gene list (panel 0) must be provided - but not both
   if arg_dict['virtual_panel_id'] == "-1" and not arg_dict['custom_list']:
      err_msg = 'Provide valid virtual panel identifier(s) through --panel_id (0 - 42) or provide custom list of panel 0 genes (single column text file) through --custom_list'
      error_message(err_msg,logger)
   if arg_dict['custom_list'] and arg_dict['virtual_panel_id'] != "-1":
      err_msg = "Option --panel_id cannot be used in conjunction with --custom_list"
      error_message(err_msg, logger)
   ## Numeric ranges
   if arg_dict['maf_upper_threshold'] <= 0 or arg_dict['maf_upper_threshold'] > 1:
      err_msg = 'MAF upper threshold must be greater than 0 and below 1, current value is ' + str(arg_dict['maf_upper_threshold'])
      error_message(err_msg,logger)
   if arg_dict['vcfanno_n_proc'] <= 0 or arg_dict['vcfanno_n_proc'] > 15:
      err_msg = 'Number of processes that vcfanno can use during annotation must be above 0 and not more than 15, current value is ' + str(arg_dict['vcfanno_n_proc'])
      error_message(err_msg,logger)
   ## Check that panel identifier(s) are set appropriately
   ## (single panel: 0-42; comma-separated multiple panels: each in 1-42)
   if arg_dict['virtual_panel_id'] != "-1" and not arg_dict['custom_list']:
      if ',' not in arg_dict['virtual_panel_id']:
         if str(arg_dict['virtual_panel_id']).isdigit():
            panel_id = int(arg_dict['virtual_panel_id'])
            if not (panel_id >= 0 and panel_id <= 42):
               err_msg = 'A single panel chosen with \'--panel_id\' must be in the range 0 - 42'
               error_message(err_msg, logger)
         else:
            err_msg = 'A single panel chosen with \'--panel_id\' must be a proper integer - not \'' + str(arg_dict['virtual_panel_id']) + '\''
            error_message(err_msg, logger)
      else:
         panel_ids = str(arg_dict['virtual_panel_id']).split(',')
         for p in panel_ids:
            if str(p).isdigit():
               panel_id = int(p)
               if panel_id < 1 or panel_id > 42:
                  err_msg = 'Multiple panels submitted as comma-separated string with \'--panel_id\' must take values in the range 1 - 42'
                  error_message(err_msg, logger)
            else:
               err_msg = 'Multiple panels submitted as comma-separated string with \'--panel_id\' must contain proper integer values only - \'' + str(arg_dict['virtual_panel_id']) + '\' contains non-integer entries'
               error_message(err_msg, logger)
   ## --diagnostic_grade_only only applies to GE PanelApp panels (1-42)
   if (arg_dict['custom_list'] or arg_dict['virtual_panel_id'] == "0" ) and arg_dict['diagnostic_grade_only']:
      warn_msg = 'Option \'--diagnostic_grade_only\' applies ONLY to panel identifiers from Genomics England PanelApp - will be ignored'
      warn_message(warn_msg, logger)
   ## VEP options
   if arg_dict['vep_n_forks'] <= 0 or arg_dict['vep_n_forks'] > 4:
      err_msg = 'Number of forks that VEP can use during annotation must be above 0 and not more than 4, current value is ' + str(arg_dict['vep_n_forks'])
      error_message(err_msg,logger)
   if arg_dict['vep_buffer_size'] <= 0 or arg_dict['vep_buffer_size'] > 30000:
      err_msg = 'Internal VEP buffer size, corresponding to the number of variants that are read in to memory simultaneously, must be above 0 and not more than 30,000, current value is ' + str(arg_dict['vep_buffer_size'])
      error_message(err_msg,logger)
   ## Check that VEP pick criteria is formatted correctly
   if arg_dict['vep_pick_order'] is not None:
      values = str(arg_dict['vep_pick_order']).split(',')
      permitted_sources = ['canonical','appris','tsl','biotype','ccds','rank','length','mane']
      ## fix: require an exact permutation of all eight permitted criteria.
      ## The previous count-based check accepted duplicated criteria
      ## (e.g. 'canonical' repeated eight times) and ignored unknown extras
      ## as long as eight entries were recognized.
      if len(values) != len(permitted_sources) or set(values) != set(permitted_sources):
         err_msg = "Option 'vep_pick_order' = " + str(arg_dict['vep_pick_order']) + " is formatted incorrectly, should be " + \
            "a comma-separated string of the following values: canonical,appris,tsl,biotype,ccds,rank,length,mane"
         error_message(err_msg, logger)
def verify_input_files(arg_dict, logger):
   """
   Verify existence/validity of the input VCF, optional custom gene list,
   output directory and PCGR data bundle, and check data-bundle version
   compliance against pcgr_vars.DB_VERSION.

   Returns:
      dict: host-side directories and basenames keyed for later mapping to
            Docker volumes ('input_vcf_dir_host', 'input_customlist_dir_host',
            'db_dir_host', 'base_dir_host', 'output_dir_host',
            'input_vcf_basename_host', 'input_customlist_basename_host').

   Calls error_message() (which aborts the run) on any failed check.
   """
   ## 'NA' sentinels are kept for optional inputs that may be absent
   input_vcf_dir = "NA"
   input_vcf_basename = "NA"
   input_customlist_basename = "NA"
   input_customlist_dir = "NA"
   ## check the existence of given output folder
   output_dir_full = os.path.abspath(arg_dict['output_dir'])
   if not os.path.isdir(output_dir_full):
      err_msg = "Output directory (" + str(output_dir_full) + ") does not exist"
      error_message(err_msg,logger)
   ## check if input custom gene list exists
   if not arg_dict['custom_list'] is None:
      if not os.path.exists(os.path.abspath(arg_dict['custom_list'])):
         err_msg = "Input file (" + str(arg_dict['custom_list']) + ") does not exist"
         error_message(err_msg,logger)
      input_customlist_basename = os.path.basename(str(arg_dict['custom_list']))
      input_customlist_dir = os.path.dirname(os.path.abspath(arg_dict['custom_list']))
   ## check if input vcf exists, has the proper extension, and is indexed if bgzipped
   if not arg_dict['input_vcf'] is None:
      if not os.path.exists(os.path.abspath(arg_dict['input_vcf'])):
         err_msg = "Input file (" + str(arg_dict['input_vcf']) + ") does not exist"
         error_message(err_msg,logger)
      if not (os.path.abspath(arg_dict['input_vcf']).endswith('.vcf') or os.path.abspath(arg_dict['input_vcf']).endswith('.vcf.gz')):
         err_msg = "VCF input file (" + os.path.abspath(arg_dict['input_vcf']) + ") does not have the correct file extension (.vcf or .vcf.gz)"
         error_message(err_msg,logger)
      ## check that tabix file exists if bgzipped file is given
      if os.path.abspath(arg_dict['input_vcf']).endswith('.vcf.gz'):
         tabix_file = arg_dict['input_vcf'] + '.tbi'
         if not os.path.exists(os.path.abspath(tabix_file)):
            err_msg = "Tabix file (i.e. '.gz.tbi') is not present for the bgzipped VCF input file (" + os.path.abspath(arg_dict['input_vcf']) + "). Please make sure your input VCF is properly compressed and indexed (bgzip + tabix)"
            error_message(err_msg,logger)
      input_vcf_basename = os.path.basename(str(arg_dict['input_vcf']))
      input_vcf_dir = os.path.dirname(os.path.abspath(arg_dict['input_vcf']))
      ## if output vcf exists and overwrite not set
      output_vcf = os.path.join(str(output_dir_full),str(arg_dict['sample_id'])) + '.cpsr.' + str(arg_dict['genome_assembly']) + '.vcf.gz'
      if os.path.exists(output_vcf) and arg_dict['force_overwrite'] is False:
         err_msg = "Output files (e.g. " + str(output_vcf) + ") already exist - please specify different sample_id or add option --force_overwrite"
         error_message(err_msg,logger)
   ## check the existence of base folder
   base_dir = os.path.abspath(arg_dict['pcgr_dir'])
   if not os.path.isdir(base_dir):
      err_msg = "Base directory (" + str(base_dir) + ") does not exist"
      error_message(err_msg,logger)
   ## check the existence of data folder within the base folder
   db_dir = os.path.join(os.path.abspath(arg_dict['pcgr_dir']),'data')
   if not os.path.isdir(db_dir):
      err_msg = "Data directory (" + str(db_dir) + ") does not exist"
      error_message(err_msg,logger)
   ## check the existence of specified assembly data folder within the base folder
   db_assembly_dir = os.path.join(os.path.abspath(arg_dict['pcgr_dir']),'data',arg_dict['genome_assembly'])
   if not os.path.isdir(db_assembly_dir):
      err_msg = "Data directory for the specified genome assembly (" + str(db_assembly_dir) + ") does not exist"
      error_message(err_msg,logger)
   ## check the existence of RELEASE_NOTES
   rel_notes_file = os.path.join(os.path.abspath(arg_dict['pcgr_dir']),'data',arg_dict['genome_assembly'],'RELEASE_NOTES')
   if not os.path.exists(rel_notes_file):
      err_msg = 'The PCGR data bundle is outdated - please download the latest data bundle (see github.com/sigven/cpsr for instructions)'
      error_message(err_msg,logger)
   ## context manager guarantees the file handle is closed on all paths
   ## (the original open()/close() pair leaked the handle on exceptions)
   with open(rel_notes_file, 'r') as f_rel_not:
      compliant_data_bundle = any(pcgr_vars.DB_VERSION in line for line in f_rel_not)
   if not compliant_data_bundle:
      err_msg = 'The PCGR data bundle is not compliant with the software version - please download the latest software and data bundle (see https://github.com/sigven/cpsr for instructions)'
      error_message(err_msg,logger)
   host_directories = {}
   host_directories['input_vcf_dir_host'] = input_vcf_dir
   host_directories['input_customlist_dir_host'] = input_customlist_dir
   host_directories['db_dir_host'] = db_assembly_dir
   host_directories['base_dir_host'] = base_dir
   host_directories['output_dir_host'] = output_dir_full
   host_directories['input_vcf_basename_host'] = input_vcf_basename
   host_directories['input_customlist_basename_host'] = input_customlist_basename
   return host_directories
def run_cpsr(arg_dict, host_directories, DOCKER_IMAGE_VERSION):
   """
   Run the CPSR workflow: validate the query VCF, annotate it with VEP and
   vcfanno, summarise cancer-gene annotations, convert to TSV and (unless
   --basic was given) generate the HTML report.

   All steps are shell commands executed via check_subprocess(); when
   DOCKER_IMAGE_VERSION is set they run inside a Docker container with the
   host directories mapped as volumes, otherwise they run directly on the
   host (Conda / --no_docker mode).
   """
   ## get options
   debug = arg_dict['debug']
   docker_user_id = arg_dict['docker_user_id']
   ## Normalize boolean CLI flags to 0/1 integers - these are forwarded as
   ## positional/command-line arguments to downstream Python/R scripts.
   diagnostic_grade_only = 0
   vcf_validation = 1
   virtual_panel_id = "-1"
   ignore_noncoding = 0
   gwas_findings = 0
   secondary_findings = 0
   classify_all = 0
   clinvar_ignore_noncancer = 0
   report_nonfloating_toc = 0
   vep_no_intergenic = 0
   vep_regulatory = 0
   preserved_info_tags = arg_dict['preserved_info_tags']
   ## "ON"/"OFF" strings are only used for log messages below
   diagnostic_grade_set = "OFF"
   secondary_findings_set = "OFF"
   gwas_findings_set = "OFF"
   #vep_regulatory = "OFF"
   if arg_dict['vep_regulatory']:
      vep_regulatory = 1
   if arg_dict["vep_no_intergenic"]:
      vep_no_intergenic = 1
   if arg_dict['clinvar_ignore_noncancer']:
      clinvar_ignore_noncancer = 1
   if arg_dict['classify_all']:
      classify_all = 1
   if arg_dict['gwas_findings']:
      gwas_findings = 1
      gwas_findings_set = "ON"
   if arg_dict['secondary_findings']:
      secondary_findings = 1
      secondary_findings_set = "ON"
   if arg_dict['diagnostic_grade_only']:
      diagnostic_grade_only = 1
      diagnostic_grade_set = "ON"
   if arg_dict['report_nonfloating_toc']:
      report_nonfloating_toc = 1
   if arg_dict['no_vcf_validate']:
      vcf_validation = 0
   if arg_dict['virtual_panel_id'] != "-1":
      virtual_panel_id = arg_dict['virtual_panel_id']
   ## a custom gene list overrides any panel id
   if arg_dict['custom_list']:
      virtual_panel_id = "-1"
   if arg_dict['ignore_noncoding']:
      ignore_noncoding = 1
   logger = getlogger('cpsr-validate-input-arguments')
   ## set basic Docker run commands
   output_vcf = 'None'
   output_pass_vcf = 'None'
   output_pass_tsv = 'None'
   uid = ''
   GENCODE_VERSION = pcgr_vars.GENCODE_VERSION
   VEP_ASSEMBLY = pcgr_vars.VEP_ASSEMBLY
   VEP_VERSION = pcgr_vars.VEP_VERSION
   ## grch37 pins an older GENCODE release / VEP assembly name
   if arg_dict['genome_assembly'] == 'grch37':
      GENCODE_VERSION = '19'
      VEP_ASSEMBLY = 'GRCh37'
   ## Resolve the user id the Docker containers should run as:
   ## explicit --docker_uid > numeric uid (Linux/macOS) > username (Windows) > 'root'
   if docker_user_id:
      uid = docker_user_id
   elif platform.system() == 'Linux' or platform.system() == 'Darwin' or sys.platform == 'darwin' or sys.platform == 'linux2' or sys.platform == 'linux':
      uid = os.getuid()
   else:
      if platform.system() == 'Windows' or sys.platform == 'win32' or sys.platform == 'cygwin':
         uid = getpass.getuser()
   if uid == '':
      logger.warning('Was not able to get user id/username for logged-in user on the underlying platform (platform.system(): ' + str(platform.system()) + ', sys.platform: ' + str(sys.platform) + '), now running CPSR as root')
      uid = 'root'
   vepdb_dir_host = os.path.join(str(host_directories['db_dir_host']),'.vep')
   input_vcf_docker = 'None'
   input_customlist_docker = 'None'
   ## Determine basic Docker commands
   if DOCKER_IMAGE_VERSION:
      ## host-path:container-path volume mappings
      vep_volume_mapping = str(vepdb_dir_host) + ":/usr/local/share/vep/data"
      databundle_volume_mapping = str(host_directories['base_dir_host']) + ":/data"
      input_vcf_volume_mapping = str(host_directories['input_vcf_dir_host']) + ":/workdir/input_vcf"
      input_customlist_volume_mapping = str(host_directories['input_customlist_dir_host']) + ":/workdir/input_custom"
      output_volume_mapping = str(host_directories['output_dir_host']) + ":/workdir/output"
      if host_directories['input_vcf_basename_host'] != 'NA':
         input_vcf_docker = '/workdir/input_vcf/' + str(host_directories['input_vcf_basename_host'])
      if host_directories['input_customlist_basename_host'] != 'NA':
         input_customlist_docker = '/workdir/input_custom/' + str(host_directories['input_customlist_basename_host'])
      ## run1 mounts the VEP cache and (optionally) the input volumes;
      ## run2 is the slimmer variant used for steps that only need data + output
      docker_command_run1 = "docker run --rm -u " + str(uid) + " -v=" + str(databundle_volume_mapping) + " -v=" + str(vep_volume_mapping) + " -v=" + str(output_volume_mapping)
      if host_directories['input_vcf_dir_host'] != 'NA':
         docker_command_run1 = docker_command_run1 + " -v=" + str(input_vcf_volume_mapping)
      if host_directories['input_customlist_dir_host'] != 'NA':
         docker_command_run1 = docker_command_run1 + " -v=" + str(input_customlist_volume_mapping)
      docker_command_run1 = docker_command_run1 + " -w=/workdir/output " + str(DOCKER_IMAGE_VERSION) + " sh -c \""
      docker_command_run2 = "docker run --rm -u " + str(uid) + " -v=" + str(databundle_volume_mapping) + " -v=" + str(output_volume_mapping) + " -w=/workdir/output " + str(DOCKER_IMAGE_VERSION) + " sh -c \""
      ## closes the sh -c "..." opened by the run prefixes above
      docker_command_run_end = '\"'
      data_dir = '/data'
      output_dir = '/workdir/output'
      vep_dir = '/usr/local/share/vep/data'
      r_scripts_dir = '/'
   ## If run in no-docker mode, set commands accordingly
   else:
      if host_directories['input_vcf_basename_host'] != 'NA':
         input_vcf_docker = os.path.join(host_directories['input_vcf_dir_host'], host_directories['input_vcf_basename_host'])
      if host_directories['input_customlist_basename_host'] != 'NA':
         input_customlist_docker = os.path.join(host_directories['input_customlist_dir_host'], host_directories['input_customlist_basename_host'])
      docker_command_run1 = ''
      docker_command_run2 = ''
      docker_command_run_end = ''
      data_dir = host_directories['base_dir_host']
      output_dir = host_directories['output_dir_host']
      vep_dir = vepdb_dir_host
      r_scripts_dir = ''
   ## mkdir is run without the -u flag (stripped via replace) so the output
   ## directory can be created regardless of the mapped user id
   check_subprocess(logger, docker_command_run1.replace("-u " + str(uid), "") + 'mkdir -p ' + output_dir + docker_command_run_end, debug)
   ## CPSR|Validate input VCF - check formatting, non-overlap with CPSR INFO tags, and whether sample contains any variants in cancer predisposition loci
   ## NOTE(review): argument order below is positional and must match what
   ## cpsr_validate_input.py expects - confirm against that script before reordering.
   vcf_validate_command = docker_command_run1 + "cpsr_validate_input.py" + " " + data_dir + " " + str(input_vcf_docker) + " " + \
      str(input_customlist_docker) + " " + str(preserved_info_tags) + " " + str(vcf_validation) + " " + str(arg_dict['genome_assembly']) + " " + \
      str(arg_dict['sample_id']) + " " + str(virtual_panel_id) + " " + str(diagnostic_grade_only)
   if debug:
      vcf_validate_command += ' --debug'
   if not DOCKER_IMAGE_VERSION:
      vcf_validate_command += ' --output_dir ' + output_dir + docker_command_run_end
   else:
      vcf_validate_command += docker_command_run_end
   check_subprocess(logger, vcf_validate_command, debug)
   logger.info('Finished')
   ## CPSR|Start - log key information about run
   logger = getlogger("cpsr-start")
   print()
   logger.info("--- Cancer Predisposition Sequencing Reporter workflow ----")
   logger.info("Sample name: " + str(arg_dict['sample_id']))
   if not input_customlist_docker == 'None':
      logger.info("Virtual gene panel: custom-made list from panel 0: " + str(input_customlist_docker))
   else:
      #logger.info("Virtual gene panel(s): " + str(GE_panels[virtual_panel_id]))
      logger.info("Diagnostic-grade genes in virtual panels (GE PanelApp): " + str(diagnostic_grade_set))
   logger.info("Include incidential findings (ACMG recommended list v3.0): " + str(secondary_findings_set))
   logger.info("Include low to moderate cancer risk variants from genome-wide association studies: " + str(gwas_findings_set))
   logger.info("Reference population, germline variant frequencies (gnomAD): " + str(arg_dict['pop_gnomad']).upper())
   logger.info("Genome assembly: " + str(arg_dict['genome_assembly']))
   ## Remaining steps only run when an input VCF was resolved above
   if not input_vcf_docker == 'None':
      ## Define input, output and temporary file names
      pcgr_model = 'cpsr'
      output_vcf = os.path.join(output_dir, str(arg_dict['sample_id']) + '.cpsr.' + str(arg_dict['genome_assembly']) + '.vcf.gz')
      output_pass_vcf = os.path.join(output_dir, str(arg_dict['sample_id']) + '.cpsr.' + str(arg_dict['genome_assembly']) + '.pass.vcf.gz')
      output_pass_tsv = os.path.join(output_dir, str(arg_dict['sample_id']) + '.cpsr.' + str(arg_dict['genome_assembly']) + '.pass.tsv')
      input_vcf_cpsr_ready = os.path.join(output_dir, re.sub(r'(\.vcf$|\.vcf\.gz$)','.cpsr_ready_target.vcf.gz',host_directories['input_vcf_basename_host']))
      input_vcf_cpsr_ready_uncompressed = os.path.join(output_dir, re.sub(r'(\.vcf$|\.vcf\.gz$)','.cpsr_ready_target.vcf',host_directories['input_vcf_basename_host']))
      vep_vcf = re.sub(r'(\.vcf$|\.vcf\.gz$)','.cpsr_vep.vcf',input_vcf_cpsr_ready)
      vep_vcfanno_vcf = re.sub(r'(\.vcf$|\.vcf\.gz$)','.cpsr_vep.vcfanno.vcf',input_vcf_cpsr_ready)
      vep_vcfanno_annotated_vcf = re.sub(r'\.vcfanno','.vcfanno.annotated',vep_vcfanno_vcf) + '.gz'
      vep_vcfanno_annotated_pass_vcf = re.sub(r'\.vcfanno','.vcfanno.annotated.pass',vep_vcfanno_vcf) + '.gz'
      custom_bed = os.path.join(output_dir, str(arg_dict['sample_id']) + '.' + str(pcgr_model) + '.' + str(arg_dict['genome_assembly']) + '.custom_list.bed')
      ## File names for assembly-specific genome fasta files (VEP)
      fasta_assembly = os.path.join(vep_dir, "homo_sapiens", str(VEP_VERSION) + "_" + str(VEP_ASSEMBLY), "Homo_sapiens." + str(VEP_ASSEMBLY) + ".dna.primary_assembly.fa.gz")
      ancestor_assembly = os.path.join(vep_dir, "homo_sapiens", str(VEP_VERSION) + "_" + str(VEP_ASSEMBLY), "human_ancestor.fa.gz")
      ## Set all flags used in VEP run
      plugins_in_use = "NearestExonJB, LoF"
      vep_flags = "--format vcf --vcf --check_ref --flag_pick_allele_gene --hgvs --dont_skip --failed 1 --af --af_1kg --af_gnomad " + \
         "--variant_class --domains --symbol --protein --ccds --uniprot --appris --biotype --canonical --cache " + \
         "--numbers --total_length --no_stats --allele_number --no_escape --xref_refseq --plugin NearestExonJB,max_range=50000"
      vep_options = "--pick_order " + str(arg_dict['vep_pick_order']) + " --force_overwrite --buffer_size " + \
         str(arg_dict['vep_buffer_size']) + " --species homo_sapiens --assembly " + \
         str(VEP_ASSEMBLY) + " --offline --fork " + str(arg_dict['vep_n_forks']) + " " + str(vep_flags) + " --dir " + str(vep_dir)
      ## default loftee location inside the Docker image; overridden below in no-docker mode
      loftee_dir = '/opt/vep/src/ensembl-vep/modules'
      vep_options += f' --cache_version {pcgr_vars.VEP_VERSION}'
      gencode_set_in_use = "GENCODE - all transcripts"
      if arg_dict['vep_gencode_all'] == 0:
         vep_options += ' --gencode_basic'
         gencode_set_in_use = "GENCODE - basic transcript set (--gencode_basic)"
      if arg_dict['vep_no_intergenic'] == 1:
         vep_options = vep_options + " --no_intergenic"
      if arg_dict['vep_regulatory'] == 1:
         vep_options = vep_options + " --regulatory"
      if arg_dict['genome_assembly'] == "grch38":
         vep_options = vep_options + " --mane"
      if not DOCKER_IMAGE_VERSION:
         ## no-docker mode: locate the loftee plugin inside the active conda env
         conda_prefix = os.path.dirname(os.path.dirname(sys.executable))
         loftee_dir = os.path.join(conda_prefix, 'share', 'loftee')
         assert os.path.isdir(loftee_dir), 'LoF VEP plugin is not found in ' + loftee_dir + '. Please make sure you installed pcgr conda package and have corresponding conda environment active.'
         vep_options += " --plugin LoF,loftee_path:" + loftee_dir + ",human_ancestor_fa:" + str(ancestor_assembly) + ",use_gerp_end_trunc:0 --dir_plugins " + loftee_dir
      else:
         vep_options += " --plugin LoF,loftee_path:" + loftee_dir + ",human_ancestor_fa:" + str(ancestor_assembly) + ",use_gerp_end_trunc:0 --dir_plugins " + loftee_dir
      if not debug:
         vep_options += " --quiet"
      ## Compose full VEP command
      vep_main_command = f'{docker_command_run1} {utils.get_perl_exports()} && vep --input_file {input_vcf_cpsr_ready} --output_file {vep_vcf} {vep_options} --fasta {fasta_assembly} {docker_command_run_end}'
      vep_bgzip_command = str(docker_command_run1) + "bgzip -f " + str(vep_vcf) + docker_command_run_end
      vep_tabix_command = str(docker_command_run1) + "tabix -f -p vcf " + str(vep_vcf) + ".gz" + docker_command_run_end
      logger = getlogger('cpsr-vep')
      ## CPSR|VEP - run Variant Effect Predictor on query VCF with LoF and NearestExonJB plugins
      print()
      logger.info("STEP 1: Basic variant annotation with Variant Effect Predictor (" + str(VEP_VERSION) + ", GENCODE release " + \
         str(GENCODE_VERSION) + ", " + str(arg_dict['genome_assembly']) + ")")
      logger.info("VEP configuration - one primary consequence block pr. alternative allele (--flag_pick_allele)")
      logger.info("VEP configuration - transcript pick order: " + str(arg_dict['vep_pick_order']))
      logger.info("VEP configuration - transcript pick order: See more at https://www.ensembl.org/info/docs/tools/vep/script/vep_other.html#pick_options")
      logger.info(f'VEP configuration - GENCODE set: {gencode_set_in_use}')
      logger.info("VEP configuration - skip intergenic: " + str(arg_dict['vep_no_intergenic']))
      logger.info("VEP configuration - look for overlap with regulatory regions: " + str(vep_regulatory))
      logger.info("VEP configuration - plugins in use: " + str(plugins_in_use))
      logger.info("VEP configuration - buffer_size/number of forks: " + str(arg_dict['vep_buffer_size']) + '/' + str(arg_dict['vep_n_forks']))
      check_subprocess(logger, vep_main_command, debug)
      check_subprocess(logger, vep_bgzip_command, debug)
      check_subprocess(logger, vep_tabix_command, debug)
      logger.info("Finished")
      ## CPSR|vcfanno - run vcfanno on query VCF with a number of relevant annotated VCFs
      print()
      logger = getlogger('cpsr-vcfanno')
      logger.info("STEP 2: Annotation for cancer predisposition with cpsr-vcfanno (ClinVar, CIViC, dbNSFP, dbMTS, UniProtKB, cancerhotspots.org, ncER, GERP RS scores, GWAS catalog, gnomAD non-cancer subset)")
      pcgr_vcfanno_command = str(docker_command_run2) + "pcgr_vcfanno.py --num_processes " + str(arg_dict['vcfanno_n_proc']) + \
         " --dbnsfp --clinvar --cancer_hotspots --dbmts --ncer --gerp --civic --uniprot --gnomad_cpsr --pcgr_onco_xref --gwas --rmsk " + str(vep_vcf) + ".gz " + \
         str(vep_vcfanno_vcf) + " " + os.path.join(data_dir, "data", str(arg_dict['genome_assembly'])) + docker_command_run_end
      check_subprocess(logger, pcgr_vcfanno_command, debug)
      logger.info("Finished")
      ## CPSR|summarise - expand annotations with separate VCF INFO tags
      print()
      logger = getlogger("cpsr-summarise")
      pcgr_summarise_command = str(docker_command_run2) + "pcgr_summarise.py " + str(vep_vcfanno_vcf) + ".gz 0 " + \
         str(vep_regulatory) + " " + os.path.join(data_dir, "data", str(arg_dict['genome_assembly'])) + " --cpsr" + docker_command_run_end
      if debug:
         pcgr_summarise_command += ' --debug'
      logger.info("STEP 3: Cancer gene annotations with cpsr-summarise")
      check_subprocess(logger, pcgr_summarise_command, debug)
      ## CPSR|clean - rename output files, remove temporary files
      create_output_vcf_command1 = str(docker_command_run2) + 'mv ' + str(vep_vcfanno_annotated_vcf) + ' ' + str(output_vcf) + docker_command_run_end
      create_output_vcf_command2 = str(docker_command_run2) + 'mv ' + str(vep_vcfanno_annotated_vcf) + '.tbi ' + str(output_vcf) + '.tbi' + docker_command_run_end
      create_output_vcf_command3 = str(docker_command_run2) + 'mv ' + str(vep_vcfanno_annotated_pass_vcf) + ' ' + str(output_pass_vcf) + docker_command_run_end
      create_output_vcf_command4 = str(docker_command_run2) + 'mv ' + str(vep_vcfanno_annotated_pass_vcf) + '.tbi ' + str(output_pass_vcf) + '.tbi' + docker_command_run_end
      clean_command = str(docker_command_run2) + 'rm -f ' + str(vep_vcf) + '* ' + str(vep_vcfanno_annotated_vcf) + ' ' + \
         str(vep_vcfanno_annotated_pass_vcf) + '* ' + str(vep_vcfanno_vcf) + '* ' + str(input_vcf_cpsr_ready_uncompressed) + "* " + docker_command_run_end
      check_subprocess(logger, create_output_vcf_command1, debug)
      check_subprocess(logger, create_output_vcf_command2, debug)
      check_subprocess(logger, create_output_vcf_command3, debug)
      check_subprocess(logger, create_output_vcf_command4, debug)
      ## CPSR|vcf2tsv - perform vcf2tsv conversion on the final annotated VCF file
      cpsr_vcf2tsv_command = str(docker_command_run2) + "vcf2tsv.py " + str(output_pass_vcf) + " --compress " + str(output_pass_tsv) + docker_command_run_end
      logger.info("Converting VCF to TSV with https://github.com/sigven/vcf2tsv")
      check_subprocess(logger, cpsr_vcf2tsv_command, debug)
      ## temporary files are kept when --debug is set, to ease troubleshooting
      if not debug:
         check_subprocess(logger, clean_command, debug)
      logger.info("Finished")
      print()
      ## Generation of HTML reports for VEP/vcfanno-annotated VCF file
      if not arg_dict['basic']:
         logger = getlogger('cpsr-writer')
         logger.info("STEP 4: Generation of output files - Cancer predisposition sequencing report")
         # export PATH to R conda env Rscript
         rscript = utils.script_path('pcgrr', 'bin/Rscript', DOCKER_IMAGE_VERSION)
         cpsrr_script = utils.script_path('pcgr', 'bin/cpsr.R', DOCKER_IMAGE_VERSION)
         ## NOTE(review): cpsr.R consumes these as positional arguments -
         ## the order below must match the R script; confirm before editing.
         cpsr_report_command = (
            f"{docker_command_run1} "
            f"{rscript} {cpsrr_script} "
            f"{output_dir} "
            f"{output_pass_tsv}.gz "
            f"{arg_dict['sample_id']} "
            f"{pcgr_vars.PCGR_VERSION} "
            f"{pcgr_vars.DB_VERSION} "
            f"{arg_dict['genome_assembly']} "
            f"{data_dir} "
            f"{virtual_panel_id} "
            f"{preserved_info_tags} "
            f"{custom_bed} "
            f"{arg_dict['custom_list_name']} "
            f"{arg_dict['report_theme']} "
            f"{arg_dict['report_table_display']} "
            f"{report_nonfloating_toc} "
            f"{gwas_findings} "
            f"{arg_dict['gwas_p_value']} "
            f"{arg_dict['pop_gnomad']} "
            f"{arg_dict['maf_upper_threshold']} "
            f"{arg_dict['vep_pick_order']} "
            f"{arg_dict['vep_n_forks']} "
            f"{arg_dict['vep_buffer_size']} "
            f"{arg_dict['vep_gencode_all']} "
            f"{vep_no_intergenic} "
            f"{vep_regulatory} "
            f"{secondary_findings} "
            f"{classify_all} "
            f"{ignore_noncoding} "
            f"{clinvar_ignore_noncancer} "
            f"{diagnostic_grade_only} "
            f"{docker_command_run_end}"
         )
         check_subprocess(logger, cpsr_report_command, debug)
         logger.info("Finished")
# Script entry point (PEP 8 two-line form)
if __name__ == "__main__":
   main()
# ---- file-concatenation artifact: an unrelated Python 2 script follows below ----
import time
startTime = time.time()        # wall-clock start for the whole-script timing report at the bottom
timeCheckStart = time.time()   # per-section timer; reused throughout the script and the class
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D
import numpy as np
from scipy.integrate import odeint  # NOTE(review): odeint appears unused in this file - confirm before removing
print "Import: ", time.time() - timeCheckStart
#Bogus values
# NOTE(review): placeholder sounding data only - temp is byte-identical to wndU
# and dewp to wndV, so the wind barbs are not real winds.  Replace with a real
# data-loading function (see "DATA VALUES" comment inside skewTLogP.__init__).
# Units: temp/dewp [C], pres [mb]; wndU/wndV presumably [kt] - TODO confirm.
temp = np.array([28.8,28.2,26.0,23.8,23.0,22.2,20.0,16.6,14.1,12.2,11.6,10.6,10.8,10.6,10.6,9.4,7.9,4.0,3.0,3.0,3.6,3.8,3.2,2.8,3.1,3.6,2.0,1.8,0.2,-1.5,-4.0,-4.1,-10.3,-11.3,-13.4,-14.1,-15.7,-23.1,-23.3,-24.2,-24.7,-27.5,-31.3,-31.7,-34.1,-37.3,-37.3,-37.5,-37.8,-45.2,-46.3,-47.3,-47.1,-51.5,-51.9,-52.9,-54.5,-54.3,-53.1,-60.0,-60.5,-60.9,-62.0,-62.9,-63.0,-63.1,-66.4,-68.7,-70.9,-70.3])
dewp = np.array([3.8,-3.8,-3.0,-2.2,-2.5,-2.8,-1.9,-0.4,-0.6,-0.8,-1.4,-2.4,-6.2,-5.4,-5.3,0.4,0.3,0.0,-0.1,-1.0,-9.4,-15.2,-11.8,-15.2,-17.8,-22.4,-21.0,-27.2,-29.7,-32.5,-25.3,-25.1,-37.3,-36.3,-36.9,-37.1,-42.7,-41.1,-41.3,-39.5,-38.7,-48.5,-55.3,-59.7,-63.4,-68.3,-68.3,-69.5,-69.7,-75.5,-76.3,-77.3,-77.1,-75.5,-74.9,-74.9,-75.5,-75.5,-75.1,-77.3,-77.5,-76.9,-77.5,-77.9,-78.0,-78.1,-80.2,-81.7,-82.9,-82.3])
pres = np.array([978.0,974.0,949.0,925.0,916.6,908.0,884.7,850.0,823.6,804.0,794.3,780.0,772.0,766.0,765.9,753.0,738.1,700.0,692.0,688.0,685.0,680.0,672.0,662.0,659.9,656.0,638.0,630.0,611.8,592.0,566.6,566.0,506.0,500.0,484.1,479.0,462.0,402.0,400.0,394.6,392.0,373.0,350.0,332.0,318.4,301.0,300.0,293.0,291.7,254.9,250.0,243.0,237.0,215.0,205.0,200.0,185.0,183.8,176.0,151.7,150.0,149.0,144.4,141.0,137.5,136.0,124.4,117.0,106.0,100.0])
wndU = np.array([28.8,28.2,26.0,23.8,23.0,22.2,20.0,16.6,14.1,12.2,11.6,10.6,10.8,10.6,10.6,9.4,7.9,4.0,3.0,3.0,3.6,3.8,3.2,2.8,3.1,3.6,2.0,1.8,0.2,-1.5,-4.0,-4.1,-10.3,-11.3,-13.4,-14.1,-15.7,-23.1,-23.3,-24.2,-24.7,-27.5,-31.3,-31.7,-34.1,-37.3,-37.3,-37.5,-37.8,-45.2,-46.3,-47.3,-47.1,-51.5,-51.9,-52.9,-54.5,-54.3,-53.1,-60.0,-60.5,-60.9,-62.0,-62.9,-63.0,-63.1,-66.4,-68.7,-70.9,-70.3])
wndV = np.array([3.8,-3.8,-3.0,-2.2,-2.5,-2.8,-1.9,-0.4,-0.6,-0.8,-1.4,-2.4,-6.2,-5.4,-5.3,0.4,0.3,0.0,-0.1,-1.0,-9.4,-15.2,-11.8,-15.2,-17.8,-22.4,-21.0,-27.2,-29.7,-32.5,-25.3,-25.1,-37.3,-36.3,-36.9,-37.1,-42.7,-41.1,-41.3,-39.5,-38.7,-48.5,-55.3,-59.7,-63.4,-68.3,-68.3,-69.5,-69.7,-75.5,-76.3,-77.3,-77.1,-75.5,-74.9,-74.9,-75.5,-75.5,-75.1,-77.3,-77.5,-76.9,-77.5,-77.9,-78.0,-78.1,-80.2,-81.7,-82.9,-82.3])
class skewTLogP():
    """Skew-T Log-P thermodynamic diagram generator.

    Draws the diagram background (isobars, skewed isotherms, dry/moist
    adiabats, mixing-ratio lines, wind axis), plots a sounding
    (temperature, dewpoint, wind barbs) and derives basic parcel
    parameters (LCL, CCL, convective temperature), then saves the figure
    to 'skewT.png'.

    Units are mb (hPa) for pressure and degrees C for temperature unless a
    helper docstring states otherwise.  Written for Python 2 (print
    statements).  The 45-degree "skew" is produced by composing an x' = x + y
    shear (Affine2D) into ax1's transform pipeline in __init__.
    """
    #Assumes pressure in mb and temp in C
    #Initialization Function
    def __init__(self):
        timeCheckStart = time.time()
        #USER SPECIFIED VALUES (Mostly - I might change what they can do later)
        self.tMin = -40.   # plot temperature bounds [C]
        self.tMax = 55.
        self.pMax = 1100.  # plot pressure bounds [mb]
        self.pMin = 100.
        #Plot title
        self.title = 'Skew-T Log-P Test Image'
        #Whether or not to create parcel trace info
        parcelTrace = 'T'
        if parcelTrace == 'T':
            pass  # placeholder: parcel-trace toggle not implemented yet
        #figSize = (9,12)
        # NOTE(review): under Python 2, 10*3/4 is integer division -> 7 (not 7.5).
        figSize = (10*3/4, 10)
        self.mixRatStop = 800 #Pressure at which to stop plotting mixing ratio
        self.windOffset = 7 #% indent from right bound for wind axis
        self.windOffsetPos = self.tMax - (self.windOffset/100.)*(self.tMax - self.tMin)
        # Per-element style tables, indexed 0..10:
        #          TempProf DewpProf Isobar IsoT<0 IsoT=0 IsoT>0 DAdiabat MAdiabat MixRat Wind Axis Wind Barbs
        self.colors = ['#ff0000', '#009900', '#000000', '#0000ff', '#0000ff', '#ff0000', '#880000', '#009900', '#ff9900', '#000000', '#404040']
        self.lStyle = ['-' , '-' , '-' , '-' , '-' , '-' , '--' , '--' , ':' , '-' , '-' ]
        self.lWidth = [1.8 , 1.8 , 1. , 0.25 , 0.75 , 0.25 , 0.5 , 0.5 , 2. , 1. , 0.5 ]
        #          Red      DGreen   Black  Red     Blue   Blue    Dark Red DGreen   Orangish Black    Lght Black
        #          Solid    Solid    Solid  Dashed  Dashed Dashed  Dashed   Dashed   Dotted   Solid    Solid
        #          Normal   Normal   Normal ExSmall Med    ExSmall Small    Small    Large    Normal   Small
        #DATA VALUES - SHOULD BE PROVIDED BY GET DATA FUNCTION
        # Structured array holding the sounding; filled from the module-level
        # placeholder arrays for now.
        self.data=np.zeros((len(pres),), dtype=[('pres','f4'), ('temp','f4'), ('dewp','f4'), ('wndU', 'f4'), ('wndV', 'f4')])
        self.data['pres'] = pres
        self.data['temp'] = temp
        self.data['dewp'] = dewp
        self.data['wndU'] = wndU
        self.data['wndV'] = wndV
        #END NEW INPUT - BEGIN PROGRAM
        #Create figure instance
        fig = plt.figure(figsize=figSize)
        #Create ax2 and make it inverse logarithmic
        # ax2 is the unskewed axis: isobars, wind axis/barbs, pressure labels.
        self.ax2 = fig.add_subplot(111)
        self.ax2.set_yscale('log')
        self.ax2.set_xscale('linear')
        self.ax2.set_ylim(self.ax2.get_ylim()[::-1])
        #Create ax1 and make it inverse logarithmic and skewed
        # ax1 shares the figure area but carries the skew shear; all
        # temperature-dependent lines are drawn on it.
        self.ax1 = self.ax2.twinx()
        self.ax1.set_yscale('log')
        self.ax1.set_xscale('linear')
        self.ax1.set_ylim(self.ax1.get_ylim()[::-1])
        # Shear matrix [[1,1,0],[0,1,0],[0,0,1]] maps x' = x + y in axes
        # coordinates, producing the skewed isotherms.
        self.ax1.transLimits = self.ax1.transLimits + Affine2D(np.array([[1.,1.,0.],[0.,1.,0.],[0.,0.,1.]]))
        self.ax1.transData = self.ax1.transScale + (self.ax1.transLimits + self.ax1.transAxes)
        print "Init: ", time.time() - timeCheckStart
        self.new_analyzeParcel(temp, dewp, pres, wndU, wndV)
    #Plotting Functions
    def _plotBackground(self):
        """Draw the static diagram background.

        Each family of lines (isobars, isotherms, adiabats, mixing-ratio
        lines) is assembled into one long array with NaN separators so it can
        be drawn with a single plot() call per family (much faster than one
        call per line).  Also records self.x2TickPres / self.x2TickTemp /
        self.w for the mixing-ratio labels drawn in _plotAxes().
        """
        timeCheckStart = time.time()
        #Create isobars
        presVals = np.linspace(100,1000,10)
        presVals = np.repeat(presVals,3)
        tempVals = np.array([self.tMin, self.tMax, np.nan])
        tempVals = np.tile(tempVals,(10))
        self.ax2.plot(tempVals, presVals, color=self.colors[2], linestyle=self.lStyle[2], linewidth=self.lWidth[2])
        print "Isob: ", time.time() - timeCheckStart
        timeCheckStart = time.time()
        #Create isotherms
        #(Doing sep calls faster than using np.where() in this case)
        # Three calls: the 0 C line, the sub-zero lines, and the above-zero
        # lines, so each group gets its own color/width.
        presVals = [self.pMax, self.pMin, np.nan]
        self.ax1.plot([0,0,0], presVals, color=self.colors[4], linestyle=self.lStyle[4], linewidth=self.lWidth[4])
        tempVals = np.linspace(-110,-10,11)
        tempVals = np.repeat(tempVals,3)
        presVals_nE0 = np.tile(presVals,(11))
        self.ax1.plot(tempVals, presVals_nE0, color=self.colors[3], linestyle=self.lStyle[3], linewidth=self.lWidth[3])
        tempVals = np.linspace(10,40,4)
        tempVals = np.repeat(tempVals,3)
        presVals_nE0 = np.tile(presVals,(4))
        self.ax1.plot(tempVals, presVals_nE0, color=self.colors[5], linestyle=self.lStyle[5], linewidth=self.lWidth[5])
        print "Isot: ", time.time() - timeCheckStart
        timeCheckStart = time.time()
        presVals = np.arange(self.pMin, self.pMax+1., 10.)
        #Create dry adiabats
        potTempVals = np.arange(self.tMin + 10., (self.tMax - self.tMin)*2. + self.tMax + 1., 20.) ###Need to come back and make the default maximum automatic. Would also be nice to have the where statement here again.
        dPresVals = np.append(presVals, np.nan)
        dPresVals = np.tile(dPresVals, (potTempVals.shape[0]))
        potTempVals = np.repeat(potTempVals, presVals.shape[0] + 1)
        tempVals = self._new_getDryAdiabatTemp(potTempVals, dPresVals)
        self.ax1.plot(tempVals, dPresVals, color=self.colors[6], linestyle=self.lStyle[6], linewidth=self.lWidth[6])
        print "DAdi: ", time.time() - timeCheckStart
        timeCheckStart = time.time()
        #Moist Adiabats
        tempVals = np.arange(self.tMin, (self.tMax - self.tMin)*2. + self.tMax + 1., 10.)
        presVals = np.arange(self.pMin, self.pMax+1., 50.) #Note: smaller step values cause issues at lower moist adiabat values near 1000 mb
        #XX NEED TO FIX ABOVE!
        mPresVals = np.append(presVals, np.nan)
        mPresVals = np.tile(mPresVals, (tempVals.shape[0]))
        tempVals = np.repeat(tempVals, presVals.shape[0] + 1) # + 1 to account for nan
        tempVals = self.new_wetLift(1000., tempVals, mPresVals)
        self.ax1.plot(tempVals, mPresVals, color=self.colors[7], linestyle=self.lStyle[7], linewidth=self.lWidth[7])
        print "MAdi: ", time.time() - timeCheckStart
        timeCheckStart = time.time()
        #Create mixing ratios
        # Label position: halfway between pMax and mixRatStop.
        self.x2TickPres = np.array([self.pMax - (self.pMax - self.mixRatStop)/2.])
        self.w = np.array([1., 2., 3., 5., 8., 13., 21.]) #Note that self.x2TickTemp is dependent upon the number of entries in self.w
        # Mask pressures above the cutoff so the dotted lines stop there.
        presVals = np.where(presVals > self.mixRatStop, presVals, np.nan)
        self.x2TickTemp = self._new_getMixingRatioTemp(self.w, np.repeat(self.x2TickPres, 7))
        for mixRat in self.w:
            tempVals = self._new_getMixingRatioTemp(mixRat, presVals)
            self.ax1.plot(tempVals, presVals, color=self.colors[8], linestyle=self.lStyle[8], linewidth=self.lWidth[8])
        print "MixR: ", time.time() - timeCheckStart
        timeCheckStart = time.time()
        #Create wind profile line
        self.ax2.plot([self.windOffsetPos, self.windOffsetPos], [self.pMax, self.pMin], color=self.colors[9], linestyle=self.lStyle[9], linewidth=self.lWidth[9])
        print "WndP: ", time.time() - timeCheckStart
    def _plotProfile(self):
        """Plot the sounding: temperature and dewpoint traces on the skewed
        axis, wind barbs along the vertical wind-axis line on ax2."""
        timeCheckStart = time.time()
        #Plot temperature, dewpoint, and wind profiles
        self.ax1.plot(self.data['temp'], self.data['pres'], color=self.colors[0], linestyle=self.lStyle[0], linewidth=self.lWidth[0])
        self.ax1.plot(self.data['dewp'], self.data['pres'], color=self.colors[1], linestyle=self.lStyle[1], linewidth=self.lWidth[1])
        self.ax2.barbs(self.windOffsetPos*np.ones(len(self.data['wndU'])), self.data['pres'], self.data['wndU'], self.data['wndV'], color=self.colors[10], linestyle=self.lStyle[10], linewidth=self.lWidth[10])
        print "Plot Profile: ", time.time() - timeCheckStart
    def _plotAxes(self):
        """Create the temperature, pressure, and mixing-ratio labels.

        Must run after _plotBackground(), which sets self.x2TickTemp,
        self.x2TickPres and self.w.
        """
        timeCheckStart = time.time()
        #Create temperature labels
        xTickPos = np.arange(self.tMin, self.tMax + 1, 10)
        xTickStr = xTickPos.astype('<U10')
        for t in range(len(xTickStr)):
            xTickStr[t] += u'\u00b0C'  # append degree-C symbol
        self.ax1.set_xticks(xTickPos)
        self.ax1.set_xticklabels(xTickStr, clip_on=False)
        print "AxTemp: ", time.time() - timeCheckStart
        timeCheckStart = time.time()
        #Create pressure labels ###only plots labels from 1000 - 100 mb!
        yTickPos = np.arange(100,1001,100)
        yTickStr = yTickPos.astype('<U10')
        for p in range(len(yTickStr)):
            yTickStr[p] += u'mb'
        self.ax2.set_yticks(yTickPos)
        self.ax2.set_yticklabels(yTickStr)
        print "AxPres: ", time.time() - timeCheckStart
        timeCheckStart = time.time()
        #Create mixing ratio labels
        for i in range(len(self.x2TickTemp)):
            self.ax1.text(self.x2TickTemp[i], self.x2TickPres[0], str(self.w[i]), ha='center', va='center', alpha=.5, size='smaller')
        print "AxMixR: ", time.time() - timeCheckStart
        #Clears RHS y-axis labels
        self.ax1.set_yticklabels([])
    def _finalizePlot(self):
        """Apply axis bounds and title, then write the figure to skewT.png."""
        #Set plot bounds
        self.ax1.set_ybound(self.pMax, self.pMin)
        self.ax1.set_xbound(self.tMin, self.tMax)
        self.ax2.set_ybound(self.pMax, self.pMin)
        self.ax2.set_xbound(self.tMin, self.tMax)
        plt.title(self.title)
        #Save and display image
        plt.savefig('skewT.png')
        #plt.show()
    def plot(self):
        #Primary driver function for skewTLogP object
        self._plotProfile()
        self._plotBackground()
        self._plotAxes()
        self._finalizePlot()
    #Helper Functions
    def _new_getMoistAdiabaticLapseRate(self, temp, pres):
        ###NOTE: Must use K and Pa units for inputs. Don't know why, but it screws up if you don't and convert later
        ###Calculate moist adiabatic lapse rate (see Bluestein)
        # NOTE(review): the constants here (278., 2.526, 2.5e12) do not match
        # the usual values (Rd ~ 287 J/kg/K, Lv ~ 2.5e6 J/kg) - verify against
        # the Bluestein reference before trusting this function.  It does not
        # appear to be called anywhere in this file.
        sMixR = (1e-3)*self._new_getMixingRatio(self._new_getVaporPres(temp - 273.15), (1/100.)*pres)
        return (278.*temp + sMixR*2.526) / (pres * (1005. + .62198 * sMixR * 2.5e12 / (461.5 * temp**2)))
    def _new_getDryAdiabatTemp(self, potT, pres):
        """
        Calculate temperature along dry adiabat at a given pressure.
        Input:
        potT [C] : potential temperature
        pres [mb] : pressure
        Output:
        temp [C] : temperature along dry adiabat
        Calculation:
        Poisson solution
        temp = ((potT + 273.15) * (pres / 1000)^(gamma)) - 273.15
        where
        gamma = .2858565737 [unitless]  (~ Rd/cp)
        """
        return (potT + 273.15)*np.power(pres/1000., .2858565737) - 273.15
    def _new_getPotTemp(self, temp, pres):
        """
        Calculate potential temperature for a given temperature and pressure
        Input:
        temp [C] : temperature
        pres [mb] : pressure
        Output:
        potT [C] : potential temperature
        Calculation:
        Poisson solution
        potT = ((temp + 273.15) * (1000 / pres)^(gamma)) - 273.15
        where
        gamma = .2858565737 [unitless]  (~ Rd/cp)
        """
        return (temp + 273.15)*np.power(1000./pres, .2858565737) - 273.15
    def new_getSatAdiabatTemp(self, sPotT, pres):
        """
        Calculate temperature along a moist adiabat at a given pressure
        Input:
        sPotT [C] : saturation potential temperature
        pres [mb] : pressure
        Output:
        temp [C] : temperature
        Calculation:
        (From Stipanuk 1973)
        Iterative bisection-style search: 12 halvings of a 120 K step
        starting from 253.15 K.  Debug prints left in - this routine is
        still under development (see commented-out call sites).
        """
        print ""
        temp = 253.15
        for i in range(12):
            i += 1
            dTemp = (120. / 2**i) * np.sign((sPotT + 273.15) * np.exp(-2.6518986 * self._new_getMixingRatio(self._new_getVaporPres(temp - 273.15), pres) / temp) - (self._new_getPotTemp(temp - 273.15, pres) + 273.15))
            print "A: ", -2.6518986 * self._new_getMixingRatio(self._new_getVaporPres(temp - 273.15), pres) / temp
            print "B: ", self._new_getPotTemp(temp - 273.15, pres) + 273.15
            temp += dTemp
            print "d: ", dTemp
            print "T: ", temp
        print temp - 273.15
        return temp - 273.15
    def _new_getSatPotTemp(self, temp, pres):
        """
        Calculate saturation potential temperature for a given temperature and pressure
        Input:
        temp [C] : temperature
        pres [mb] : pressure
        Output:
        sPotT [C] : saturation potential temperature
        Calculation:
        (From Stipanuk 1973)
        sPotT = _new_getPotTemp(temp, pres) / exp(b * _new_getMixingRatio(_new_getVaporPres(temp), pres) / (temp + 273.15)) - 273.15
        where
        b = -2.6518986
        """
        return self._new_getPotTemp(temp, pres) / np.exp(-2.6518986 * self._new_getMixingRatio(self._new_getVaporPres(temp), pres) / (temp + 273.15)) - 273.15
    def _new_getMixingRatioTemp(self, mixR, pres):
        """
        Calculate dewpoint (regular) temperature for a given (saturation) mixing ratio and pressure
        Input:
        mixR [g/kg] : (saturation) mixing ratio
        pres [mb] : pressure
        Output:
        temp [C] : dewpoint (regular) temperature
        Calculation:
        A = pres * mixR / (6.11 * (622 - mixR))
        B = 1 / 273.15 - 1.846e-4 * ln(A)
        temp = 1 / B - 273.15
        """
        return 1/(1/273.15 - 1.846e-4*np.log(pres*mixR / (6.11*(622 - mixR)))) - 273.15
        # Alternate (Stipanuk-style) formulation, kept for reference:
        #A = np.log10(mixR * pres / (622 + mixR))
        #B = 10 ** (0.0498646455 * A + 2.4082965)
        #C = (10 ** (0.0915 * A) - 1.2035) ** 2
        #return B - 280.23475 + 38.9114 * C
    def _new_getMixingRatio(self, vPres, pres): #W()
        """
        Calculate (saturation) mixing ratio for a given (saturation) vapor pressure and pressure
        Input:
        vPres [mb] : (saturation) vapor pressure
        pres [mb] : pressure
        Output:
        mixR [g/kg] : (saturation) mixing ratio
        Calculation:
        mixR = 622 * vPres / (pres - vPres)
        """
        return 622. * vPres / (pres - vPres)
    def _new_getVaporPres(self, temp): #ESAT()
        """
        Calculate (saturation) vapor pressure
        Input:
        temp [C] : dewpoint (regular) temperature
        Output:
        vPres [mb] : (saturation) vapor pressure
        Calculation:
        (From Stipanuk 1973 -> Nordquist 1973)
        Clausius-Clapeyron style fit referenced to 6.11 mb at 0 C.
        """
        return 6.11 * np.exp(5417.118093 * (1 / 273.15 - 1 / (temp + 273.15)))
        # Alternate (Goff-Gratch style) formulation, kept for reference:
        #temp += 273.15
        #A = 23.832241 - 5.02808 * np.log10(temp)
        #B = 1.3816e7 * 10 ** (11.344 - 0.0303998 * temp)
        #C = 8.1328e3 * 10 ** (3.49149 - 1302.8844 / temp)
        #return 10 ** (A - B + C - 2949.076 / temp)
    def _new_getRH(self, temp, dewp):
        """
        Calculate relative humidity of a parcel
        Input:
        temp [C] : temperature
        dewp [C] : dewpoint temperature
        Output:
        relH [%] : relative humidity
        Calculation:
        relH = 100% * _new_getVaporPres(dewp) / _new_getVaporPres(temp)
        """
        return 100*(self._new_getVaporPres(dewp)/self._new_getVaporPres(temp))
    # NOTE(review): missing `self` parameter - calling this through an
    # instance would raise a TypeError.  Untested per the comment below;
    # currently unused.
    def _new_getIntersect(tA1, tA2, tB1, tB2, p1, p2):
        #Based on http://paulbourke.net/geometry/lineline2d/
        #HAVEN'T TESTED THIS YET!
        #Assumes pressure levels (y vals) are the same
        uA = ((tB2 - tB1)*(p1 - p2) - (p1 - p2)*(tA1 - tB1)) / ((p1 - p2)*(tA2 - tA1) - (tB2 - tB1)*(p2 - p1))
        return (tA1 + uA*(tA2 - tA1), p1 + uA*(p2 - p1))
    def new_analyzeParcel(self, temp, dewp, pres, wndU, wndV):
        """Derive surface-parcel parameters (LCL, CCL, convective
        temperature) and annotate them on the diagram.  wndU/wndV are
        accepted but currently unused."""
        #NOTE: Assumes pressure is decreasing with increasing index
        mixR = self._new_getMixingRatio(self._new_getVaporPres(dewp[0]), pres[0])
        #Calculate LCL (Stipanuk)
        dAdi = self._new_getPotTemp(temp[0], pres[0])
        pLCL = pres[0]
        for i in range(10):
            mRT = self._new_getMixingRatioTemp(mixR, pLCL)
            dAT = self._new_getDryAdiabatTemp(dAdi, pLCL)
            check = 0.02 * (mRT - dAT)
            # NOTE(review): `continue` on convergence skips the tLCL
            # assignment; if the very first iteration converges, tLCL is
            # never bound and the print below raises NameError.  A `break`
            # after recording tLCL = mRT looks like the intent - confirm.
            if abs(check) < .001:
                continue
            pLCL = pLCL * 2 ** check
            tLCL = mRT
        print "---> LCL P, T: ", pLCL, tLCL
        #Calculate Wet Bulb (Builds on LFL calc)
        #sAdi = self._new_getSatPotTemp(tLCL, pLCL)
        #tWbl = self._new_getSatAdiabatTemp(sAdi, pres[0])
        #print "Wet bulb T: ", tWbl
        #Calculate LFC (Builds on Wet Bulb calc)
        #This is an rough calculation. Will likely implement better method / improve this one later.
        #Don't trust this is delta P of measured pressure levels is large!
        #Assumes not saturated at surface
        #sAT = self._new_getSatAdiabatTemp(sAdi, pres)
        #init = np.where(sAT > temp)[0]
        #pLFC = (pres[init[0] - 1] + pres[init[0]]) / 2.
        #tLFC = (temp[init[0] - 1] + temp[init[0]]) / 2.
        #print "---> LFC P, T: ", pLFC, tLFC
        #Calculate EQL (builds on LFC calc)
        #This is an rough calculation. Will likely implement better method / improve this one later.
        #Don't trust this is delta P of measured pressure levels is large!
        #Assumes not saturated at surface
        #A = np.where(pres > pLFC)[0]
        #newP = pres[A[0]:A[-1]]
        #newT = temp[A[0]:A[-1]]
        #sAT = self._new_getSatAdiabatTemp(sAdi, newP)
        #init = np.where(sAT < newT)[0]
        #pEQL = (newP[init[0] - 1] + newP[init[0]]) / 2.
        #tEQL = (newT[init[0] - 1] + newT[init[0]]) / 2.
        #print "---> EQL P, T: ", pEQL, tEQL
        #Calculate CCL
        #This is an rough calculation. Will likely implement better method / improve this one later.
        #Don't trust this is delta P of measured pressure levels is large!
        #Assumes not saturated at surface
        mRT = self._new_getMixingRatioTemp(mixR, pres)
        init = np.where(mRT > temp)[0]
        pCCL = (pres[init[0] - 1] + pres[init[0]]) / 2.
        print "---> CCL P : ", pCCL
        tCCL = (temp[init[0] - 1] + temp[init[0]]) / 2.
        #Calculate Convective Temp
        # Follow the dry adiabat through the CCL back down to the surface.
        dAdi = self._new_getPotTemp(tCCL, pCCL)
        tCon = self._new_getDryAdiabatTemp(dAdi, pres[0])
        print "---> Conv T : ", tCon
        #Plot info on skew t
        self.ax1.text(tLCL, pLCL, " LCL", ha='left', va='center', size='smaller')
        self.ax1.plot(tLCL, pLCL, 'k_', markeredgewidth=2, markersize=11)
        self.ax1.text(tCCL, pCCL, " CCL", ha='left', va='center', size='smaller')
        self.ax1.plot(tCCL, pCCL, 'k_', markeredgewidth=2, markersize=11)
        self.ax1.text(tCon, pres[0], " TCon", ha='left', va='center', size='smaller')
        self.ax1.plot(tCon, pres[0], 'kx', markeredgewidth=2)
    #####
    #####
    #This bit based on functions found in nsharp's thermo.c
    def wobusFunc(self, temp):
        # Wobus polynomial correction function (scalar form): temp [C] in,
        # saturation correction [C] out.  Split at 20 C into two fits.
        temp = temp - 20
        if (temp <= 0):
            A = 1. + temp*(-8.841660499999999e-03 + temp*(1.4714143e-04 + temp*(-9.671989000000001e-07 + temp*(-3.2607217e-08 + temp*(-3.8598073e-10)))))
            return 15.13 / A**4
        else:
            A = temp*(4.9618922e-07 + temp*(-6.1059365e-09 + temp*(3.9401551e-11 + temp*(-1.2588129e-13 + temp*(1.6688280e-16)))))
            A = 1 + temp*(3.6182989e-03 + temp*(-1.3603273e-05 + A))
            return 29.94 / A**4 + .96*temp - 14.8
    def new_wobusFunc(self, temp):
        #Wobus function designed to work with an array of temps
        #Used in conjunction with new_wobusFunc_lt0 and new_wobusFunc_gt0
        temp = temp - 20
        return np.where(temp <= 0, self.new_wobusFunc_lt0(temp), self.new_wobusFunc_gt0(temp))
    def new_wobusFunc_lt0(self, temp):
        # Vectorized <= 20 C branch of the Wobus polynomial.
        A = 1. + temp*(-8.841660499999999e-03 + temp*(1.4714143e-04 + temp*(-9.671989000000001e-07 + temp*(-3.2607217e-08 + temp*(-3.8598073e-10)))))
        return 15.13 / A**4
    def new_wobusFunc_gt0(self, temp):
        # Vectorized > 20 C branch of the Wobus polynomial.
        A = temp*(4.9618922e-07 + temp*(-6.1059365e-09 + temp*(3.9401551e-11 + temp*(-1.2588129e-13 + temp*(1.6688280e-16)))))
        A = 1 + temp*(3.6182989e-03 + temp*(-1.3603273e-05 + A))
        return 29.94 / A**4 + .96*temp - 14.8
    def wetLift(self, presStart, tempStart, presEnd):
        #p (float) Pressure of initial parcel (hPa)
        #t (float) Temperature of initial parcel (C)
        #p2 (float) Pressure of final level (hPa)
        # =>Temperature (C [float])
        potT = self._new_getPotTemp(tempStart, presStart)
        corrT = potT - self.wobusFunc(potT) + self.wobusFunc(tempStart)
        return self.satLift(presEnd, corrT)
    def new_wetLift(self, presStart, tempStart, presEnd):
        # Vectorized counterpart of wetLift(); accepts arrays.
        #p (float) Pressure of initial parcel (hPa)
        #t (float) Temperature of initial parcel (C)
        #p2 (float) Pressure of final level (hPa)
        # =>Temperature (C [float])
        potT = self._new_getPotTemp(tempStart, presStart)
        corrT = potT - self.new_wobusFunc(potT) + self.new_wobusFunc(tempStart)
        return self.new_satLift(presEnd, corrT)
    def satLift(self, pres, temp):
        #/* Returns the temperature (c) of a parcel (thm), */
        #/* when lifted to level (pres). */
        #/* */
        #/* pres - Pressure to raise parcel (mb) */
        #/* thm - Sat. Pot. Temperature of parcel (c) */
        # Secant-method iteration; e0 == 999. doubles as the "first pass" flag.
        if np.abs(pres - 1000.) - 1.e-3 <= 0:
            return temp
        e0 = 999.
        while (np.abs(e0) - .1 > 0):
            if e0 == 999.:
                powF = np.power(pres/1000., .2858565737)
                t1 = self._new_getPotTemp(temp, pres)
                woto = self.wobusFunc(t1)
                wotm = self.wobusFunc(temp)
                e1 = woto - wotm
                rate = 1.
            else:
                rate = (t2 - t1) / (e2 - e1)
                t1 = t2
                e1 = e2
            t2 = t1 - e1 * rate
            e2 = (t2 + 273.15) / powF - 273.15
            wot2 = self.wobusFunc(t2)
            woe2 = self.wobusFunc(e2)
            e2 = e2 + wot2 - woe2 - temp
            e0 = e2 * rate
        return t2 - e0
    def new_satLift(self,pres,temp):
        # Vectorized satLift(): near-1000mb entries pass through unchanged.
        # NOTE(review): np.where evaluates both branches, so new_satLift_gtVal
        # runs even for entries that end up taking the pass-through value.
        return np.where(np.abs(pres - 1000.) <= 1.e-3, temp, self.new_satLift_gtVal(pres, temp))
    def new_satLift_gtVal(self,pres,temp):
        # Array secant iteration; each element iterates until its residual
        # e0 falls below 0.1, tracked via notDoneIdx.
        e0 = 999.* np.ones(temp.shape)
        start = 'Y'
        #left = temp.shape[0]
        count = 1
        while np.any(np.abs(e0) - .1 > 0):
            # NOTE(review): on non-convergence this only prints - it does not
            # break or raise, so the loop can spin forever.  Consider a break.
            if count >= 100:
                print("ERROR: Call to function new_satLift_gtVal() failed after {0} iterations.").format(count)
            notDoneIdx = np.where(np.abs(e0) > .1)[0]
            count += 1
            if start == 'Y':
                start = 'N'
                powF = np.power(pres/1000., .2858565737)
                t1 = self._new_getPotTemp(temp, pres)
                woto = self.new_wobusFunc(t1)
                wotm = self.new_wobusFunc(temp)
                e1 = woto - wotm
                rate = np.ones(temp.shape)
                #Just initialize these values as arrays - they will be overwritten
                t2 = np.ones(temp.shape)
                e2 = np.ones(temp.shape)
                wot2 = np.ones(temp.shape)
                woe2 = np.ones(temp.shape)
            else:
                rate[notDoneIdx] = (t2[notDoneIdx] - t1[notDoneIdx]) / (e2[notDoneIdx] - e1[notDoneIdx])
                t1[notDoneIdx] = t2[notDoneIdx]
                e1[notDoneIdx] = e2[notDoneIdx]
            #Values correct until here
            t2[notDoneIdx] = t1[notDoneIdx] - e1[notDoneIdx] * rate[notDoneIdx]
            e2[notDoneIdx] = (t2[notDoneIdx] + 273.15) / powF[notDoneIdx] - 273.15
            wot2[notDoneIdx] = self.new_wobusFunc(t2[notDoneIdx])
            woe2[notDoneIdx] = self.new_wobusFunc(e2[notDoneIdx])
            e2[notDoneIdx] = e2[notDoneIdx] + wot2[notDoneIdx] - woe2[notDoneIdx] - temp[notDoneIdx]
            e0[notDoneIdx] = e2[notDoneIdx] * rate[notDoneIdx]
        return t2 - e0
#####
#####
x = skewTLogP()
x.plot()
#x.solve(0., 1000, 1.2e-3)
#x.new_getSatAdiabatTemp(np.array([-40, -20,-10,0,10,20,40]), np.array([1000,1000,1000,1000,1000,1000,1000]))
#p = np.array([1000.,900.,800.,700.,600.,500.,400.,300.,200.,100.])
#t = np.array([20. ,20., 20., 20., 20., 20., 20., 20., 20., 20.])
#tk = 273.15 + np.array([20. ,20., 20., 20., 20., 20., 20., 20., 20., 20.])
#print x.TSA(np.array([273.15]), np.array([1000.]))
#print x.OS(np.array([273.15]), np.array([1000.]))
endTime = time.time()
totalTime = endTime - startTime
print("Total time: {0} s".format(totalTime))
| |
# -*- coding: utf-8 -*-
"""
Base settings file, common to all environments.
These settings can be overridden in local.py.
"""
import datetime
import os
import json
import hashlib
from datetime import timedelta
os_env = os.environ
def parent_dir(path):
    """Return the absolute path of the parent of *path*."""
    with_pardir = os.path.join(path, os.pardir)
    return os.path.abspath(with_pardir)
HERE = os.path.dirname(os.path.abspath(__file__))
BASE_PATH = parent_dir(HERE)  # website/ directory
APP_PATH = parent_dir(BASE_PATH)
ADDON_PATH = os.path.join(BASE_PATH, 'addons')
STATIC_FOLDER = os.path.join(BASE_PATH, 'static')
STATIC_URL_PATH = '/static'
ASSET_HASH_PATH = os.path.join(APP_PATH, 'webpack-assets.json')
ROOT = os.path.join(BASE_PATH, '..')
BCRYPT_LOG_ROUNDS = 12
# Application version is taken from the node package manifest so Python and
# JS report the same version.
with open(os.path.join(APP_PATH, 'package.json'), 'r') as fobj:
    VERSION = json.load(fobj)['version']
# Hours before email confirmation tokens expire
EMAIL_TOKEN_EXPIRATION = 24
CITATION_STYLES_PATH = os.path.join(BASE_PATH, 'static', 'vendor', 'bower_components', 'styles')
# Hours before pending embargo/retraction/registration automatically becomes active
RETRACTION_PENDING_TIME = datetime.timedelta(days=2)
EMBARGO_PENDING_TIME = datetime.timedelta(days=2)
REGISTRATION_APPROVAL_TIME = datetime.timedelta(days=2)
# Date range for embargo periods
EMBARGO_END_DATE_MIN = datetime.timedelta(days=2)
EMBARGO_END_DATE_MAX = datetime.timedelta(days=1460)  # Four years
LOAD_BALANCER = False
PROXY_ADDRS = []
# May set these to True in local.py for development
DEV_MODE = False
DEBUG_MODE = False
LOG_PATH = os.path.join(APP_PATH, 'logs')
TEMPLATES_PATH = os.path.join(BASE_PATH, 'templates')
ANALYTICS_PATH = os.path.join(BASE_PATH, 'analytics')
CORE_TEMPLATES = os.path.join(BASE_PATH, 'templates/log_templates.mako')
BUILT_TEMPLATES = os.path.join(BASE_PATH, 'templates/_log_templates.mako')
DOMAIN = 'http://localhost:5000/'
API_DOMAIN = 'http://localhost:8000/'
GNUPG_HOME = os.path.join(BASE_PATH, 'gpg')
GNUPG_BINARY = 'gpg'
# User management & registration
CONFIRM_REGISTRATIONS_BY_EMAIL = True
ALLOW_REGISTRATION = True
ALLOW_LOGIN = True
SEARCH_ENGINE = 'elastic'  # Can be 'elastic', or None
ELASTIC_URI = 'localhost:9200'
ELASTIC_TIMEOUT = 10
ELASTIC_INDEX = 'website'
SHARE_ELASTIC_URI = ELASTIC_URI
SHARE_ELASTIC_INDEX = 'share'
# For old indices
SHARE_ELASTIC_INDEX_TEMPLATE = 'share_v{}'
# Sessions
# TODO: Override OSF_COOKIE_DOMAIN in local.py in production
OSF_COOKIE_DOMAIN = None
COOKIE_NAME = 'osf'
# TODO: Override SECRET_KEY in local.py in production
SECRET_KEY = 'CHANGEME'
# Change if using `scripts/cron.py` to manage crontab
CRON_USER = None
# External services
USE_CDN_FOR_CLIENT_LIBS = True
USE_EMAIL = True
FROM_EMAIL = 'openscienceframework-noreply@osf.io'
SUPPORT_EMAIL = 'support@osf.io'
# SMTP Settings
MAIL_SERVER = 'smtp.sendgrid.net'
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = ''  # Set this in local.py
# OR, if using Sendgrid's API
SENDGRID_API_KEY = None
# Mailchimp
MAILCHIMP_API_KEY = None
MAILCHIMP_WEBHOOK_SECRET_KEY = 'CHANGEME'  # OSF secret key to ensure webhook is secure
ENABLE_EMAIL_SUBSCRIPTIONS = True
MAILCHIMP_GENERAL_LIST = 'Open Science Framework General'
#Triggered emails
OSF_HELP_LIST = 'Open Science Framework Help'
WAIT_BETWEEN_MAILS = timedelta(days=7)
NO_ADDON_WAIT_TIME = timedelta(weeks=8)
NO_LOGIN_WAIT_TIME = timedelta(weeks=4)
WELCOME_OSF4M_WAIT_TIME = timedelta(weeks=2)
NO_LOGIN_OSF4M_WAIT_TIME = timedelta(weeks=6)
NEW_PUBLIC_PROJECT_WAIT_TIME = timedelta(hours=24)
WELCOME_OSF4M_WAIT_TIME_GRACE = timedelta(days=12)
# TODO: Override in local.py
MAILGUN_API_KEY = None
# TODO: Override in local.py in production
UPLOADS_PATH = os.path.join(BASE_PATH, 'uploads')
MFR_CACHE_PATH = os.path.join(BASE_PATH, 'mfrcache')
MFR_TEMP_PATH = os.path.join(BASE_PATH, 'mfrtemp')
# Use Celery for file rendering
USE_CELERY = True
# Use GnuPG for encryption
USE_GNUPG = True
# File rendering timeout (in ms)
MFR_TIMEOUT = 30000
# TODO: Override in local.py in production
DB_HOST = 'localhost'
# Environment variables are always strings; coerce so DB_PORT is an int
# whether it comes from the environment or the default.
DB_PORT = int(os_env.get('OSF_DB_PORT', 27017))
DB_NAME = 'osf20130903'
DB_USER = None
DB_PASS = None
# Cache settings
SESSION_HISTORY_LENGTH = 5
SESSION_HISTORY_IGNORE_RULES = [
    lambda url: '/static/' in url,
    lambda url: 'favicon' in url,
    lambda url: url.startswith('/api/'),
]
# TODO: Configuration should not change between deploys - this should be dynamic.
CANONICAL_DOMAIN = 'openscienceframework.org'
COOKIE_DOMAIN = '.openscienceframework.org'  # Beaker
SHORT_DOMAIN = 'osf.io'
# TODO: Combine Python and JavaScript config
COMMENT_MAXLENGTH = 500
# Profile image options
PROFILE_IMAGE_LARGE = 70
PROFILE_IMAGE_MEDIUM = 40
PROFILE_IMAGE_SMALL = 20
# Conference options
CONFERENCE_MIN_COUNT = 5
# HTML sanitization whitelist for user-supplied wiki content.
WIKI_WHITELIST = {
    'tags': [
        'a', 'abbr', 'acronym', 'b', 'bdo', 'big', 'blockquote', 'br',
        'center', 'cite', 'code',
        'dd', 'del', 'dfn', 'div', 'dl', 'dt', 'em', 'embed', 'font',
        'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins',
        'kbd', 'li', 'object', 'ol', 'param', 'pre', 'p', 'q',
        's', 'samp', 'small', 'span', 'strike', 'strong', 'sub', 'sup',
        'table', 'tbody', 'td', 'th', 'thead', 'tr', 'tt', 'ul', 'u',
        'var', 'wbr',
    ],
    'attributes': [
        'align', 'alt', 'border', 'cite', 'class', 'dir',
        'height', 'href', 'id', 'src', 'style', 'title', 'type', 'width',
        'face', 'size',  # font tags
        'salign', 'align', 'wmode', 'target',
    ],
    # Styles currently used in Reproducibility Project wiki pages
    'styles': [
        'top', 'left', 'width', 'height', 'position',
        'background', 'font-size', 'text-align', 'z-index',
        'list-style',
    ]
}
# Add-ons
# Load addons from addons.json
with open(os.path.join(ROOT, 'addons.json')) as fp:
    addon_settings = json.load(fp)
    ADDONS_REQUESTED = addon_settings['addons']
    ADDONS_ARCHIVABLE = addon_settings['addons_archivable']
    ADDONS_COMMENTABLE = addon_settings['addons_commentable']
ADDON_CATEGORIES = [
    'documentation',
    'storage',
    'bibliography',
    'other',
    'security',
    'citations',
]
SYSTEM_ADDED_ADDONS = {
    # 'user': ['badges'],
    'user': [],
    'node': [],
}
# Piwik
# TODO: Override in local.py in production
PIWIK_HOST = None
PIWIK_ADMIN_TOKEN = None
PIWIK_SITE_ID = None
KEEN_PROJECT_ID = None
KEEN_WRITE_KEY = None
SENTRY_DSN = None
SENTRY_DSN_JS = None
# TODO: Delete me after merging GitLab
MISSING_FILE_NAME = 'untitled'
# Dashboard
ALL_MY_PROJECTS_ID = '-amp'
ALL_MY_REGISTRATIONS_ID = '-amr'
ALL_MY_PROJECTS_NAME = 'All my projects'
ALL_MY_REGISTRATIONS_NAME = 'All my registrations'
# FOR EMERGENCIES ONLY: Setting this to True will disable forks, registrations,
# and uploads in order to save disk space.
DISK_SAVING_MODE = False
# Seconds before another notification email can be sent to a contributor when added to a project
CONTRIBUTOR_ADDED_EMAIL_THROTTLE = 24 * 3600
# Google Analytics
GOOGLE_ANALYTICS_ID = None
GOOGLE_SITE_VERIFICATION = None
# Pingdom
PINGDOM_ID = None
DEFAULT_HMAC_SECRET = 'changeme'
DEFAULT_HMAC_ALGORITHM = hashlib.sha256
WATERBUTLER_URL = 'http://localhost:7777'
WATERBUTLER_ADDRS = ['127.0.0.1']
# Test identifier namespaces
DOI_NAMESPACE = 'doi:10.5072/FK2'
ARK_NAMESPACE = 'ark:99999/fk4'
EZID_USERNAME = 'changeme'
EZID_PASSWORD = 'changeme'
# Format for DOIs and ARKs
EZID_FORMAT = '{namespace}osf.io/{guid}'
USE_SHARE = True
SHARE_REGISTRATION_URL = ''
SHARE_API_DOCS_URL = ''
CAS_SERVER_URL = 'http://localhost:8080'
MFR_SERVER_URL = 'http://localhost:7778'
###### ARCHIVER ###########
ARCHIVE_PROVIDER = 'osfstorage'
MAX_ARCHIVE_SIZE = 5 * 1024 ** 3  # == math.pow(1024, 3) == 1 GB
MAX_FILE_SIZE = MAX_ARCHIVE_SIZE  # TODO limit file size?
ARCHIVE_TIMEOUT_TIMEDELTA = timedelta(1)  # 24 hours
ENABLE_ARCHIVER = True
JWT_SECRET = 'changeme'
JWT_ALGORITHM = 'HS256'
##### CELERY #####
# Default RabbitMQ broker
BROKER_URL = 'amqp://'
# Default RabbitMQ backend
CELERY_RESULT_BACKEND = 'amqp://'
# Modules to import when celery launches
CELERY_IMPORTS = (
    'framework.tasks',
    'framework.tasks.signals',
    'framework.email.tasks',
    'framework.analytics.tasks',
    'website.mailchimp_utils',
    'website.notifications.tasks',
    'website.archiver.tasks',
    'website.search.search',
    'api.caching.tasks'
)
# celery.schedule will not be installed when running invoke requirements the first time.
try:
    from celery.schedules import crontab
except ImportError:
    pass
else:
    # Setting up a scheduler, essentially replaces an independent cron job
    CELERYBEAT_SCHEDULE = {
        '5-minute-emails': {
            'task': 'notify.send_users_email',
            'schedule': crontab(minute='*/5'),
            'args': ('email_transactional',),
        },
        'daily-emails': {
            'task': 'notify.send_users_email',
            'schedule': crontab(minute=0, hour=0),
            'args': ('email_digest',),
        },
    }
WATERBUTLER_JWE_SALT = 'yusaltydough'
WATERBUTLER_JWE_SECRET = 'CirclesAre4Squares'
WATERBUTLER_JWT_SECRET = 'ILiekTrianglesALot'
WATERBUTLER_JWT_ALGORITHM = 'HS256'
WATERBUTLER_JWT_EXPIRATION = 15
DRAFT_REGISTRATION_APPROVAL_PERIOD = datetime.timedelta(days=10)
assert (DRAFT_REGISTRATION_APPROVAL_PERIOD > EMBARGO_END_DATE_MIN), 'The draft registration approval period should be more than the minimum embargo end date.'
PREREG_ADMIN_TAG = "prereg_admin"
ENABLE_INSTITUTIONS = False
| |
from dateutil import tz
from functools import reduce
import waffle
from django.db.models import Q
from django.utils import timezone
from django.utils.safestring import mark_safe
from core.common.mongo import c_chat_context
from ct.models import UnitStatus, Response, NEED_HELP_STATUS, DONE_STATUS, NEED_REVIEW_STATUS
from ct.templatetags.ct_extras import md2html
from chat.models import Message, UnitError, YES_NO_OPTIONS
from chat.utils import is_last_thread, has_updates
class START(object):
    """
    Initialize data for viewing a courselet.
    Go immediately to first lesson (not yet completed).
    """
    title = 'Start updates flow'
    edges = (
        dict(name='next', toNode='UPDATES', title='Present common update message'),
    )

    # TODO add unittests
    def update_activity(self, chat_id: int, thread_id: int) -> None:
        """Upsert the chat's current thread id and last-activity timestamp in Mongo."""
        c_chat_context().update_one(
            {"chat_id": chat_id},
            {"$set": {
                "thread_id": thread_id,
                f"activity.{thread_id}": timezone.now(),
                "need_faqs": False
            }},
            upsert=True
        )

    def collect_updates(self, node, fsmStack, request, **kwargs):
        """
        Gather everything new since the student's last visit to this thread
        (EM resolutions, FAQ answers, new EMs, new FAQs) into FSM state.
        Only non-empty collections are stored, so later nodes can route on
        key presence in the state's JSON data.
        """
        # TODO add unittests
        chat = kwargs.get('chat')
        unit_lesson = kwargs.get('unitlesson')
        # NOTE(review): .first() may return None, which would raise
        # AttributeError on .content -- assumes a matching response message
        # always exists when this FSM starts; TODO confirm.
        response = chat.message_set.filter(
            lesson_to_answer_id=unit_lesson.id,
            kind='response',
            contenttype='response',
            content_id__isnull=False).first().content
        affected_ems = [i.errorModel for i in response.studenterror_set.all()]
        context = c_chat_context().find_one({"chat_id": chat.id})
        last_access_time = context.get('activity', {}).get(f"{unit_lesson.id}") if context else None
        # Mongo timestamps come back naive; make them UTC-aware before comparing.
        tz_aware_datetime = (
            last_access_time.replace(tzinfo=tz.tzutc()) if last_access_time else
            chat.last_modify_timestamp.replace(tzinfo=tz.tzutc()))
        # Collect EMs resolutions. Don't filter by user.
        # (Idiom fix: the original used `expr if cond else None` purely for
        # side effects; plain `if` statements below are equivalent.)
        em_resolutions = unit_lesson.em_resolutions(tz_aware_datetime, affected_ems)
        if em_resolutions:
            fsmStack.state.set_data_attr('em_resolutions', em_resolutions)
        thread_answer = unit_lesson.get_answers().first()
        interested_faqs = thread_answer.response_set.filter(
            Q(
                kind=Response.STUDENT_QUESTION, inquirycount__addedBy=request.user
            ) | Q(author=request.user, kind=Response.STUDENT_QUESTION))
        # Collect FAQ answers. Do filter by user as well as by applied previously FAQs
        faq_answers = unit_lesson.faq_answers(tz_aware_datetime, request.user, interested_faqs)
        if faq_answers:
            fsmStack.state.set_data_attr('faq_answers', faq_answers)
        # Collect new EMs. Don't filter by user
        new_ems = unit_lesson.new_ems(tz_aware_datetime)
        if new_ems:
            fsmStack.state.set_data_attr('new_ems', new_ems)
        # Collect new FAQs. Do filter by user
        new_faqs = unit_lesson.new_faqs(tz_aware_datetime, request.user)
        if new_faqs:
            fsmStack.state.set_data_attr('new_faqs', new_faqs)

    def start_event(self, node, fsmStack, request, **kwargs):
        """
        Event handler for START node.
        Collects updates, records activity, ensures a UnitStatus exists,
        then transitions along the 'next' edge.
        """
        unit = fsmStack.state.get_data_attr('unit')
        fsmStack.state.title = 'Study: %s' % unit.title
        chat = kwargs.get('chat')
        unit_lesson = kwargs.get('unitlesson')
        self.collect_updates(node, fsmStack, request, **kwargs)
        self.update_activity(chat.id, unit_lesson.id)
        try:  # use unitStatus if provided
            unitStatus = fsmStack.state.get_data_attr('unitStatus')
        except AttributeError:  # create new, empty unitStatus
            unitStatus = UnitStatus(unit=unit, user=request.user)
            unitStatus.save()
            fsmStack.state.set_data_attr('unitStatus', unitStatus)
        fsmStack.state.unitLesson = kwargs.get('unitlesson') or unitStatus.get_lesson()
        return fsmStack.state.transition(
            fsmStack, request, 'next', useCurrent=True, **kwargs
        )

    def get_lesson_url(self, node, state, request, **kwargs):
        """
        Get URL for any lesson.
        """
        course = state.get_data_attr('course')
        unitStatus = state.get_data_attr('unitStatus')
        ul = unitStatus.get_lesson()
        return ul.get_study_url(course.pk)
class UPDATES(object):
    """
    View a lesson updates.
    Routes to whichever kind of update (resolutions, answers, EMs, FAQs)
    is present in the FSM state's JSON data.
    """
    get_path = get_lesson_url
    title = 'View updates'
    edges = (
        dict(name='next', toNode='FAILEDTRANSITION', title='Go to new resolutions'),
    )

    # Ordered routing table: first matching state key wins.
    _ROUTES = (
        ('em_resolutions', 'SHOW_NEW_RESOLUTIONS'),
        ('faq_answers', 'SHOW_NEW_ANSWERS'),
        ('new_ems', 'SHOW_NEW_EMS'),
        ('new_faqs', 'SHOW_NEW_FAQS'),
    )

    def next_edge(self, edge, *args, **kwargs):
        """Dispatch to the first update node whose data key is present."""
        if args:
            state_data = args[0].state.load_json_data()
            for key, node_name in self._ROUTES:
                if key in state_data:
                    return edge.fromNode.fsm.get_node(node_name)
        return edge.toNode

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Announce whether any updates exist for the current thread."""
        state_data = chat.state.load_json_data()
        if any(key in state_data for key, _ in self._ROUTES):
            text = 'There are new updates for a Thread you asked for a help.'
            # Remember which unit lesson the updates belong to.
            c_chat_context().update_one(
                {"chat_id": chat.id},
                {"$set": {"actual_ul_id": chat.state.unitLesson.id}}
            )
        else:
            text = 'I can\'t find updates for you.'
        message = Message(
            chat=chat,
            text=text,
            owner=chat.user,
            input_type='custom',
            kind='message',
            is_additional=is_additional,
        )
        message.save()
        return message
class SHOW_NEW_RESOLUTIONS(object):
    """
    Show all new Resolutions.
    """
    title = 'View resolutions'
    edges = (
        dict(name='next', toNode='SHOW_EM', title='Show resolution'),
    )

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Post an intro message announcing new resolutions for the student's EMs."""
        _data = {
            'chat': chat,
            # Fixed user-facing typos ("miscoceptions" -> "misconceptions",
            # "overcame" -> "overcome") and removed the run of spaces the old
            # backslash line-continuation embedded inside the literal.
            'text': ('New resolutions for your misconceptions have been added. '
                     'Hope it will help you to overcome your misunderstanding.'),
            'owner': chat.user,
            'input_type': 'custom',
            'kind': 'message',
            'is_additional': is_additional
        }
        message = Message(**_data)
        message.save()
        return message
class SHOW_EM(object):
    """
    Show EM for a group of Resolutions.
    """
    title = 'Show EM'
    edges = (
        dict(name='next', toNode='SHOW_EM_RESOLUTION', title='Show resolution'),
    )

    def next_edge(self, edge, *args, **kwargs):
        # Drain resolutions for the current EM first, then move to the next
        # EM; otherwise follow the static edge.
        if args and args[0].state.get_data_attr('resolutions_stack'):
            return edge.fromNode.fsm.get_node('SHOW_EM_RESOLUTION')
        elif args and args[0].state.get_data_attr('em_resolutions'):
            return edge.fromNode.fsm.get_node('SHOW_EM')
        return edge.toNode

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Pop the next EM off state and present its title/text as a button message."""
        em_resolutions = chat.state.get_data_attr('em_resolutions')
        if em_resolutions:
            em = em_resolutions.pop()
            # Stash this EM's resolutions so SHOW_EM_RESOLUTION can drain them.
            chat.state.set_data_attr('resolutions_stack', em['resolutions'])
            chat.state.set_data_attr('em_resolutions', em_resolutions)
            chat.state.save_json_data()
        # NOTE(review): if 'em_resolutions' was empty, `em` is unbound and the
        # next line raises NameError -- assumes routing guarantees a non-empty
        # list whenever this node is entered; TODO confirm.
        _data = {
            'chat': chat,
            'text': mark_safe(md2html(f'**{em.get("em_title")}** \n {em.get("em_text")}')),
            'owner': chat.user,
            'input_type': 'options',
            'kind': 'button',
            'is_additional': is_additional
        }
        message = Message(**_data)
        message.save()
        return message
class SHOW_EM_RESOLUTION(object):
    """
    Show new Resolutions one by one.
    """
    title = 'View a resolution'
    edges = (
        dict(name='next', toNode='ACT', title='Go to new answers'),
    )

    def next_edge(self, edge, *args, **kwargs):
        # Drain remaining resolutions, then remaining EMs, then fall through
        # to the other update kinds still present in state.
        if args and args[0].state.get_data_attr('resolutions_stack'):
            return edge.fromNode.fsm.get_node('SHOW_EM_RESOLUTION')
        elif args and args[0].state.get_data_attr('em_resolutions'):
            return edge.fromNode.fsm.get_node('SHOW_EM')
        elif args and 'faq_answers' in args[0].state.load_json_data():
            return edge.fromNode.fsm.get_node('SHOW_NEW_ANSWERS')
        elif args and 'new_ems' in args[0].state.load_json_data():
            return edge.fromNode.fsm.get_node('SHOW_NEW_EMS')
        elif args and 'new_faqs' in args[0].state.load_json_data():
            return edge.fromNode.fsm.get_node('SHOW_NEW_FAQS')
        return edge.toNode

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Pop one resolution off the stack and present its text as a button message."""
        # TODO implement Stack-like interface
        resolutions_stack = chat.state.get_data_attr('resolutions_stack')
        if resolutions_stack:
            resolution = resolutions_stack.pop()
            chat.state.set_data_attr('resolutions_stack', resolutions_stack)
            chat.state.save_json_data()
        # NOTE(review): `resolution` is unbound if the stack was empty
        # (NameError) -- assumes routing only enters this node with a
        # non-empty stack; TODO confirm.
        _data = {
            'chat': chat,
            'text': mark_safe(md2html(resolution.get('text'))),
            'owner': chat.user,
            'input_type': 'options',
            'kind': 'button',
            'is_additional': is_additional
        }
        message = Message(**_data)
        message.save()
        return message
class SHOW_NEW_ANSWERS(object):
    """
    Start point in Answers presentation.
    """
    title = 'View answers'
    edges = (
        dict(name='next', toNode='SHOW_FAQ', title='Go to FAQ recall step'),
    )

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs):
        """Announce that new answers exist for FAQs the student follows."""
        announcement = Message(
            chat=chat,
            text='There are new answers for FAQs you are interested in',
            owner=chat.user,
            input_type='custom',
            kind='message',
            is_additional=is_additional,
        )
        announcement.save()
        return announcement
class SHOW_FAQ(object):
    """
    Show FAQ one interested in.
    """
    title = 'Recall FAQ'
    edges = (
        dict(name='next', toNode='ACT', title='Go to the next FAQ answer'),
    )

    def next_edge(self, edge, *args, **kwargs):
        chat = args[0]
        # NOTE(review): with the 'compound_faq_answer' switch on, this pops a
        # FAQ into 'answers_stack' during edge traversal -- the same pop logic
        # get_message() below also runs; verify both are not executed for the
        # same step, or a FAQ could be skipped.
        if waffle.switch_is_active('compound_faq_answer'):
            faq_answers = chat.state.get_data_attr('faq_answers')
            if faq_answers:
                faq = faq_answers.pop()
                answers = faq['answers']
                # Tag each answer with its FAQ title for later display.
                for answer in answers:
                    answer['faq_title'] = (faq.get('faq_title', ''))
                chat.state.set_data_attr('answers_stack', answers)
                chat.state.set_data_attr('faq_answers', faq_answers)
                chat.state.save_json_data()
        # Drain answers for the current FAQ, then further FAQs, then the
        # remaining update kinds.
        if args and chat.state.get_data_attr('answers_stack'):
            return edge.fromNode.fsm.get_node('SHOW_FAQ_ANSWER')
        elif args and chat.state.get_data_attr('faq_answers'):
            return edge.fromNode.fsm.get_node('SHOW_FAQ')
        elif args and 'new_ems' in chat.state.load_json_data():
            return edge.fromNode.fsm.get_node('SHOW_NEW_EMS')
        elif args and 'new_faqs' in chat.state.load_json_data():
            return edge.fromNode.fsm.get_node('SHOW_NEW_FAQS')
        return edge.toNode

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Pop the next FAQ, stash its answers, and present its title/text."""
        faq_answers = chat.state.get_data_attr('faq_answers')
        if faq_answers:
            faq = faq_answers.pop()
            answers = faq['answers']
            for answer in answers:
                answer['faq_title'] = (faq.get('faq_title', ''))
            chat.state.set_data_attr('answers_stack', answers)
            chat.state.set_data_attr('faq_answers', faq_answers)
            chat.state.save_json_data()
        # NOTE(review): `faq` is unbound if 'faq_answers' was empty (NameError)
        # -- assumes routing guarantees a non-empty list here; TODO confirm.
        _data = {
            'chat': chat,
            'text': mark_safe(md2html(f'**{faq.get("faq_title")}** \n {faq.get("faq_text")}')),
            'owner': chat.user,
            'input_type': 'options',
            'kind': 'button',
            'is_additional': is_additional
        }
        message = Message(**_data)
        message.save()
        return message
class SHOW_FAQ_ANSWER(object):
    """
    Show FAQ one interested in.
    """
    title = 'Present the FAQ answers'
    edges = (
        dict(name='next', toNode='ACT', title='Go to new EMs'),
    )

    def next_edge(self, edge, *args, **kwargs):
        # Drain remaining answers, then remaining FAQs, then the other
        # update kinds still present in state.
        if args and args[0].state.get_data_attr('answers_stack'):
            return edge.fromNode.fsm.get_node('SHOW_FAQ_ANSWER')
        elif args and args[0].state.get_data_attr('faq_answers'):
            return edge.fromNode.fsm.get_node('SHOW_FAQ')
        elif args and 'new_ems' in args[0].state.load_json_data():
            return edge.fromNode.fsm.get_node('SHOW_NEW_EMS')
        elif args and 'new_faqs' in args[0].state.load_json_data():
            return edge.fromNode.fsm.get_node('SHOW_NEW_FAQS')
        return edge.toNode

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Pop one answer off the stack and present it as a button message."""
        # TODO implement Stack-like interface
        answers_stack = chat.state.get_data_attr('answers_stack')
        if answers_stack:
            answer = answers_stack.pop()
            chat.state.set_data_attr('answers_stack', answers_stack)
            chat.state.save_json_data()
        # NOTE(review): `answer` is unbound if the stack was empty (NameError)
        # -- assumes routing guarantees a non-empty stack here; TODO confirm.
        if waffle.switch_is_active('compound_faq_answer'):
            # Compound mode: echo the question title before the answer body.
            text1 = md2html(f'Here\'s my answer to your question \"{answer.get("faq_title")}\"')
            text2 = md2html(answer.get('text'))
            text = text1 + text2
        else:
            text = answer.get('text')
        _data = {
            'chat': chat,
            'text': mark_safe(text),
            'owner': chat.user,
            'input_type': 'options',
            'kind': 'button',
            'is_additional': is_additional
        }
        message = Message(**_data)
        message.save()
        return message
class SHOW_NEW_EMS(object):
    """
    Show all new EMs.
    """
    title = 'View EMs'
    edges = (
        dict(name='next', toNode='GET_NEW_EMS', title='Go to getting Student response'),
    )

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Announce new EMs; the actual checkbox list is built by GET_NEW_EMS."""
        _data = {
            'chat': chat,
            'text': """
I have added new blindspots in this thread after reading your answers. Hopefully they'll help you understand these concepts better.
Check the box(es) that seem relevant to your answer (if any).
""",
            'owner': chat.user,
            'input_type': 'custom',
            'kind': 'message',
            'is_additional': is_additional
        }
        message = Message(**_data)
        message.save()
        return message
class GET_NEW_EMS(object):
    """
    Get student response for new EMs.
    """
    title = 'Get EMs from a Student'
    edges = (
        dict(name='next', toNode='ACT', title='Go to new FAQs'),
    )

    def next_edge(self, edge, *args, **kwargs):
        # Detour to new FAQs when any are pending; otherwise follow the edge.
        if args and 'new_faqs' in args[0].state.load_json_data():
            return edge.fromNode.fsm.get_node('SHOW_NEW_FAQS')
        return edge.toNode

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Build the selectable message asking which new EMs apply to the response."""
        unit_lesson = next_lesson
        # NOTE(review): .first() may be None, raising AttributeError on
        # .content -- assumes a response message exists; TODO confirm.
        response = chat.message_set.filter(
            lesson_to_answer_id=unit_lesson.id, kind='response', contenttype='response').first().content
        # TODO investigate 'content_id': uniterror.id AttributeError: 'NoneType' object has no attribute 'id'
        uniterror = UnitError.objects.filter(response=response, unit=chat.enroll_code.courseUnit.unit).first()
        _data = {
            'chat': chat,
            'contenttype': 'uniterror',
            'content_id': uniterror.id if uniterror else None,
            'owner': chat.user,
            'input_type': 'options',
            'kind': 'uniterror',
            'is_additional': is_additional
        }
        message = Message(**_data)
        message.save()
        return message

    def get_errors(self, message) -> str:
        """
        Render the new-EM checkbox list as an HTML string, pre-selecting EMs
        the student already marked. (The old '-> Message' annotation was
        wrong: this returns a str.)
        """
        checked_errors = UnitError.objects.get(
            id=message.content_id
        ).response.studenterror_set.all().values_list('errorModel', flat=True)
        error_str = (
            '<li><div class="chat-check chat-selectable {}" data-selectable-attribute="errorModel" '
            'data-selectable-value="{:d}"></div><h3>{}</h3></li>'
        )
        # ''.join instead of reduce(add, [...]): identical output for a
        # non-empty list, but no TypeError on an empty 'new_ems' list -- the
        # empty-list fallback below was previously unreachable because
        # reduce() without an initializer raises on an empty sequence.
        errors = ''.join(
            error_str.format(
                'chat-selectable-selected' if em.get('em_id') in checked_errors else '',
                em.get('em_id'),
                em.get('em_title')
            ) for em in message.chat.state.get_data_attr('new_ems')
        )
        return '<ul class="chat-select-list">{}</ul>'.format(
            errors or '<li><h3>There are no misconceptions to display.</h3></li>'
        )
class SHOW_NEW_FAQS(object):
    """
    Show all new FAQs.
    """
    title = 'View FAQs'
    edges = (
        dict(name='next', toNode='FAQ_UPDATES', title='Ask for acknowlegement'),
    )

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Announce that other students posted new questions on this thread."""
        announcement = Message(
            chat=chat,
            text='There are new questions from Students. I hope it can help you.',
            owner=chat.user,
            input_type='custom',
            kind='message',
            is_additional=is_additional,
        )
        announcement.save()
        return announcement
class FAQ_UPDATES(object):
    """Pass-through node between new-FAQ display and the acknowledgement step."""
    title = 'FAQ_UPDATES'
    edges = (
        dict(name='next', toNode='ACT', title='View Next Lesson'),
    )
class ACT(object):
    """
    Get acknowledgement.
    """
    title = 'Check acknowlegement'
    edges = (
        dict(name='next', toNode='GET_ACT', title='Get an acknowlegement'),
    )

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Ask the student whether anything still worries them."""
        prompt = Message(
            chat=chat,
            text='Have you anything else you are worried about?',
            owner=chat.user,
            input_type='custom',
            kind='message',
            is_additional=is_additional,
        )
        prompt.save()
        return prompt
class GET_ACT(object):
    """
    Get acknowledgement.
    """
    title = 'Check acknowlegement'
    edges = (
        dict(name='next', toNode='TRANSITION', title='Move to the transition state'),
    )
    # Maps the student's yes/no option to the Response status to store.
    EVAL_TO_STATUS_MAP = {
        'yes': NEED_HELP_STATUS,
        'no': DONE_STATUS
    }

    def next_edge(self, edge, *args, **kwargs):
        # At the top-level state only: if no remaining thread has pending
        # updates for a still-unresolved response, finish the FSM early.
        if not args[0].state.parentState:
            chat = args[0]
            threads = chat.enroll_code.courseUnit.unit.unitlesson_set.filter(order__isnull=False).order_by('order')
            has_updates = False
            for thread in threads:
                # TODO: move to a dedicated util
                response_msg = chat.message_set.filter(
                    lesson_to_answer_id=thread.id,
                    kind='response',
                    contenttype='response',
                    content_id__isnull=False).last()
                if not response_msg:
                    continue
                response = response_msg.content
                # "Needs help" includes a never-set (None) status.
                is_need_help = response.status in (None, NEED_HELP_STATUS, NEED_REVIEW_STATUS)
                if is_need_help and thread.updates_count(chat) > 0:
                    has_updates = True
                    break
            if not has_updates:
                return edge.fromNode.fsm.get_node('END')
        return edge.toNode

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Create an empty Response and wrap it in a yes/no user-input message."""
        _response_data = {
            'lesson': chat.state.unitLesson.lesson,
            'unitLesson': chat.state.unitLesson,
            'course': chat.enroll_code.courseUnit.course,
            'author': chat.user,
            'activity': chat.state.activity,
            'is_test': chat.is_test,
            'is_preview': chat.enroll_code.isPreview,
            'is_trial': chat.is_trial,
        }
        resp = Response(**_response_data)
        resp.save()
        _data = {
            'contenttype': 'response',
            'content_id': resp.id,
            'input_type': 'options',
            # This is needed to track the last response to handle status
            'lesson_to_answer_id': chat.state.unitLesson.id,
            'chat': chat,
            'owner': chat.user,
            'kind': 'response',
            'userMessage': True,
            'is_additional': is_additional
        }
        message = Message(**_data)
        message.save()
        return message

    def get_options(self, *args, **kwargs):
        # Render YES_NO_OPTIONS as option dicts for the UI.
        return [dict(value=i[0], text=i[1]) for i in YES_NO_OPTIONS]

    def handler(self, message, chat, request, state_handler) -> None:
        """
        Handle Student response.
        Must be used during PUT request processing.
        """
        response = message.content
        # Unknown/missing option defaults to NEED_HELP_STATUS.
        response.status = self.EVAL_TO_STATUS_MAP.get(request.data.get('option'), NEED_HELP_STATUS)
        response.save()
        message.text = dict(YES_NO_OPTIONS).get(request.data.get('option'))
        message.save()
        chat.next_point = message
        chat.last_modify_timestamp = timezone.now()
        chat.save()
class TRANSITION(object):
    """End-of-thread transition: offer the next thread and/or pending updates."""
    title = 'Transition'
    edges = (
        dict(name='next', toNode='END', title='Get an acknowlegement'),
    )

    def next_edge(self, edge, fsmStack, request, useCurrent=False, **kwargs):
        """
        Edge method that moves us to right state for next lesson (or END).
        """
        fsm = edge.fromNode.fsm
        # Jump to VIEWUPDATES when the student chose to view the next update
        # (flag enabled by handler() below).
        if 'next_update' in fsmStack.state.load_json_data() and \
                fsmStack.state.get_data_attr('next_update') and \
                fsmStack.state.get_data_attr('next_update').get('enabled'):
            return fsm.get_node('VIEWUPDATES')
        return edge.toNode

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Compose the completion message, mentioning pending updates or the next thread."""
        threads = chat.enroll_code.courseUnit.unit.unitlesson_set.filter(order__isnull=False).order_by('order')
        has_updates = {
            'enabled': False,
            'thread_id': None
        }
        for thread in threads:
            # TODO: move to a dedicated util
            response_msg = chat.message_set.filter(
                lesson_to_answer_id=thread.id,
                kind='response',
                contenttype='response',
                content_id__isnull=False).last()
            if not response_msg:
                continue
            response = response_msg.content
            is_need_help = response.status in (None, NEED_HELP_STATUS, NEED_REVIEW_STATUS)
            if is_need_help and thread.updates_count(chat) > 0:
                has_updates.update({'thread_id': thread.id})
                chat.state.set_data_attr('next_update', has_updates)
                chat.state.save_json_data()
                break
        if has_updates['thread_id']:
            # `thread` is the loop variable left bound by the break above.
            text = f"""
            You have completed this thread.
            I have posted new messages to help you in the thread "{thread.lesson.title}".
            Would you like to view these updates now?
            """
        elif chat.state.parentState:
            # Walk up to the enclosing 'chat' FSM to find the next lesson title.
            next_lesson = None
            parent = chat.state.parentState
            while parent and not parent.fsmNode.fsm.fsm_name_is_one_of('chat'):
                parent = parent.parentState
            if parent:
                status = parent.get_data_attr('unitStatus')
                next_lesson = status.get_next_lesson().lesson.title if status.get_next_lesson() else None
            text = f"""
            You have completed this thread.
            Click on Continue below to view your next thread "{next_lesson}".
            """ if next_lesson else 'You have completed this thread.'
        else:
            text = 'You have completed this thread.'
        _data = {
            'chat': chat,
            'text': text,
            'owner': chat.user,
            'input_type': 'options',
            'kind': 'button',
            'sub_kind': 'transition',
            'is_additional': is_additional
        }
        message = Message(**_data)
        message.save()
        return message

    def get_options(self, *args, **kwargs) -> list:
        """
        We should not reach this code on last transition node w/o updates.
        """
        state = args[0].state
        parent = state.parentState
        while parent and not parent.fsmNode.fsm.fsm_name_is_one_of('chat'):
            parent = parent.parentState
        # 'Continue' only when there is a next thread in the parent chat FSM.
        options = [{'value': 'next_thread', 'text': 'Continue'}] \
            if parent and not is_last_thread(parent) else []
        if has_updates(state):
            options.insert(0, {'value': 'next_update', 'text': 'View updates'})
            if len(options) == 2:
                options[1]['text'] = 'View next thread'
        return options

    def handler(self, message, chat, request, state_handler) -> None:
        """
        Handle Student transition decision.
        Must be used during PUT request processing.
        """
        data = request.data.get('option')
        if data == 'next_update':
            # Enable the flag that next_edge() checks to route to VIEWUPDATES.
            data = chat.state.get_data_attr('next_update')
            data.update({'enabled': True})
            chat.state.set_data_attr('next_update', data)
            chat.state.save_json_data()
        chat.next_point = state_handler.next_point(
            current=message.content,
            chat=chat,
            message=message,
            request=request)
        chat.save()
class VIEWUPDATES(object):
    """Entry node for viewing the next batch of updates."""
    # NOTE(review): title says 'END' although this is the view-updates node --
    # looks like a copy/paste leftover; kept as-is to preserve behavior.
    title = 'END'
    edges = (
        dict(name='next', toNode='END', title='Get updates'),
    )

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Post the placeholder message for the view-updates step."""
        note = Message(
            chat=chat,
            text='Hey hey UPD',
            owner=chat.user,
            input_type='custom',
            kind='message',
            is_additional=is_additional,
        )
        note.save()
        return note
class FAILEDTRANSITION(object):
    """
    There we want to ask a Student to submit the transition to the next Thread.
    """
    title = 'Transition'
    edges = (
        dict(name='next', toNode='END', title='Move to the END'),
    )

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Tell the student the updates were already consumed through another path."""
        _data = {
            'chat': chat,
            # Fixed user-facing typos: "Look's" -> "Looks", "revieved" -> "received".
            'text': 'Looks like you received updates in a different way.',
            'owner': chat.user,
            'input_type': 'options',
            'kind': 'button',
            'sub_kind': 'transition',
            'is_additional': is_additional
        }
        message = Message(**_data)
        message.save()
        return message
class END(object):
    """
    Final node: serves as a marker telling the FSM handler to remove this state.
    """
    title = 'Courselet core lessons completed'

    def get_message(self, chat, next_lesson, is_additional, *args, **kwargs) -> Message:
        """Post the farewell message before the state is popped."""
        farewell = Message(
            chat=chat,
            text='Let\'s return to your previous state.',
            owner=chat.user,
            input_type='custom',
            kind='message',
            is_additional=is_additional,
        )
        farewell.save()
        return farewell
def get_specs():
    """
    Get FSM specifications stored in this file.
    """
    from fsm.fsmspec import FSMSpecification

    # All plugin nodes, in presentation order.
    plugin_nodes = [
        START,
        UPDATES,
        SHOW_NEW_RESOLUTIONS,
        SHOW_EM,
        SHOW_EM_RESOLUTION,
        SHOW_NEW_ANSWERS,
        SHOW_FAQ,
        SHOW_FAQ_ANSWER,
        SHOW_NEW_EMS,
        GET_NEW_EMS,
        SHOW_NEW_FAQS,
        FAQ_UPDATES,
        ACT,
        GET_ACT,
        TRANSITION,
        VIEWUPDATES,
        FAILEDTRANSITION,
        END,
    ]
    updates_spec = FSMSpecification(
        name='updates',
        hideTabs=True,
        title='Present updates for a particular Thread.',
        pluginNodes=plugin_nodes,
    )
    return (updates_spec,)
| |
"""
this is the stuff specific to the plant studies that are loaded. It can include comments and all that. The studytreelist will be read from the load_synth_extract file. Other variables in the conf could be overridden here.
"""
import load_synth_extract
studytreelist=[
"244_3855", # Gnetum, generic level. Won and Renner. 2006. Syst. Biol.
"2879_6674", # Almost all Bryophyta genera. Cox et al. 2010. Phytotaxa
"2878_6673", # Lepechinia; generic level. Drew et al. 2013. Bot. J. Linn. Soc.
"412_2166", # Coniferophyta; 492 taxa; almost all genera monophyletic. Leslie et al. 2012. PNAS
"2827_6577",#Ilex NEW
"1022_1967",#Pontederiaceae
#"194_2284",#early and nymphaeles
"562_817", #Poales
"424_532", #Lonicera
"1916_3902", #Brassicaceae
"588_878", #Asparagales
# "826_1584", #rosaceae #this is actually a fungal tree
"926_1825",#rosaceae
"1133_5647", #Rosales
"2624_6139", #Veronica
"2128_4437",#Plantago
"1102_2177",#Collinsia
"625_1016", # Hoheria
"761_1415", # Drosera
"2048_4220", # allium
"1264_2544",#isoetes
"1129_2251", # Solanum
"1842_3724", # Oxalis
"288_5028", # Croton
"754_1392", # Ribes
"1137_2295", # Erythronium
"1109_2201", # Castilleja
"2004_4118",#cyrtandra
"385_458", # Begonia
"1843_3725", # Euphorbia
"1858_3754", # Euphorbia
"330_325", # Santalum
"394_483", # Cucumis
"56_5821", # Tsuga
"53_1280", # Euryops
"62_2878", # Lymania
"77_5878", # Anaxagorea
"2841_6597", # Sparganium
"1118_2226", #Mentheae,lamiaceae
"2669_6213", #Lamiaceae
## "19_6175", #Verbenaceae
"2032_5922",#Ruella
"1901_3877",#Lentibulariaceae
"713_1287", #Lamiales
"1131_2265", #Saxifragaceae
"2608_6288", #saxifrigales
"2539_6294",#Soltis et al. 2011 ML tree
# "2539_5465",#Soltis et al. 2011 bootstrap
"2820_6566",#Streptophyta
"2712_6296", #Rosids
"259_142", #Cercis FABALES!
"264_150", #Coursetia FABALES!
"267_161", #Ateleia (Swartzieae-Leguminosae) FABALES!
"2077_4291", #Podalyria (Fabaceae, Podalyrieae) FABALES!
"293_201", #Mimosa FABALES!
"197_784", #Phaseolus FABALES!
"595_896", #Senna FABALES!
"131_6236", #Trifolium FABALES!
"2689_6241", #Lupinus FABALES!
"597_906", #Machaerium (Leguminosae) FABALES!
"2001_4100", #Astragalus FABALES!
"606_5290", #Trifolieae and Vicieae FABALES!
"54_949", #Indigofereae FABALES!
"596_901", #Genisteae (Leguminosae) FABALES!
"294_202", #Detarieae (Caesalpinioideae) FABALES!
"292_199", #(Diocleinae: Papilionoideae) FABALES!
"58_775", #Crotalarieae (Fabaceae) FABALES!
"548_798", #Vigna FABALES!
"2055_4234", #Genistoid legumes FABALES!
"2057_4240", #papilionoid FABALES!
"2127_4426", #Papilionoideae; Vataireoid Clade FABALES!
"594_890", #robinioid legumes FABALES!
"261_145", #Caesalpinieae FABALES!
"57_777", #Podalyrieae (Fabaceae) FABALES!
"78_6237", #phaseoloid FABALES!
"78_5858", #phaseoloid FABALES!
"2690_6243", #Fabaceae FABALES!
"2045_4213", #Acacia FABALES!
"605_947", #Strophostyles (Fabaceae) FABALES!
"271_5017", #Polygalaceae FABALES!
#"265_153", #Fabales FABALES!
#"998_2313", #Fabales
"2661_6198", #Ericales
"2645_6165",#Menispermaceae
"2644_6164",#Ranunculales
"2610_6117", #malpighiales tree, the best one we have right now, we think6
"2642_6161",#Cayophyllales; not sure if you have a better study here)
"2052_4228",#Lundia
"1103_2178",#Bignonieae
"14_12", #Bignoniaceae
"2140_4483",#Annonaceae
"2648_6171",#Marchantiales
"650_1147",#Meliaceae, Sapindales
"2085_4317",#Araceae
"2044_4212",#Orobanchaceae
"2626_6142",#Amaranthaceae
"2598_6020",#Boraginaceae
"2564_5699",#Polystichum
"2042_4202",#Bartramiaceae
## "2034_4191",#Ruellieae
"2000_4098",#Coffea
"20_2162",#Gallium
"1101_2172",#Rubieae
"2565_5708",#Ericoideae
"1094_2138",#Apocynaceae
"2641_6160",#Rubiaceae
"99_5885",#Barnadesioideae
"275_167",#Celastraceae
"93_1411",#Symplocos
"30_2281",#Illicium
#"36_36",#Dendropanax NOT ROOTED AND PROBABLY NOT GREAT
"898_1732",#Schefflera
"216_5865",#Hedera
"901_1740",#Meryta
"2830_6583",#brassaiopsis
"2831_6584",#Escallonia
"719_1296",#Nymphoides
"1975_4041",#Tragopogon
"1821_3678",#Helichrysum
"1583_3194",#Gaillardia
"1581_3188",#Dubautia
"1575_3164",#Tolpis
"332_333", # Polygonaceae
"1573_3144",#Onoseris
"934_1832",#Echinops
"200_6585",#Encelia
"152_5743",#Coreopsis
"53_1281",#Euryops
"2076_4282",#Garrya
"2832_6586",#Sedum
"37_5871",#Rhus
"50_1397",#Anagallis
"1866_3765",#Thesium (Santalaceae)
"59_5731",#Aristolochiaceae
"73_5787",#Passiflora
"80_5881",#Rhododendron
"81_5863",#Pinus
"82_5792",#Campanula
"88_5848",#Erodium
#"231_5505", #caryoph SOME SORT OF LOADING PROBLEM
"180_794",#Araceae
#"574_840",#Asparagales upload problems
"576_849",#Alocasia (Araceae)
"581_859",#Crocus (Iridaceae)
"582_862",#Mermuellera (Poaceae)
"598_926",#Poeae (Poaceae)
"599_927",#Costaceae
"603_940",#Maxillaria (orchidaceae)
"704_1266",#Molluginaceae
"721_1298",#Commelinaceae
"723_1300",#Triticum
#"724_3212",#Pleurothallidinae (Orchidaceae) upload problems
"921_4103",#Oryzeae (Poaceae)
"1300_2613",#Hymenophyllum (Hymenophyllaceae)
#"1302_2616", make for weird euphyllophyta
"1962_6580",#Viburnum Clement and Donoghue 2011
"915_1802",#Viburnum
"915_1803",#Valerianaceae
"2625_6140",#Utricularia
"1130_2258",#Nicotiana
"2047_4217",#Cuscuta
"386_459",#Brunsfelisia
"139_5860",#Nierembergia
"126_2233",#Solanum
"136_5857",#cestrum
"9_1",#Campanulidae
"2828_6578",#caprifolieae Smith 2009 NEW
"142_38",#Asclepias
"2638_6157",#Santalales
"21_37",#Solanaceae
"72_801",#Malpighiaceae
"75_1743",#Apioideae
"1974_4038",#PolygonaceaeS
"1974_4039",#Rheum
#"535_768",#Eriogonoideae
"61_816",#Bromeliaceae
## "284_185",#Cucurbitaceae
"2546_5493",#Sapindaceae
#"1086_2111",#Cactaceae
"41_1396",#Feddea
"283_184",#Celastrales
#"1116_2217",#Lamiales (Oxelman 2005)
"225_5991",#deep plants
"1867_3766", #cycads
"1278_2572",#Liverworts
"1268_2560",#hornworts
"412_2166",#conifers
"787_1489", # Ephedra
"2046_5928" #Trebouxiophyceae, Chlorophyta
]
studytreelistTF = [True] * len(studytreelist)
if __name__ == "__main__":
from stephen_desktop_conf import *
synthottolid="10218"
print "loading synthottolid:",synthottolid
print "loading studytreelist:",studytreelist
load_synth_extract.run(dott,dload,studyloc,studytreelist,javapre,
treemloc,generallogfileloc,dsynth,synthottolid,treefn,studytreelistTF)
| |
#
# $LicenseInfo:firstyear=2010&license=mit$
#
# Copyright (c) 2010, Linden Research, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# $/LicenseInfo$
#
from StringIO import StringIO
import unittest
from apiary.mysql.sqllog import *
from apiary.tools.timestamp import TimeStamp
# Source tag attached to events generated by these tests.
TestSource = 'unittest'
# Fake job ids: '111', '222', ..., '999'.
FakeJobs = [str(n) for n in range(111, 1000, 111)]
# Default fake job id ('999').
FakeJob = FakeJobs[-1]
class TestEvent(unittest.TestCase):
    """Tests for Event: string rendering and chronological ordering."""

    def testEventStr(self):
        """str(Event) renders the tab-separated header, body, and terminator line."""
        e = Event(123456.789, FakeJobs[0], TestSource, 'QueryStart', 'SELECT * FROM foo')
        self.assertEqual(str(e), """\
123456.789000\t%s\t%s\tQueryStart
SELECT * FROM foo
**************************************
""" % (FakeJobs[0], TestSource))

    def testOrdering(self):
        """Events compare by timestamp (ties broken deterministically)."""
        e1 = Event(10001.700, FakeJob, TestSource, 'QueryStart', 'SELECT')
        e2 = Event(10001.717, FakeJob, TestSource, 'QueryResponse', 'SELECT')
        e3 = Event(10001.717, FakeJob, TestSource, 'Quit', 'Quit')
        e4 = Event(10001.729, FakeJobs[0], TestSource, 'QueryStart', 'SELECT')
        # assert_ is a long-deprecated alias (removed in Python 3.12);
        # assertTrue behaves identically and exists on Python 2.7 too.
        self.assertTrue(e1 < e2)
        self.assertTrue(e1 < e3)
        self.assertTrue(e1 < e4)
        self.assertTrue(e2 < e3)
        self.assertTrue(e2 < e4)
        self.assertTrue(e3 < e4)
class TestParsing(unittest.TestCase):
    """Tests for parse_stanza() over various well-formed and malformed inputs."""

    def testSimpleStanza(self):
        """A complete stanza parses; only the first stanza is consumed."""
        f = StringIO("""\
1237237351.064861\t10.0.0.1:40784\t%s\tQueryResponse
SELECT column1, column2 FROM some_table WHERE column1='foo'
**************************************
1237237351.065393\t10.0.0.2:39706\t%s\tQueryStart
SELECT t1.column1, t2.column2, t1.column3 FROM table1 t1, table2 t2 WHERE t1.column1 = '00000000-0000-0000-0000-000000000000' AND t2.column2 = t1.column4
**************************************
""" % (TestSource, TestSource))
        s = parse_stanza(f)
        # assert_ is deprecated (removed in Python 3.12); assertTrue is equivalent.
        self.assertTrue(s is not None)
        self.assertEqual(s.time, TimeStamp(1237237351.064861))
        self.assertEqual(s.id, '10.0.0.1:40784')
        self.assertEqual(s.state, 'QueryResponse')
        self.assertEqual(s.body, """\
SELECT column1, column2 FROM some_table WHERE column1='foo'
""")

    def disabled_testMkQueryLogSyntax(self):
        """Disabled: mk-query-log style input (kept for reference)."""
        f = StringIO("""\
# administrator command: Connect;
# Time: 091022 12:43:08.898136
# User@Host: user[user] @ 10.0.0.1 []
# Client: 10.0.0.1:40737
# Thread_id: 10000000
# Query_time: 0 Lock_time: 0 Rows_sent: 0 Rows_examined: 0
use some_table;
SELECT foo FROM bar WHERE column1 = 'some_uid' AND column2 = 'another_uid' AND column3 = 1;
""")
        s = parse_stanza(f)
        self.assertTrue(s is not None)
        self.assertEqual(s.time, TimeStamp(1256215388.898136))
        self.assertEqual(s.id, '10.0.0.1:40737:10000000')
        self.assertEqual(s.body, """\
use some_table;
SELECT foo FROM bar WHERE column1 = 'some_uid' AND column2 = 'another_uid' AND column3 = 1;
""")

    def testEmptyStanza(self):
        """Empty input yields None."""
        f = StringIO('')
        s = parse_stanza(f)
        self.assertTrue(s is None)

    def testMissingStanzaEnd(self):
        """A stanza without the terminator line still parses to EOF."""
        f = StringIO("""\
1237237351.064861\t10.0.0.1:40784\t%s\tQueryResponse
SELECT column1, column2 FROM table1 WHERE column3='foo'
""" % TestSource)
        s = parse_stanza(f)
        self.assertTrue(s is not None)
        self.assertEqual(s.time, TimeStamp(1237237351.064861))
        self.assertEqual(s.id, '10.0.0.1:40784')
        self.assertEqual(s.state, 'QueryResponse')
        self.assertEqual(s.body, """\
SELECT column1, column2 FROM table1 WHERE column3='foo'
""")

    def testJunkLeadInStanza(self):
        """Garbage before the first header line is skipped."""
        f = StringIO("""\
SELECT t1.column1, t2.column2, t1.column3 FROM table1 t1, table2 t2 WHERE t1.column4 = 'foo' AND t2.column5 = u.column6
**************************************
1237237351.064861\t10.0.0.1:40784\t%s\tQueryResponse
SELECT column1, column2 FROM table1 WHERE column3='foo'
**************************************
""" % TestSource)
        s = parse_stanza(f)
        self.assertTrue(s is not None)
        self.assertEqual(s.time, TimeStamp(1237237351.064861))
        self.assertEqual(s.id, '10.0.0.1:40784')
        self.assertEqual(s.state, 'QueryResponse')
        self.assertEqual(s.body, """\
SELECT column1, column2 FROM table1 WHERE column3='foo'
""")
class TestSequence(unittest.TestCase):
    """Tests for Sequence: accumulating the events of a single job."""

    def testTime(self):
        # time() is the span between the first and last noted events.
        seq = Sequence()
        seq.note(Event(10001.700, FakeJob, TestSource, 'QueryStart', 'SELECT'))
        seq.note(Event(10001.703, FakeJob, TestSource, 'QueryResult', 'SELECT'))
        seq.note(Event(10001.717, FakeJob, TestSource, 'Quit', 'Quit'))
        self.assertEqual(seq.count(), 3)
        self.assertEqual(seq.time(), TimeStamp(0.017))

    def testQuit(self):
        # A sequence only reports ended() once a 'Quit' event is noted.
        # assert_() was deprecated in Python 2.7 and removed in 3.12;
        # assertFalse/assertTrue are the supported spellings.
        seq = Sequence()
        seq.note(Event(10001.700, FakeJob, TestSource, 'QueryStart', 'SELECT'))
        self.assertFalse(seq.ended())
        seq.note(Event(10001.703, FakeJob, TestSource, 'QueryResult', 'SELECT'))
        self.assertFalse(seq.ended())
        seq.note(Event(10001.717, FakeJob, TestSource, 'Quit', 'Quit'))
        self.assertTrue(seq.ended())

    def testGenerateEnd(self):
        # generateEnd() synthesizes a 'Quit' event stamped with the time
        # of the last event seen.
        seq = Sequence()
        seq.note(Event(10001.700, FakeJob, TestSource, 'QueryStart', 'SELECT'))
        seq.note(Event(10001.703, FakeJob, TestSource, 'QueryResult', 'SELECT'))
        self.assertFalse(seq.ended())
        e = seq.generateEnd()
        self.assertEqual(e.time, TimeStamp(10001.703))
        self.assertEqual(e.id, FakeJob)
        self.assertEqual(e.state, 'Quit')

    def testTimeTo(self):
        # timeto() is None before any event is noted, and afterwards is
        # the delta from the most recent noted event.
        seq = Sequence()
        e1 = Event(10001.700, FakeJob, TestSource, 'QueryStart', 'SELECT')
        e2 = Event(10001.717, FakeJob, TestSource, 'Quit', 'Quit')
        self.assertTrue(seq.timeto(e1) is None)
        seq.note(e1)
        self.assertEqual(seq.timeto(e1), TimeStamp(0))
        self.assertEqual(seq.timeto(e2), TimeStamp(0.017))
class SimpleCoalesce(CoalesceSequences):
    """Test double for CoalesceSequences that simply collects every
    completed sequence into a list for later inspection."""

    def __init__(self):
        # Let the base class initialize its own state first.
        CoalesceSequences.__init__(self)
        self.sequences = []

    def fullSequence(self, e):
        """Callback invoked by the base class with each finished sequence."""
        self.sequences.append(e)
class TestCoalesce(unittest.TestCase):
    """Tests for CoalescedEvent / CoalesceSequences: folding interleaved
    per-job event streams into single 'Sequence' events."""

    def assertEvent(self, e, time, id, state, body=None):
        # Field-wise comparison helper; a None argument means "don't
        # check that field".  (Parameter names 'time'/'id' shadow
        # builtins but are kept for call-site compatibility.)
        self.assertEqual(e.source, TestSource)
        if time is not None:
            if not isinstance(time, TimeStamp):
                time = TimeStamp(time)
            self.assertEqual(e.time, time)
        if id is not None:
            self.assertEqual(e.id, id)
        if state is not None:
            self.assertEqual(e.state, state)
        if body is not None:
            self.assertEqual(e.body, body)

    def testOne(self):
        # A coalesced event concatenates its members' bodies with
        # 'time:body' lines separated by '+++' markers.
        c = CoalescedEvent()
        c.add(Event(10001.500, FakeJob, TestSource, 'QueryStart', 'SELECT "foo"'))
        c.add(Event(10001.600, FakeJob, TestSource, 'QueryStart', 'SELECT "bar"'))
        c.add(Event(10001.700, FakeJob, TestSource, 'Quit', 'Quit'))
        self.assertEvent(c, 10001.500, FakeJob, 'Sequence',
                         '10001.500000:SELECT "foo"\n+++\n'
                         '10001.600000:SELECT "bar"\n+++\n'
                         '10001.700000:Quit\n+++\n')

    def testTwoSequential(self):
        # Two back-to-back jobs coalesce into two separate sequences.
        l = []
        l.append(Event(10001.500, FakeJob, TestSource, 'QueryStart', 'SELECT "foo"'))
        l.append(Event(10001.600, FakeJob, TestSource, 'QueryStart', 'SELECT "bar"'))
        l.append(Event(10001.700, FakeJob, TestSource, 'Quit', 'Quit'))
        l.append(Event(10002.500, FakeJobs[0], TestSource, 'QueryStart', 'SELECT "oof"'))
        l.append(Event(10002.600, FakeJobs[0], TestSource, 'QueryStart', 'SELECT "rab"'))
        l.append(Event(10002.700, FakeJobs[0], TestSource, 'Quit', 'Quit'))
        sc = SimpleCoalesce()
        sc.replay(l)
        self.assertEqual(len(sc.sequences), 2)
        self.assertEvent(sc.sequences[0], 10001.500, FakeJob, 'Sequence',
                         '10001.500000:SELECT "foo"\n+++\n'
                         '10001.600000:SELECT "bar"\n+++\n'
                         '10001.700000:Quit\n+++\n')
        self.assertEvent(sc.sequences[1], 10002.500, FakeJobs[0], 'Sequence',
                         '10002.500000:SELECT "oof"\n+++\n'
                         '10002.600000:SELECT "rab"\n+++\n'
                         '10002.700000:Quit\n+++\n')

    def testTwoInterleaved(self):
        # Events of two jobs alternating in time are separated per job.
        l = []
        l.append(Event(10001.500, FakeJob, TestSource, 'QueryStart', 'SELECT "foo"'))
        l.append(Event(10001.520, FakeJobs[0], TestSource, 'QueryStart', 'SELECT "oof"'))
        l.append(Event(10001.600, FakeJob, TestSource, 'QueryStart', 'SELECT "bar"'))
        l.append(Event(10001.620, FakeJobs[0], TestSource, 'QueryStart', 'SELECT "rab"'))
        l.append(Event(10001.700, FakeJob, TestSource, 'Quit', 'Quit'))
        l.append(Event(10001.720, FakeJobs[0], TestSource, 'Quit', 'Quit'))
        sc = SimpleCoalesce()
        sc.replay(l)
        self.assertEqual(len(sc.sequences), 2)
        self.assertEvent(sc.sequences[0], 10001.500, FakeJob, 'Sequence',
                         '10001.500000:SELECT "foo"\n+++\n'
                         '10001.600000:SELECT "bar"\n+++\n'
                         '10001.700000:Quit\n+++\n')
        self.assertEvent(sc.sequences[1], 10001.520, FakeJobs[0], 'Sequence',
                         '10001.520000:SELECT "oof"\n+++\n'
                         '10001.620000:SELECT "rab"\n+++\n'
                         '10001.720000:Quit\n+++\n')

    def testTwoNested(self):
        # A job fully contained within another job's lifetime still
        # yields its own sequence, ordered by start time.
        l = []
        l.append(Event(10001.500, FakeJob, TestSource, 'QueryStart', 'SELECT "foo"'))
        l.append(Event(10002.500, FakeJobs[0], TestSource, 'QueryStart', 'SELECT "oof"'))
        l.append(Event(10002.600, FakeJobs[0], TestSource, 'QueryStart', 'SELECT "rab"'))
        l.append(Event(10002.700, FakeJobs[0], TestSource, 'Quit', 'Quit'))
        l.append(Event(10003.600, FakeJob, TestSource, 'QueryStart', 'SELECT "bar"'))
        l.append(Event(10003.700, FakeJob, TestSource, 'Quit', 'Quit'))
        sc = SimpleCoalesce()
        sc.replay(l)
        self.assertEqual(len(sc.sequences), 2)
        self.assertEvent(sc.sequences[0], 10001.500, FakeJob, 'Sequence',
                         '10001.500000:SELECT "foo"\n+++\n'
                         '10003.600000:SELECT "bar"\n+++\n'
                         '10003.700000:Quit\n+++\n')
        self.assertEvent(sc.sequences[1], 10002.500, FakeJobs[0], 'Sequence',
                         '10002.500000:SELECT "oof"\n+++\n'
                         '10002.600000:SELECT "rab"\n+++\n'
                         '10002.700000:Quit\n+++\n')

    def testManyNested(self):
        # Many short jobs nested inside two long-lived ones: output
        # order follows each job's first event.
        l = []
        l.append(Event(10001.500, FakeJobs[0], TestSource, 'QueryStart', 'SELECT "one"'))
        l.append(Event(10002.500, FakeJobs[1], TestSource, 'QueryStart', 'SELECT "two"'))
        l.append(Event(10002.700, FakeJobs[1], TestSource, 'Quit', 'Quit'))
        l.append(Event(10003.500, FakeJobs[2], TestSource, 'QueryStart', 'SELECT "three"'))
        l.append(Event(10003.700, FakeJobs[2], TestSource, 'Quit', 'Quit'))
        l.append(Event(10004.500, FakeJobs[3], TestSource, 'QueryStart', 'SELECT "four"'))
        l.append(Event(10004.700, FakeJobs[3], TestSource, 'Quit', 'Quit'))
        l.append(Event(10005.500, FakeJobs[4], TestSource, 'QueryStart', 'SELECT "five"'))
        l.append(Event(10005.700, FakeJobs[0], TestSource, 'Quit', 'Quit'))
        l.append(Event(10006.500, FakeJobs[5], TestSource, 'QueryStart', 'SELECT "six"'))
        l.append(Event(10006.700, FakeJobs[5], TestSource, 'Quit', 'Quit'))
        l.append(Event(10007.500, FakeJobs[6], TestSource, 'QueryStart', 'SELECT "seven"'))
        l.append(Event(10007.700, FakeJobs[6], TestSource, 'Quit', 'Quit'))
        l.append(Event(10008.500, FakeJobs[7], TestSource, 'QueryStart', 'SELECT "eight"'))
        l.append(Event(10008.700, FakeJobs[7], TestSource, 'Quit', 'Quit'))
        l.append(Event(10009.700, FakeJobs[4], TestSource, 'Quit', 'Quit'))
        sc = SimpleCoalesce()
        sc.replay(l)
        self.assertEqual(len(sc.sequences), 8)
        self.assertEqual(sc.sequences[0].id, FakeJobs[0])
        self.assertEqual(sc.sequences[1].id, FakeJobs[1])
        self.assertEqual(sc.sequences[2].id, FakeJobs[2])
        self.assertEqual(sc.sequences[3].id, FakeJobs[3])
        self.assertEqual(sc.sequences[4].id, FakeJobs[4])
        self.assertEqual(sc.sequences[5].id, FakeJobs[5])
        self.assertEqual(sc.sequences[6].id, FakeJobs[6])
        self.assertEqual(sc.sequences[7].id, FakeJobs[7])

    def testMissingEnd(self):
        # Jobs with no 'Quit' event get a synthesized End stamped with
        # the time of their last seen event.
        l = []
        l.append(Event(10001.500, FakeJobs[0], TestSource, 'QueryStart', 'SELECT "one"'))
        l.append(Event(10002.500, FakeJobs[1], TestSource, 'QueryStart', 'SELECT "two"'))
        l.append(Event(10002.700, FakeJobs[1], TestSource, 'Quit', 'Quit'))
        l.append(Event(10003.500, FakeJobs[2], TestSource, 'QueryStart', 'SELECT "three"'))
        sc = SimpleCoalesce()
        sc.replay(l)
        self.assertEqual(len(sc.sequences), 3)
        self.assertEqual(sc.sequences[0].id, FakeJobs[0])
        self.assertEqual(sc.sequences[1].id, FakeJobs[1])
        self.assertEqual(sc.sequences[2].id, FakeJobs[2])
        es = sc.sequences[0].events()
        self.assertEqual(len(es), 2)
        self.assertEvent(es[0], 10001.500, FakeJobs[0], Event.Query, 'SELECT "one"')
        self.assertEvent(es[1], 10001.500, FakeJobs[0], Event.End)
        es = sc.sequences[1].events()
        self.assertEqual(len(es), 2)
        self.assertEvent(es[0], 10002.500, FakeJobs[1], Event.Query, 'SELECT "two"')
        self.assertEvent(es[1], 10002.700, FakeJobs[1], Event.End)
        es = sc.sequences[2].events()
        self.assertEqual(len(es), 2)
        self.assertEvent(es[0], 10003.500, FakeJobs[2], Event.Query, 'SELECT "three"')
        self.assertEvent(es[1], 10003.500, FakeJobs[2], Event.End)

    def testSplitApart(self):
        # Round-trip: a coalesced event serialized via str() and re-read
        # by parse_stanza() splits back into its component events, even
        # when bodies contain leading/trailing newlines.
        c = CoalescedEvent()
        c.add(Event(10001.500, FakeJob, TestSource, 'QueryStart', 'SELECT "foo"'))
        c.add(Event(10001.600, FakeJob, TestSource, 'QueryStart', 'SELECT "bar"\n'))
        c.add(Event(10001.700, FakeJob, TestSource, 'QueryStart', '\nSELECT "baz"'))
        c.add(Event(10001.800, FakeJob, TestSource, 'Quit', 'Quit'))
        e = parse_stanza(StringIO(str(c)))
        self.assertEqual(e.id, FakeJob)
        self.assertEqual(e.state, CoalescedEvent.Sequence)
        # (stray trailing semicolon removed)
        es = e.events()
        self.assertEqual(len(es), 4)
        self.assertEvent(es[0], 10001.500, FakeJob, Event.Query, 'SELECT "foo"')
        self.assertEvent(es[1], 10001.600, FakeJob, Event.Query, 'SELECT "bar"\n')
        self.assertEvent(es[2], 10001.700, FakeJob, Event.Query, '\nSELECT "baz"')
        self.assertEvent(es[3], 10001.800, FakeJob, Event.End)
if __name__ == '__main__':
    # Discover and run every TestCase class in this module.
    unittest.main()