gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from pymongo import MongoClient
import pymongo
from datetime import datetime,time
import time
from bson.code import Code
# --- MongoDB connection / module-level setup ---
# Connects to a local MongoDB instance.  `db_col` and `dbc` are two aliases
# for the very same `mydb.things` collection.
mongo_client=MongoClient('mongodb://localhost:27017/')
db=mongo_client.mydb
db_col=db.things
dbc = mongo_client.mydb.things
print mongo_client  # Python 2 print statement -- this script targets Python 2.x
print(db)
print("connected")
def first_querry():
    """Return the total number of documents in the `things` collection.

    NOTE(review): the menu labels this answer "unique users", but the query
    counts *all* documents, not distinct `id_member` values -- confirm intent.
    """
    total_documents = db.things.find().count()
    return total_documents
def second_querry():
    """Percentage of all messages that the ten most active users published.

    Groups messages by `id_member`, keeps the ten largest groups, and
    divides their combined size by the total message count.
    """
    total_messages = first_querry()
    top_ten = db.things.aggregate([
        {"$group": {"_id": "$id_member", "count": {"$sum": 1}}},
        {"$sort": {"count": -1}},
        {"$limit": 10},
    ])
    top_ten_messages = sum(entry['count'] for entry in top_ten)
    return 100.0 * top_ten_messages / total_messages
def third_querry():
    """Return the latest and earliest `timestamp` values in the collection.

    Returns:
        tuple: (tmax, tmin) -- the newest and oldest timestamp strings.

    Raises:
        NameError: if the collection is empty (neither loop binds a row).
    """
    # Newest first: under DESCENDING sort missing/None timestamps sort last,
    # so no filter is needed here.
    newest = db.things.find({}, {"timestamp": 1}).sort(
        "timestamp", pymongo.DESCENDING).limit(1)
    for row in newest:
        tmax = row["timestamp"]
    # Oldest first: under ASCENDING sort missing/None timestamps would sort
    # *first*, so they must be filtered out explicitly.
    oldest = db.things.find({"timestamp": {'$ne': None}},
                            {"timestamp": 1}).sort(
        "timestamp", pymongo.ASCENDING).limit(1)
    for rb in oldest:
        tmin = rb["timestamp"]
    return (tmax, tmin)
def fourth_querry():
    """Mean time delta, in seconds, between consecutive messages.

    Assumes `timestamp` values are "YYYY-MM-DD HH:MM:SS" strings
    (as parsed below -- TODO confirm against the data).

    Raises:
        ZeroDivisionError: if the collection holds fewer than two messages.
    """
    tmax, tmin = third_querry()
    dmax = datetime.strptime(tmax, "%Y-%m-%d %H:%M:%S")
    dmin = datetime.strptime(tmin, "%Y-%m-%d %H:%M:%S")
    # Subtract the datetimes directly instead of round-tripping through
    # time.mktime(): mktime() interprets the tuple in *local* time and can
    # mis-handle DST transitions.
    span_seconds = (dmax - dmin).total_seconds()
    all_plithos_msg = db.things.find().count()
    # N messages have N-1 gaps between them.
    return span_seconds / (all_plithos_msg - 1)
# Shared cursor over every document.  NOTE(review): a pymongo cursor is
# exhausted after one full iteration, so only the first of
# fifth_querry(data) / eight_querry(data) will see any rows -- verify.
data = dbc.find()
def fifth_querry(data):
    """Mean length, in UTF-8 bytes, of the `text` field across all messages.

    Args:
        data: iterable of documents (e.g. a pymongo cursor).

    Returns:
        float: total byte length of all texts divided by the collection size.
    """
    sum_of_texts = 0
    for row in data:
        if 'text' in row:
            sum_of_texts += len(str(row["text"]).encode('utf-8'))
    # float() prevents Python 2 integer division from truncating the mean.
    return float(sum_of_texts) / db.things.count()
def sixth_querry():
    """Print the ten most common unigrams across all message texts.

    Runs a MongoDB map/reduce whose results are persisted into the
    `uniCounter` collection, then prints the top ten by count.
    Returns None.
    """
    # Mapper: split each text on single spaces, strip punctuation and
    # collapsed whitespace from every token, and emit (token, 1).
    mapperUni = Code("""
        function() {
            var thisText = this.text;
            var splitStr = thisText.toString().split(" ");
            for(i=0 ; i< splitStr.length ;i++){
                var clean1 = splitStr[i].replace(/[.,-\/#!$%\^&\*;:{}=\-_`~()]/g,"");
                var clean2 = clean1.replace(/\s{2,}/g," ");
                var cleanStr = clean2.trim();
                if (cleanStr.length>0)
                    emit(cleanStr,1);
            }
        }
    """)
    # Reducer: sum the 1s emitted for each distinct token.
    reducerUni = Code("""
        function(key, value) {
            return Array.sum(value);
        }
    """)
    unigram_counter = dbc.map_reduce(mapperUni, reducerUni, 'uniCounter')
    unigram_list = list(db.uniCounter.find().sort('value', -1).limit(10))
    for uni in unigram_list:
        # NOTE(review): no spaces around the concatenated values, so this
        # prints e.g. "Unigramfoohas3appearances".
        print ('Unigram' + uni['_id'] + 'has' + str(uni['value']) + 'appearances')
def seventh_querry():
    """Print the ten most common bigrams across all message texts.

    Runs a MongoDB map/reduce whose results are persisted into the
    `bigramCounter` collection, then prints the top ten by count.
    Returns None.
    """
    # Mapper: for each adjacent token pair, strip punctuation and collapsed
    # whitespace from both tokens and emit ("tok1 tok2", 1).
    mapperBi = Code("""
        function() {
            var tempText = this.text;
            var splitText = tempText.toString().split(" ");
            for(i=0 ; i<splitText.length-1 ;i++){
                punctText = splitText[i].trim();
                punctText2 = splitText[i+1].trim();
                var punctRem = punctText.replace(/[.,-\/#!$%\^&\*;:{}=\-_`~()]/g,"");
                var punctRem2 = punctText2.replace(/[.,-\/#!$%\^&\*;:{}=\-_`~()]/g,"");
                var firstStr = punctRem.replace(/\s{2,}/g," ");
                var secStr = punctRem2.replace(/\s{2,}/g," ");
                finalStr = (firstStr + ' ' + secStr).trim();
                if (finalStr !== '')
                    emit(finalStr,1);
            }
        }
    """)
    # Reducer: sum the 1s emitted for each distinct bigram.
    reducerBi = Code("""
        function(key, value) {
            return Array.sum(value);
        }
    """)
    bigram_counter = dbc.map_reduce(mapperBi, reducerBi, 'bigramCounter')
    bigram_list = list(db.bigramCounter.find().sort('value', -1).limit(10))
    for bigrams in bigram_list:
        # NOTE(review): no spaces around the concatenated values, so this
        # prints e.g. "Bigramfoo barhas3appearances".
        print ('Bigram' + bigrams['_id'] + 'has' + str(bigrams['value']) + 'appearances')
def eight_querry(data):
    """Average number of '#' characters (hashtags) per message.

    Args:
        data: iterable of documents (e.g. a pymongo cursor).

    Returns:
        float: total '#' occurrences divided by the collection size.
    """
    sum_of_hashes_per_text = 0
    for row in data:
        if 'text' in row:
            sum_of_hashes_per_text += str(row['text']).count('#')
    # float() prevents Python 2 integer division from truncating the average.
    return float(sum_of_hashes_per_text) / db.things.count()
def ninth_querry():
    """Print which UK quadrant (NW/NE/SW/SE) published the most messages.

    Classifies every document's (geo_lat, geo_lng) relative to an
    approximate centre of the UK via map/reduce, persisting per-quadrant
    counts in the `geoLocDistr` collection.  Returns None.
    """
    # NOTE(review): the centre coordinates are *string* literals; JavaScript
    # coerces a numeric string when compared to a number, but if
    # geo_lat/geo_lng are themselves strings the comparison is lexicographic
    # -- confirm the stored field types.
    mapperMap = Code("""
        function() {
            var ukCenterLat = '54.749991';
            var ukCenterLng = '-3.867188';
            var currentLng = this.geo_lng;
            var currentLat = this.geo_lat;
            var loc = "";
            if (currentLng < ukCenterLng && currentLat >= ukCenterLat) {
                loc = "North-West";
            }else if(currentLng < ukCenterLng && currentLat < ukCenterLat){
                loc = "South-West";
            }else if (currentLng >= ukCenterLng && currentLat >= ukCenterLat) {
                loc = "North-East";
            }else if (currentLng >= ukCenterLng && currentLat < ukCenterLat){
                loc = "South-East";
            }
            emit(loc, 1);
        }
    """)
    # Reducer: sum the per-quadrant 1s.
    reducerMap = Code("""
        function(key, value) {
            return Array.sum(value);
        }
    """)
    LocationCounter = dbc.map_reduce(mapperMap, reducerMap, 'geoLocDistr')
    topLocation = db.geoLocDistr.find().sort('value', -1).limit(1)
    print('Most of the messages were published in' + topLocation[0]['_id'] + ' with ' + str(
        topLocation[0]['value']) + ' tweets')
ans=True
while ans:
print("""
1.How many unique users are there?
2.How many tweets (%) did the top 10 users (measured by the number of messages) publish?
3.What was the earliest and latest date (YYYY-MM-DD HH:MM:SS) that a message was published?
4.What is the mean time delta between all messages?
5.What is the mean length of a message?
6.What are the 10 most common unigram within the messages?
7.What are the 10 most common bigram within the messages?
8.What is the average number of hashtags (#) used within a message?
10.Exit/Quit
""")
ans = raw_input("What would you like to do? ")
if ans == "1":
print "The summary of all unique users is: ", first_querry()
elif ans == "2":
print("The percentage of the ALL messages of top ten user"), second_querry(), "%",
elif ans == "3":
print"The last message published on:", third_querry()[0]
print"The earliest message published on:", third_querry()[1]
elif ans == "4":
print"The mean time delta between all messages is :", fourth_querry()
elif ans == "5":
print"The mean length of the messages is :", fifth_querry(data)
elif ans == "6":
print"The 10 most common unigrams within the messages are:", sixth_querry()
elif ans == "7":
print"The 10 most common bigrams within the messages are:", seventh_querry()
elif ans == "8":
print"The average number of hashtags (#) used within a message is:", eight_querry(data)
elif ans == "9":
ninth_querry()
elif ans == "10":
print("\n Goodbye")
ans = None
else:
print("\n Not Valid Choice Try again")
| |
import binascii
from unittest import TestCase
from bitmerchant.network import BitcoinMainNet
from bitmerchant.network import BitcoinTestNet
from bitmerchant.network import DogecoinMainNet
from bitmerchant.network import LitecoinMainNet
from bitmerchant.wallet import Wallet
from bitmerchant.wallet.bip32 import InsufficientKeyDataError
from bitmerchant.wallet.bip32 import InvalidPathError
from bitmerchant.wallet.bip32 import InvalidPrivateKeyError
from bitmerchant.wallet.bip32 import InvalidPublicKeyError
from bitmerchant.wallet.bip32 import KeyMismatchError
from bitmerchant.wallet.keys import IncompatibleNetworkException
from bitmerchant.wallet.utils import ensure_bytes
from bitmerchant.wallet.utils import long_to_hex
class TestWallet(TestCase):
    """Serialization, construction and validation tests against a fixed
    BIP32 master key."""

    @classmethod
    def setUpClass(cls):
        # The serialized extended private key, spelled out field by field.
        cls.expected_key = ensure_bytes(
            "0488ade4"  # BitcoinMainNet version
            "00"  # depth
            "00000000"  # parent fingerprint
            "00000000"  # child_number
            # chain_code
            "873dff81c02f525623fd1fe5167eac3a55a049de3d314bb42ee227ffed37d508"
            "00"  # key identifier
            # private exponent
            "e8f32e723decf4051aefac8e2c93c9c5b214313817cdb01a1494b917c8436b35")
        cls.master_key = Wallet.deserialize(cls.expected_key)

    def test_serialize_master_key(self):
        self.assertEqual(self.expected_key, self.master_key.serialize())

    def test_from_master_secret(self):
        secret = binascii.unhexlify(b'000102030405060708090a0b0c0d0e0f')
        self.assertEqual(Wallet.from_master_secret(secret),
                         self.master_key)

    def test_from_master_secret_slow(self):
        """Verified against bip32.org"""
        password = "correct horse battery staple"
        w = Wallet.from_master_secret_slow(password)
        self.assertEqual(
            w.serialize_b58(private=True),
            "xprv9s21ZrQH143K3JDqHk5kEb6o2w8pEwm3cmt8qaSw9coaHCYJFtaybzUob6d4"
            "WyJDf8uspZkBAt7DcEVhvCDRBHZEavVJg51HZEGdVH2uXLK")
        self.assertEqual(w.depth, 0)
        self.assertEqual(w.parent_fingerprint, b"0x00000000")
        self.assertEqual(w.child_number, 0)
        self.assertEqual(
            w.chain_code,
            (b'7c73c15c623128246dcf37d439be2a9d'
             b'da5fb33b2aec18e66a806d10a236b5c9'))
        self.assertEqual(
            w.export_to_wif(),
            'KxTFZmNVYgAupo2w8QUNpfDjSEMhGN7RaQ6rhNRvsSHBggASpEr1')
        # First non-prime child, also verified against bip32.org.
        child = w.get_child(0, is_prime=False)
        self.assertEqual(
            child.serialize_b58(private=True),
            "xprv9vExvbix4MQgazj3vovZ4UEwmLSEQrktY8yZAVhFAB7W7xzqS9RXH8ZaNEdw"
            "KoQzbPixY3YSVjK58S3K5h4ktjVEpHrfjUarsiUfKDe6A4i")
        self.assertEqual(
            child.export_to_wif(),
            'L3LA3KxJELbwCyVjFaSrvvUsnfKcZ9TPmGXbq4s6zmK5kaBVja29')
        self.assertEqual(
            child.serialize_b58(private=False),
            "xpub69EKL7FqtixyoUoX2qTZRcBgKNGipKUjuMu9xt6riWeUzmKyygjmpvt4DXaL"
            "U2vyoVqYtpqyuDYDHsxbzzReQmou1PtwVthP3SJkjcHEEg4")
        self.assertEqual(
            child.get_public_key_hex(),
            (b"03b18ba94530690859a3f6ebb2b866d1"
             b"51f8499b3164d027ba5b464e4ed71329aa"))
        self.assertEqual(
            child.to_address(),
            "1MfJvR28iULUb8AwtY7hp7xpc1A8Wg1ojX")

    def test_invalid_network_prefix(self):
        key = self.expected_key
        # NOTE(review): the assignment above is immediately overwritten --
        # it is dead.
        key = (long_to_hex(BitcoinTestNet.EXT_SECRET_KEY, 8) +
               self.expected_key[8:])
        self.assertRaises(IncompatibleNetworkException,
                          Wallet.deserialize, key, BitcoinMainNet)
        self.assertTrue(Wallet.deserialize(key, BitcoinTestNet))

    def test_public_export(self):
        """Export a node as public."""
        child = self.master_key.get_child(0, as_private=False)
        self.assertEqual(child.private_key, None)
        key = child.serialize(private=False)
        self.assertTrue(
            long_to_hex(BitcoinMainNet.EXT_PUBLIC_KEY, 8) in key)
        self.assertEqual(Wallet.deserialize(key), child)

    def test_public_export_mismatch(self):
        """Can't export a public node as private."""
        child = self.master_key.get_child(0, as_private=False)
        self.assertEqual(child.private_key, None)
        self.assertRaises(ValueError, child.serialize)

    def test_random_wallet(self):
        w = Wallet.new_random_wallet()
        self.assertTrue(Wallet.deserialize(w.serialize()), w)
        self.assertEqual(w.depth, 0)
        self.assertEqual(w.parent_fingerprint, b'0x' + long_to_hex(0, 8))
        self.assertEqual(w.child_number, 0)
        # Two fresh random wallets must not share a private key.
        w2 = Wallet.new_random_wallet()
        self.assertNotEqual(w.get_private_key_hex(), w2.get_private_key_hex())

    def test_random_wallet_with_entropy(self):
        # Even identical user entropy must yield distinct wallets.
        w1 = Wallet.new_random_wallet('foo')
        w2 = Wallet.new_random_wallet('foo')
        self.assertNotEqual(w1.get_private_key_hex(), w2.get_private_key_hex())

    def test_insuffient_key_data(self):
        # A chain code alone is not enough key material.
        self.assertRaises(InsufficientKeyDataError, Wallet,
                          chain_code=self.master_key.chain_code,
                          private_exponent=None,
                          private_key=None,
                          public_pair=None,
                          public_key=None)

    def test_private_exponent(self):
        """Ensure we can create a wallet with just a private exponent."""
        Wallet(chain_code='0' * 64,
               private_exponent=(self.master_key.private_key._private_key
                                 .privkey.secret_multiplier))

    def test_private_key(self):
        """Ensure a private key is sufficient to create a wallet."""
        Wallet(chain_code='0' * 64,
               private_key=self.master_key.private_key)

    def test_private_key_type(self):
        """Must be a bitmerchant private key"""
        self.assertRaises(
            InvalidPrivateKeyError, Wallet,
            chain_code='0' * 64,
            private_key=self.master_key.private_key._private_key)

    def test_public_pair(self):
        Wallet(chain_code=b'0' * 64,
               public_pair=self.master_key.public_key.to_public_pair())

    def test_public_key(self):
        Wallet(chain_code=b'0' * 64,
               public_key=self.master_key.public_key)

    def test_public_key_type(self):
        # A raw ecdsa verifying key is rejected; must be a bitmerchant key.
        self.assertRaises(
            InvalidPublicKeyError, Wallet,
            chain_code=b'0' * 64,
            public_key=self.master_key.public_key._verifying_key)

    def test_mismatch_public_private(self):
        # Private key from one wallet with the public key of another.
        w = Wallet.new_random_wallet()
        self.assertRaises(
            KeyMismatchError, Wallet,
            chain_code=b'0' * 64,
            private_key=self.master_key.private_key,
            public_key=w.public_key)
class TestNewAddressForUser(TestCase):
    """Tests for Wallet.create_new_address_for_user."""

    def setUp(self):
        self.w = Wallet.new_random_wallet()

    def test_invalid_user_id(self):
        # User ids outside the non-prime child range are rejected.
        for bad_user_id in (-10, 0x80000000 + 1):
            self.assertRaises(
                ValueError,
                self.w.create_new_address_for_user,
                bad_user_id)

    def test_new_address(self):
        # The user address is simply the public child at the user's id.
        new_child = self.w.create_new_address_for_user(10)
        self.assertEqual(
            self.w.get_child(10, as_private=False), new_child)
class TestCrackPrivateKey(TestCase):
    """Tests for recovering a parent private key from a watch-only parent
    plus a non-prime private child (the known BIP32 weakness)."""

    def setUp(self):
        self.w = Wallet.new_random_wallet()
        self.pub_derived_private_child = self.w.get_child(100)
        self.wpub = self.w.public_copy()
        # Sanity: the public copy must hold no private key.
        self.assertTrue(self.wpub.private_key is None)

    def test_already_have_private(self):
        # Cracking is refused when the parent already has its private key.
        self.assertRaises(AssertionError,
                          self.w.crack_private_key,
                          self.pub_derived_private_child)

    def test_invalid_fingerprint(self):
        # A grandchild does not point back at self.wpub.
        grandchild = self.pub_derived_private_child.get_child(10)
        self.assertRaises(ValueError, self.wpub.crack_private_key, grandchild)

    def test_invalid_prime(self):
        # Prime (hardened) children cannot be used to crack the parent.
        prime_child = self.w.get_child(-100)
        self.assertRaises(ValueError, self.wpub.crack_private_key, prime_child)

    def test_crack_child(self):
        recovered = self.wpub.crack_private_key(self.pub_derived_private_child)
        self.assertEqual(recovered, self.w)
        # The recovered parent derives the same children as the original.
        self.assertEqual(self.pub_derived_private_child,
                         recovered.get_child(100))
        self.assertEqual(self.w.get_child(-100), recovered.get_child(-100))
class TestSubkeyPath(TestCase):
    """Tests for get_child_for_path not covered by TestVectors."""

    @classmethod
    def setUpClass(cls):
        # This particular key was found by accident to cause the public
        # deserialized wallet to have a bad public key point!  There was a
        # bug that did not properly handle restoring a key from a compressed
        # point that had an odd beta parameter. (see PublicKey.from_hex_key)
        cls.wallet = Wallet.deserialize(
            u'xprv9s21ZrQH143K319oTMcEt2n2g51StkEnXq23t52ajHM4zFX7cyPqaHShDod'
            'cHAqorNQuDW82jUhXJLomy5A8kM36y8HntnosgCvc1szPJ6x')

    def assert_public(self, node):
        # A public node must carry no private key.
        self.assertEqual(node.private_key, None)

    def test_strip_private_key(self):
        self.assert_public(self.wallet.public_copy())
        self.assertNotEqual(self.wallet.private_key, None)

    def test_export_as_public(self):
        self.assert_public(self.wallet.get_child(0, as_private=False))

    def test_path_as_public(self):
        # Both an uppercase "M" root and a ".pub" suffix yield public nodes.
        for path in ("M/0", "M/0.pub", "m/0.pub", "M", "m.pub"):
            self.assert_public(self.wallet.get_child_for_path(path))

    def test_public_final_with_prime(self):
        self.assert_public(self.wallet.get_child_for_path("M/0/1'/2/3'.pub"))

    def test_public_child_restore(self):
        pub_child = self.wallet.get_child_for_path("M/0")
        self.assert_public(pub_child)
        restored = Wallet.deserialize(pub_child.serialize(False))
        self.assertEqual(pub_child, restored)
        # Both copies must derive identical sub-children.
        self.assertEqual(pub_child.get_child_for_path("m/1"),
                         restored.get_child_for_path("m/1"))

    def test_invalid_path(self):
        self.assertRaises(ValueError,
                          self.wallet.get_child_for_path,
                          None)
        for bad_path in ("", "m/foo", "M/1234/4567m"):
            self.assertRaises(InvalidPathError,
                              self.wallet.get_child_for_path,
                              bad_path)

    def test_child_too_small(self):
        self.assertRaises(ValueError,
                          self.wallet.get_child,
                          -(0x80000000 + 1))

    def test_child_too_big(self):
        self.assertRaises(ValueError,
                          self.wallet.get_child,
                          0xFFFFFFFF + 1)

    def test_path_bigger_than_boundary(self):
        boundary = 0x80000000
        self.assertRaises(ValueError,
                          self.wallet.get_child_for_path, "m/%s" % boundary)
        self.assertRaises(ValueError,
                          self.wallet.get_child_for_path,
                          "m/%s" % (boundary + 1))
        # Just below the boundary, prime and non-prime children differ.
        self.assertNotEqual(
            self.wallet.get_child_for_path("m/%s'" % (boundary - 1)),
            self.wallet.get_child_for_path("m/%s" % (boundary - 1)))

    def test_child_bigger_than_boundary(self):
        boundary = 0x80000000
        # -1 and the boundary itself are invalid regardless of is_prime.
        for bad_child in (-1, boundary):
            self.assertRaises(
                ValueError, self.wallet.get_child, bad_child, is_prime=True)
            self.assertRaises(
                ValueError, self.wallet.get_child, bad_child, is_prime=False)
class TestSerialize(TestCase):
    """Serialize/deserialize round-trips; subclasses swap the network."""
    network = BitcoinMainNet

    @classmethod
    def setUpClass(cls):
        cls.wallet = Wallet.new_random_wallet(network=cls.network)

    def test_serialize_private(self):
        # Hex and base58 private serializations both round-trip.
        for serialized in (self.wallet.serialize(private=True),
                           self.wallet.serialize_b58(private=True)):
            restored = Wallet.deserialize(serialized, network=self.network)
            self.assertTrue(restored.private_key)
            self.assertEqual(restored, self.wallet)

    def test_serialize_public(self):
        # Hex and base58 public serializations yield watch-only wallets.
        for serialized in (self.wallet.serialize(private=False),
                           self.wallet.serialize_b58(private=False)):
            restored = Wallet.deserialize(serialized, network=self.network)
            self.assertFalse(restored.private_key)

    def test_deserialize_byte_array(self):
        raw = binascii.unhexlify(self.wallet.serialize())
        restored = Wallet.deserialize(raw, network=self.network)
        self.assertEqual(restored, self.wallet)
class TestSerializeDogecoin(TestSerialize):
    # Re-runs TestSerialize's round-trip tests with Dogecoin prefixes.
    network = DogecoinMainNet
class TestSerializeLitecoin(TestSerialize):
    # Re-runs TestSerialize's round-trip tests with Litecoin prefixes.
    network = LitecoinMainNet
class _TestWalletVectors(TestCase):
    """Shared helpers for the BIP32 test-vector suites below.

    Each vector is a positional list matching _test_vector's parameters
    after ``key``:
        [id_hex, fingerprint, address, secret_key_hex, secret_key_wif,
         pubkey_hex, chaincode_hex, pubkey_serialized_hex,
         private_serialized_hex, pubkey_base58, private_base58]
    """

    def _test_vector(self, key, id_hex, fingerprint, address,
                     secret_key_hex, secret_key_wif,
                     pubkey_hex, chaincode_hex,
                     pubkey_serialized_hex, private_serialized_hex,
                     pubkey_base58, private_base58,
                     include_private=True
                     ):
        # Public-side expectations always apply.
        self.assertEqual(key.identifier, ensure_bytes(id_hex))
        self.assertEqual(key.fingerprint, ensure_bytes(fingerprint))
        self.assertEqual(key.to_address(), address)
        self.assertEqual(key.get_public_key_hex(), ensure_bytes(pubkey_hex))
        self.assertEqual(key.chain_code, ensure_bytes(chaincode_hex))
        self.assertEqual(key.serialize(private=False),
                         ensure_bytes(pubkey_serialized_hex))
        self.assertEqual(key.serialize_b58(private=False), pubkey_base58)
        # Private-side expectations are skipped for watch-only keys.
        if include_private:
            self.assertEqual(key.get_private_key_hex(),
                             ensure_bytes(secret_key_hex))
            self.assertEqual(key.export_to_wif(), secret_key_wif)
            self.assertEqual(key.serialize(),
                             ensure_bytes(private_serialized_hex))
            self.assertEqual(key.serialize_b58(), private_base58)

    def _test_deserialize(self, child, *vector):
        # The private serialization round-trips to a full key...
        self._test_vector(
            Wallet.deserialize(child.serialize(private=True)),
            *vector)
        # ...while the public serialization round-trips watch-only.
        self._test_vector(
            Wallet.deserialize(child.serialize(private=False)),
            *vector, include_private=False)
class TestWalletVectors1(_TestWalletVectors):
    """BIP32 test vector 1 (seed 000102030405060708090a0b0c0d0e0f)."""

    @classmethod
    def setUpClass(cls):
        cls.master_key = Wallet.from_master_secret(
            binascii.unhexlify('000102030405060708090a0b0c0d0e0f'))

    def test_m(self):
        """[Chain m]"""
        vector = [
            '3442193e1bb70916e914552172cd4e2dbc9df811',
            '0x3442193e',
            '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma',
            'e8f32e723decf4051aefac8e2c93c9c5b214313817cdb01a1494b917c8436b35',
            'L52XzL2cMkHxqxBXRyEpnPQZGUs3uKiL3R11XbAdHigRzDozKZeW',
            '0339a36013301597daef41fbe593a02cc513d0b55527ec2df1050e2e8ff49c85c2',  # nopep8
            '873dff81c02f525623fd1fe5167eac3a55a049de3d314bb42ee227ffed37d508',
            '0488b21e000000000000000000873dff81c02f525623fd1fe5167eac3a55a049de3d314bb42ee227ffed37d5080339a36013301597daef41fbe593a02cc513d0b55527ec2df1050e2e8ff49c85c2',  # nopep8
            '0488ade4000000000000000000873dff81c02f525623fd1fe5167eac3a55a049de3d314bb42ee227ffed37d50800e8f32e723decf4051aefac8e2c93c9c5b214313817cdb01a1494b917c8436b35',  # nopep8
            'xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8',  # nopep8
            'xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi',  # nopep8
        ]
        self._test_vector(self.master_key, *vector)
        self._test_vector(self.master_key.get_child_for_path("m"), *vector)
        self._test_deserialize(self.master_key, *vector)

    def test_m_0p(self):
        # Chain m/0'
        vector = [
            '5c1bd648ed23aa5fd50ba52b2457c11e9e80a6a7',
            '0x5c1bd648',
            '19Q2WoS5hSS6T8GjhK8KZLMgmWaq4neXrh',
            'edb2e14f9ee77d26dd93b4ecede8d16ed408ce149b6cd80b0715a2d911a0afea',
            'L5BmPijJjrKbiUfG4zbiFKNqkvuJ8usooJmzuD7Z8dkRoTThYnAT',
            '035a784662a4a20a65bf6aab9ae98a6c068a81c52e4b032c0fb5400c706cfccc56',  # nopep8
            '47fdacbd0f1097043b78c63c20c34ef4ed9a111d980047ad16282c7ae6236141',
            '0488b21e013442193e8000000047fdacbd0f1097043b78c63c20c34ef4ed9a111d980047ad16282c7ae6236141035a784662a4a20a65bf6aab9ae98a6c068a81c52e4b032c0fb5400c706cfccc56',  # nopep8
            '0488ade4013442193e8000000047fdacbd0f1097043b78c63c20c34ef4ed9a111d980047ad16282c7ae623614100edb2e14f9ee77d26dd93b4ecede8d16ed408ce149b6cd80b0715a2d911a0afea',  # nopep8
            'xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw',  # nopep8
            'xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7',  # nopep8
        ]
        child = self.master_key.get_child(0, is_prime=True)
        self._test_vector(child, *vector)
        # "'" and "p" are interchangeable prime markers in paths.
        self._test_vector(self.master_key.get_child_for_path("m/0'"), *vector)
        self._test_vector(self.master_key.get_child_for_path("m/0p"), *vector)
        self._test_deserialize(child, *vector)

    def test_m_0p_1(self):
        # Chain m/0'/1
        vector = [
            'bef5a2f9a56a94aab12459f72ad9cf8cf19c7bbe',
            '0xbef5a2f9',
            '1JQheacLPdM5ySCkrZkV66G2ApAXe1mqLj',
            '3c6cb8d0f6a264c91ea8b5030fadaa8e538b020f0a387421a12de9319dc93368',
            'KyFAjQ5rgrKvhXvNMtFB5PCSKUYD1yyPEe3xr3T34TZSUHycXtMM',
            '03501e454bf00751f24b1b489aa925215d66af2234e3891c3b21a52bedb3cd711c',  # nopep8
            '2a7857631386ba23dacac34180dd1983734e444fdbf774041578e9b6adb37c19',
            '0488b21e025c1bd648000000012a7857631386ba23dacac34180dd1983734e444fdbf774041578e9b6adb37c1903501e454bf00751f24b1b489aa925215d66af2234e3891c3b21a52bedb3cd711c',  # nopep8
            '0488ade4025c1bd648000000012a7857631386ba23dacac34180dd1983734e444fdbf774041578e9b6adb37c19003c6cb8d0f6a264c91ea8b5030fadaa8e538b020f0a387421a12de9319dc93368',  # nopep8
            'xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ',  # nopep8
            'xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs',  # nopep8
        ]
        m0 = self.master_key.get_child(0, is_prime=True)
        child = m0.get_child(1, is_prime=False)
        self._test_vector(child, *vector)
        self._test_vector(
            self.master_key.get_child_for_path("m/0'/1"), *vector)
        self._test_vector(
            self.master_key.get_child_for_path("m/0p/1"), *vector)
        self._test_deserialize(child, *vector)

    def test_m_0p_1_2p(self):
        # Chain m/0'/1/2' (negative child numbers also mean prime).
        vector = [
            'ee7ab90cde56a8c0e2bb086ac49748b8db9dce72',
            '0xee7ab90c',
            '1NjxqbA9aZWnh17q1UW3rB4EPu79wDXj7x',
            'cbce0d719ecf7431d88e6a89fa1483e02e35092af60c042b1df2ff59fa424dca',
            'L43t3od1Gh7Lj55Bzjj1xDAgJDcL7YFo2nEcNaMGiyRZS1CidBVU',
            '0357bfe1e341d01c69fe5654309956cbea516822fba8a601743a012a7896ee8dc2',  # nopep8
            '04466b9cc8e161e966409ca52986c584f07e9dc81f735db683c3ff6ec7b1503f',
            '0488b21e03bef5a2f98000000204466b9cc8e161e966409ca52986c584f07e9dc81f735db683c3ff6ec7b1503f0357bfe1e341d01c69fe5654309956cbea516822fba8a601743a012a7896ee8dc2',  # nopep8
            '0488ade403bef5a2f98000000204466b9cc8e161e966409ca52986c584f07e9dc81f735db683c3ff6ec7b1503f00cbce0d719ecf7431d88e6a89fa1483e02e35092af60c042b1df2ff59fa424dca',  # nopep8
            'xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5',  # nopep8
            'xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM',  # nopep8
        ]
        child = self.master_key.get_child(0, True).get_child(1).get_child(-2)
        self._test_vector(child, *vector)
        self._test_vector(
            self.master_key.get_child_for_path("m/0'/1/2'"), *vector)
        self._test_vector(
            self.master_key.get_child_for_path("m/0p/1/2p"), *vector)
        self._test_deserialize(child, *vector)

    def test_m_0p_1_2p_2(self):
        # Chain m/0'/1/2'/2
        vector = [
            'd880d7d893848509a62d8fb74e32148dac68412f',
            '0xd880d7d8',
            '1LjmJcdPnDHhNTUgrWyhLGnRDKxQjoxAgt',
            '0f479245fb19a38a1954c5c7c0ebab2f9bdfd96a17563ef28a6a4b1a2a764ef4',
            'KwjQsVuMjbCP2Zmr3VaFaStav7NvevwjvvkqrWd5Qmh1XVnCteBR',
            '02e8445082a72f29b75ca48748a914df60622a609cacfce8ed0e35804560741d29',  # nopep8
            'cfb71883f01676f587d023cc53a35bc7f88f724b1f8c2892ac1275ac822a3edd',
            '0488b21e04ee7ab90c00000002cfb71883f01676f587d023cc53a35bc7f88f724b1f8c2892ac1275ac822a3edd02e8445082a72f29b75ca48748a914df60622a609cacfce8ed0e35804560741d29',  # nopep8
            '0488ade404ee7ab90c00000002cfb71883f01676f587d023cc53a35bc7f88f724b1f8c2892ac1275ac822a3edd000f479245fb19a38a1954c5c7c0ebab2f9bdfd96a17563ef28a6a4b1a2a764ef4',  # nopep8
            'xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV',  # nopep8
            'xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334',  # nopep8
        ]
        node = self.master_key.get_child(0, True).get_child(1).get_child(-2)
        child = node.get_child(2)
        self._test_vector(child, *vector)
        self._test_vector(
            self.master_key.get_child_for_path("m/0'/1/2'/2"), *vector)
        self._test_vector(
            self.master_key.get_child_for_path("m/0p/1/2p/2"), *vector)
        self._test_deserialize(child, *vector)

    def test_m_0p_1_2p_2_1000000000(self):
        # Chain m/0'/1/2'/2/1000000000
        vector = [
            'd69aa102255fed74378278c7812701ea641fdf32',
            '0xd69aa102',
            '1LZiqrop2HGR4qrH1ULZPyBpU6AUP49Uam',
            '471b76e389e528d6de6d816857e012c5455051cad6660850e58372a6c3e6e7c8',
            'Kybw8izYevo5xMh1TK7aUr7jHFCxXS1zv8p3oqFz3o2zFbhRXHYs',
            '022a471424da5e657499d1ff51cb43c47481a03b1e77f951fe64cec9f5a48f7011',  # nopep8
            'c783e67b921d2beb8f6b389cc646d7263b4145701dadd2161548a8b078e65e9e',
            '0488b21e05d880d7d83b9aca00c783e67b921d2beb8f6b389cc646d7263b4145701dadd2161548a8b078e65e9e022a471424da5e657499d1ff51cb43c47481a03b1e77f951fe64cec9f5a48f7011',  # nopep8
            '0488ade405d880d7d83b9aca00c783e67b921d2beb8f6b389cc646d7263b4145701dadd2161548a8b078e65e9e00471b76e389e528d6de6d816857e012c5455051cad6660850e58372a6c3e6e7c8',  # nopep8
            'xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy',  # nopep8
            'xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76',  # nopep8
        ]
        child = (self.master_key.get_child(0, True)
                 .get_child(1).get_child(-2).get_child(2)
                 .get_child(1000000000))
        self._test_vector(child, *vector)
        self._test_vector(
            self.master_key.get_child_for_path("m/0'/1/2'/2/1000000000"),
            *vector)
        self._test_vector(
            self.master_key.get_child_for_path("m/0p/1/2p/2/1000000000"),
            *vector)
        self._test_deserialize(child, *vector)
class TestWalletVectors2(_TestWalletVectors):
@classmethod
def setUpClass(cls):
cls.master_key = Wallet.from_master_secret(binascii.unhexlify(
'fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a2'
'9f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542'
))
def test_m(self):
vector = [
'bd16bee53961a47d6ad888e29545434a89bdfe95',
'0xbd16bee5',
'1JEoxevbLLG8cVqeoGKQiAwoWbNYSUyYjg',
'4b03d6fc340455b363f51020ad3ecca4f0850280cf436c70c727923f6db46c3e',
'KyjXhyHF9wTphBkfpxjL8hkDXDUSbE3tKANT94kXSyh6vn6nKaoy',
'03cbcaa9c98c877a26977d00825c956a238e8dddfbd322cce4f74b0b5bd6ace4a7', # nopep8
'60499f801b896d83179a4374aeb7822aaeaceaa0db1f85ee3e904c4defbd9689',
'0488b21e00000000000000000060499f801b896d83179a4374aeb7822aaeaceaa0db1f85ee3e904c4defbd968903cbcaa9c98c877a26977d00825c956a238e8dddfbd322cce4f74b0b5bd6ace4a7', # nopep8
'0488ade400000000000000000060499f801b896d83179a4374aeb7822aaeaceaa0db1f85ee3e904c4defbd9689004b03d6fc340455b363f51020ad3ecca4f0850280cf436c70c727923f6db46c3e', # nopep8
'xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB', # nopep8
'xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U', # nopep8
]
self._test_vector(self.master_key, *vector)
self._test_deserialize(self.master_key, *vector)
def test_m_0(self):
vector = [
'5a61ff8eb7aaca3010db97ebda76121610b78096',
'0x5a61ff8e',
'19EuDJdgfRkwCmRzbzVBHZWQG9QNWhftbZ',
'abe74a98f6c7eabee0428f53798f0ab8aa1bd37873999041703c742f15ac7e1e',
'L2ysLrR6KMSAtx7uPqmYpoTeiRzydXBattRXjXz5GDFPrdfPzKbj',
'02fc9e5af0ac8d9b3cecfe2a888e2117ba3d089d8585886c9c826b6b22a98d12ea', # nopep8
'f0909affaa7ee7abe5dd4e100598d4dc53cd709d5a5c2cac40e7412f232f7c9c',
'0488b21e01bd16bee500000000f0909affaa7ee7abe5dd4e100598d4dc53cd709d5a5c2cac40e7412f232f7c9c02fc9e5af0ac8d9b3cecfe2a888e2117ba3d089d8585886c9c826b6b22a98d12ea', # nopep8
'0488ade401bd16bee500000000f0909affaa7ee7abe5dd4e100598d4dc53cd709d5a5c2cac40e7412f232f7c9c00abe74a98f6c7eabee0428f53798f0ab8aa1bd37873999041703c742f15ac7e1e', # nopep8
'xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH', # nopep8
'xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt', # nopep8
]
child = self.master_key.get_child(0)
self._test_vector(child, *vector)
self._test_deserialize(child, *vector)
def test_m_0_2147483647p(self):
vector = [
'd8ab493736da02f11ed682f88339e720fb0379d1',
'0xd8ab4937',
'1Lke9bXGhn5VPrBuXgN12uGUphrttUErmk',
'877c779ad9687164e9c2f4f0f4ff0340814392330693ce95a58fe18fd52e6e93',
'L1m5VpbXmMp57P3knskwhoMTLdhAAaXiHvnGLMribbfwzVRpz2Sr',
'03c01e7425647bdefa82b12d9bad5e3e6865bee0502694b94ca58b666abc0a5c3b', # nopep8
'be17a268474a6bb9c61e1d720cf6215e2a88c5406c4aee7b38547f585c9a37d9',
'0488b21e025a61ff8effffffffbe17a268474a6bb9c61e1d720cf6215e2a88c5406c4aee7b38547f585c9a37d903c01e7425647bdefa82b12d9bad5e3e6865bee0502694b94ca58b666abc0a5c3b', # nopep8
'0488ade4025a61ff8effffffffbe17a268474a6bb9c61e1d720cf6215e2a88c5406c4aee7b38547f585c9a37d900877c779ad9687164e9c2f4f0f4ff0340814392330693ce95a58fe18fd52e6e93', # nopep8
'xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a', # nopep8
'xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9', # nopep8
]
child = self.master_key.get_child(0).get_child(2147483647, True)
self._test_vector(child, *vector)
self._test_vector(self.master_key.get_child(0)
.get_child(-2147483647), *vector)
self._test_deserialize(child, *vector)
def test_m_0_2147483647p_1(self):
vector = [
'78412e3a2296a40de124307b6485bd19833e2e34',
'0x78412e3a',
'1BxrAr2pHpeBheusmd6fHDP2tSLAUa3qsW',
'704addf544a06e5ee4bea37098463c23613da32020d604506da8c0518e1da4b7',
'KzyzXnznxSv249b4KuNkBwowaN3akiNeEHy5FWoPCJpStZbEKXN2',
'03a7d1d856deb74c508e05031f9895dab54626251b3806e16b4bd12e781a7df5b9', # nopep8
'f366f48f1ea9f2d1d3fe958c95ca84ea18e4c4ddb9366c336c927eb246fb38cb',
'0488b21e03d8ab493700000001f366f48f1ea9f2d1d3fe958c95ca84ea18e4c4ddb9366c336c927eb246fb38cb03a7d1d856deb74c508e05031f9895dab54626251b3806e16b4bd12e781a7df5b9', # nopep8
'0488ade403d8ab493700000001f366f48f1ea9f2d1d3fe958c95ca84ea18e4c4ddb9366c336c927eb246fb38cb00704addf544a06e5ee4bea37098463c23613da32020d604506da8c0518e1da4b7', # nopep8
'xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon', # nopep8
'xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef', # nopep8
]
child = (self.master_key.get_child(0)
.get_child(2147483647, True)
.get_child(1))
self._test_vector(child, *vector)
self._test_deserialize(child, *vector)
def test_m_0_2147483647p_1_2147483646p(self):
vector = [
'31a507b815593dfc51ffc7245ae7e5aee304246e',
'0x31a507b8',
'15XVotxCAV7sRx1PSCkQNsGw3W9jT9A94R',
'f1c7c871a54a804afe328b4c83a1c33b8e5ff48f5087273f04efa83b247d6a2d',
'L5KhaMvPYRW1ZoFmRjUtxxPypQ94m6BcDrPhqArhggdaTbbAFJEF',
'02d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0', # nopep8
'637807030d55d01f9a0cb3a7839515d796bd07706386a6eddf06cc29a65a0e29',
'0488b21e0478412e3afffffffe637807030d55d01f9a0cb3a7839515d796bd07706386a6eddf06cc29a65a0e2902d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0', # nopep8
'0488ade40478412e3afffffffe637807030d55d01f9a0cb3a7839515d796bd07706386a6eddf06cc29a65a0e2900f1c7c871a54a804afe328b4c83a1c33b8e5ff48f5087273f04efa83b247d6a2d', # nopep8
'xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL', # nopep8
'xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc', # nopep8
]
child = (self.master_key.get_child(0)
.get_child(2147483647, True)
.get_child(1)
.get_child(2147483646, True))
self._test_vector(child, *vector)
self._test_deserialize(child, *vector)
    def test_m_0_2147483647p_1_2147483646p_2(self):
        """Derivation chain m/0/2147483647'/1/2147483646'/2.

        Also exercises the negative-index spelling of hardened children:
        get_child(-n) must derive the same key as get_child(n, True),
        which is asserted by re-checking the same vector.
        """
        vector = [
            '26132fdbe7bf89cbc64cf8dafa3f9f88b8666220',
            '0x26132fdb',
            '14UKfRV9ZPUp6ZC9PLhqbRtxdihW9em3xt',
            'bb7d39bdb83ecf58f2fd82b6d918341cbef428661ef01ab97c28a4842125ac23',
            'L3WAYNAZPxx1fr7KCz7GN9nD5qMBnNiqEJNJMU1z9MMaannAt4aK',
            '024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c',  # nopep8
            '9452b549be8cea3ecb7a84bec10dcfd94afe4d129ebfd3b3cb58eedf394ed271',
            '0488b21e0531a507b8000000029452b549be8cea3ecb7a84bec10dcfd94afe4d129ebfd3b3cb58eedf394ed271024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c',  # nopep8
            '0488ade40531a507b8000000029452b549be8cea3ecb7a84bec10dcfd94afe4d129ebfd3b3cb58eedf394ed27100bb7d39bdb83ecf58f2fd82b6d918341cbef428661ef01ab97c28a4842125ac23',  # nopep8
            'xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt',  # nopep8
            'xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j',  # nopep8
        ]
        child = (self.master_key.get_child(0)
                 .get_child(2147483647, True)
                 .get_child(1)
                 .get_child(2147483646, True)
                 .get_child(2))
        self._test_vector(child, *vector)
        # Same chain, negative-index hardened spelling -- same vector.
        self._test_vector(self.master_key.get_child(0)
                          .get_child(-2147483647)
                          .get_child(1)
                          .get_child(-2147483646)
                          .get_child(2), *vector)
        self._test_deserialize(child, *vector)
class _TestWalletVectorsBip32org(TestCase):
    """Shared assertions for wallet test vectors generated with bip32.org."""

    def _test(self, key, private_key_b58, private_key_wif,
              pubkey_b58, pubkey_hex, address, include_private=True):
        """Assert every serialization of ``key`` matches the vector.

        ``include_private`` is passed as False for keys restored from a
        public-only serialization, which carry no private material.
        """
        if include_private:
            self.assertEqual(key.serialize_b58(), private_key_b58)
            self.assertEqual(key.export_to_wif(), private_key_wif)
        public_checks = (
            (key.serialize_b58(private=False), pubkey_b58),
            (key.get_public_key_hex(), ensure_bytes(pubkey_hex)),
            (key.to_address(), address),
        )
        for actual, expected in public_checks:
            self.assertEqual(actual, expected)

    def _test_deserialize(self, child, *vector):
        """Round-trip ``child`` through serialize/deserialize and re-check.

        The private round-trip must reproduce the full vector; the public
        round-trip is checked without the private-key fields.
        """
        for private in (True, False):
            restored = Wallet.deserialize(
                child.serialize(private=private), network=self.network)
            self._test(restored, *vector, include_private=private)
class _TestWalletVectorsDogecoin(_TestWalletVectorsBip32org):
    """Reduced Dogecoin tests -- Dogecoin has no official BIP32 vectors.

    These test values were generated using http://bip32.org.

    Fix: this text previously sat *after* the ``network`` assignment, where
    it was a no-op string expression rather than the class docstring; it is
    now the real docstring.
    """

    network = DogecoinMainNet

    @classmethod
    def setUpClass(cls):
        # One master key shared by every test method in the subclasses.
        cls.master_key = Wallet.deserialize(
            'dgpv51eADS3spNJh8qd8KgFeT3V2QZBDSkYUqbaKDwZpDN4jd3uLcR7i6CruVDsb'
            'acyx3NL2puToxM9MQYhZSsD8tBkXeQkm5btsKxpZawwPQND',
            cls.network
        )
class TestWalletVectorsDogecoin1(_TestWalletVectorsDogecoin):
    """Dogecoin chain m/0'/1/2'/2/1000000000 against bip32.org values.

    Each vector is (private b58, WIF, public b58, public key hex, address)
    -- the argument order of ``_test``.
    """

    def test_m_0p(self):
        """m/0' -- hardened child of the master key."""
        vector = [
            'dgpv54rTeYviMxmUs9cNrWWrvqJZ5C6bfH7yV66f1k9p6EBtFPSiGe8X3zP9e3YyarxzcYHWgbuuc3PcNFynEYyDFNS7yNWbisqdU9nYy2bZGPD',  # nopep8
            'QTndqZdNU46ndUrbHzMC3rqSP5PWdE3vfEeRrUDZxEHXveLwbpta',
            'dgub8ojUzErbv7RpA1GXtk8q3gr9XkUEVQ9gmgssArYntMEtoSZQgQgHhHnoDJ8Wp4swrdBSmQs7WZWp5q96TjgW8k1HpqyyfpqEvq4MD6cNMgn',  # nopep8
            '037379173b8d4a681c2dfe1d4ea4c0961f3087f7e52380e0d20d617ba175ba18ce',  # nopep8
            'DCJCTZdCiddc47n23zoaJ1cWCXpkYLfyYJ'
        ]
        key = self.master_key.get_child(0, True)
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0p_1(self):
        """m/0'/1."""
        vector = [
            'dgpv55yu5Hmd9XBBe1UNqhzUuy77eWQyBiyBGHxKrUoZGFGe3foc9AuJxVQ5e8K6C3LogwyGkEmJVwZ9kWCdg8vd61WRXpcJ6fqosi7Q69teU9r',  # nopep8
            'QUf7sx5yK5Jw6a9rHuMsRwYv3WrdzfMfwX7mwb6MG6CZ4T1TYcBW',
            'dgub8prvQyhWhfqWvs8XswcT2pei74nc1qztYtjY1bCY4NKebivJYwT5bnojDMRLqR7pnWY46yChRSoeYYCLxGrQiWWbhvBWi6WSR6kQaabSGdN',  # nopep8
            '02cbcaa03b355646ac834df6bd24744f1fbe801a9168744604171b6e228f44d4b4',  # nopep8
            'DF1kyuBcTfUwx5rvEXWLBwjUDJZVXqyNkD'
        ]
        key = (self.master_key.get_child(0, True)
               .get_child(1))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0p_1_2p(self):
        """m/0'/1/2'."""
        vector = [
            'dgpv585jjaM2m4VAQHfu9TQ9iGEiyeRbJbiwF3mqxoC7ER8FDc5rCtmVckFRQXH5XpwBiLduR5PjB85s2n1DBLqYAXkhuXC6AMmfw1mF9MkkiqJ',  # nopep8
            'QWuyvRUVSzGsPcV6r7wZD51GK8tHJ5CSHpccAdr5ojFaLSQmHXqu',
            'dgub8rxm5GGvKD9Vh9L4Bh27q7nKSCoE8ikeXeZ47ub62YBFmfCYcfKGG3f4yijWNXbZDuss3HKmJAZsjkFcC63SiPUYHxLXLnXaGg2Etq6UdSD',  # nopep8
            '0235881bfba654b68153c3e781588d2c161defeb273ff6bd333b1075f0102c8cd7',  # nopep8
            'D6VUtfV7L874S9c5Vcxmek3aRV3hDS6eqR'
        ]
        key = (self.master_key.get_child(0, True)
               .get_child(1)
               .get_child(2, True))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0p_1_2p_2(self):
        """m/0'/1/2'/2."""
        vector = [
            'dgpv59H2Cgx4gUkZwbZH44mDznT18YtBstwstes8dR5H3fVLzXzo91dwGDFRhTHY637rus3akpLUe1EQ54rsBqxGj5ZJRrxxZc7GSw2bBX9FJWi',  # nopep8
            'QQMEq6rxPz7ZToTtfkGbshrNzn13kXLUUG6733rrqArzpaWCnYv5',
            'dgub8tA3YNsxEdQuETDS6JPC7dzbb7Fpi1ybBFeLnXUFqnYMYb7VYnBhuWf5GgFYWN8B4vSqfSfpEgdR97QSBgMAyp3w2JHyzPCxP41nRXAMdno',  # nopep8
            '0238859645107ce894071e0ba4243b512d4d0fcd9fc49c0d1f1fe98ab86afe2179',  # nopep8
            'D5zp6rHbVHWepGxonaSG1CKAmSY7fgRZdW'
        ]
        key = (self.master_key.get_child(0, True)
               .get_child(1)
               .get_child(2, True)
               .get_child(2))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0p_1_2p_2_1000000000(self):
        """m/0'/1/2'/2/1000000000."""
        vector = [
            'dgpv5B7qzmM1EoGC3RtP2wNxfAxTZsZ8ULrAeTNgTxABfSsdUSGkwpskeHaixWSW4urESb5ATNA49QhJK39RR8wzbXJrkLbBvMiv2MzTCTB3wJR',  # nopep8
            'QW8D8EFkCa5JqLg4zeDwBj7iuk3jyGgEyFmwV6kUSQQhjwZk3nBv',
            'dgub8uzsLTGtnwvXLHYY5Azvn2W42RvmJTssw49td4ZATZve2VPTMbRXHazNXiqqASTcaTsxoPRhsZAeiY3XA9gaJH6dJkZNUw3LwRvbVAYtuEL',  # nopep8
            '029aa4dc7df5d6b55058ab29b7e4020dbb7253aec3ecfa31fcd3170d2b26ec61b1',  # nopep8
            'DTRNwCes9k4xqLb9iD9kpSUpLpfsnQk2uw'
        ]
        key = (self.master_key.get_child(0, True)
               .get_child(1)
               .get_child(2, True)
               .get_child(2)
               .get_child(1000000000))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)
class TestWalletVectorsDogecoin2(_TestWalletVectorsDogecoin):
    """Dogecoin chain m/0/2147483647'/1/2147483646'/2 (bip32.org values).

    Each vector is (private b58, WIF, public b58, public key hex, address)
    -- the argument order of ``_test``.

    Fix: ``test_m`` previously called ``_test_deserialize`` twice with
    identical arguments; the redundant call is removed.
    """

    def test_m(self):
        """The master key itself (no derivation)."""
        vector = [
            'dgpv51eADS3spNJh8qd8KgFeT3V2QZBDSkYUqbaKDwZpDN4jd3uLcR7i6CruVDsbacyx3NL2puToxM9MQYhZSsD8tBkXeQkm5btsKxpZawwPQND',  # nopep8
            'QPNHZTWZzk2tdNknJqkP5SS4jwqjHwsDA4i4oPcsQ1abCck5dZzx',
            'dgub8kXBZ7ymNWy2RhHHMuscZu2cs7YrGsaC8CMXP3xo1V7kB7232BfUjWGZ4VS8wHCPDNWmJdCZjo81gbpm1Co2pLyNSjpqDJYmMTGKeyAGuo9',  # nopep8
            '0371070e700787e78e1810b2843c0723fcf25643f9de9bb90fca95ca2941dd485c',  # nopep8
            'DMeAv9o4rFgDTFDhSYupoRHEwNmE98FDDi'
        ]
        self._test(self.master_key, *vector)
        self._test_deserialize(self.master_key, *vector)

    def test_m_0(self):
        """m/0 -- first non-hardened child."""
        vector = [
            'dgpv54rTeYva2JEWgt3hvAU6ukxYEgeKwe9Nh5CNhYkdvRDL2SrhuWHurhsER1cbNuHrtUcRVrSgJ3so8PX7V2Bn6KLhYzq9GZickzbrsavazMV',  # nopep8
            'QSk4UkgRmxH6XDBofiUZad7grkSKj4NQsxyKWniaoun4Az3fmmdE',
            'dgub8ojUzErTaStqyjhrxQ652cW8hF1xmmB5yfyarf9ciYGLaVyQKGqgW1GszEmFjCQkXWGV9SkCpjUfNpc4mQW1EcoBBsQoe4RV61QJC4G9X3d',  # nopep8
            '029820c6a6046cebadb9a40c717326b004257f4cc111010b571daacfe58b542565',  # nopep8
            'D6DLgbqjac4JqFQj7TkU2q5uqVAKFvAUHz'
        ]
        key = self.master_key.get_child(0)
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0_2147483647p(self):
        """m/0/2147483647'."""
        vector = [
            'dgpv55VT18PENZdLnv1jMvtZXELhZCfzsFmFG9Qoe5ktw4oLSirqapnwx3x9nHL6jCZSRkYMKwcziQAZmKgBGbttC6kFbwoTfGWtNWnF5hFQwsk',  # nopep8
            'QVvKXa4BZKMok5WLJVPY9mF6YkhWaS5BJeM8pMfGBufKUKyjiwoL',
            'dgub8pNULpK7viHg5mftQAWXe5tJ1m3dhNnxYkC1oC9sjBrLzmyXzbLibMMoMYSUC34ETAQM3BVUHbdgvuFFvCGLTLifbqLeiR67x2rfcndnNC8',  # nopep8
            '03f91b9fe3110c8cbe885666e9a86237114faf54d6e124a20320a10b7847ad8e7c',  # nopep8
            'DFLc2YseDUQkS9gChgzjuFA7MNUo2Kz2ch'
        ]
        key = self.master_key.get_child(0).get_child(2147483647, True)
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0_2147483647p_1(self):
        """m/0/2147483647'/1."""
        vector = [
            'dgpv587FrzGt8WwxqYnVBDDrEmfR5TVjBaH6p2js6YQ6S7arktrwK9Pt4AqzcLMuUnbrCJyUnomeYHHfxp9qDrtJLUJPaEyjg263PfzYgCwXnjH',  # nopep8
            'QQrZo8xssr5ryo8PBUsu2ukzbGEYu8khaZnTYYDDA8p8xwqe8Ub4',
            'dgub8rzHCgCmgfcJ8QSeDSqpMdD1Y1sN1hJp6dX5Feo5EEdsJwydiuwehUFeBcBPyyTMTCToL5E3DsWtMpDLD7ZxwXDHA5Ty7kxnLgK4SxsMmbg',  # nopep8
            '03bf5c6222396af17f76f577d5b4f1ab291ef051ae538eab1db8586f5de6112aa7',  # nopep8
            'DRwmLE3MgfPigYkeZ8nJArb37eCrrubVqM'
        ]
        key = (self.master_key.get_child(0).
               get_child(2147483647, True).
               get_child(1))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0_2147483647p_1_2147483646p(self):
        """m/0/2147483647'/1/2147483646'."""
        vector = [
            'dgpv5AqzQ1J7t4unSJCG7GhNnvSEnicPLNVdjB1xXseffVjc7HnkCBAJ7vDvZdqYUX3xnhhPDXqTKdwLGkuiLqMjvFoPcSbATYxEgCMYsqexwQB',  # nopep8
            'QP3TK4n8NaHY35rkzSYFCPX2zMXxjshjVUXcqpStspQ6HE1qsbXX',
            'dgub8uj1jhE1SDa7j9rR9WKLumyqFGz2AVXM1moAgz3eTcncfLuSbwi4mDda8uKTuyiw7K66K2CwXY7KPMCGk7rQD7V6CtZ2yr4EVoBPMTqt4Bd',  # nopep8
            '035cd9e4427e59a367b04ca0b34be7a78968d713004bcf1917fcc11c94c04e4477',  # nopep8
            'DK6Rf67LRccSVPA8ew1jtDsMV2W6deokqg'
        ]
        key = (self.master_key.get_child(0)
               .get_child(2147483647, True)
               .get_child(1)
               .get_child(2147483646, True))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0_2147483647p_1_2147483646p_2(self):
        """m/0/2147483647'/1/2147483646'/2."""
        vector = [
            'dgpv5CB6BEhuLSCN16LBqxDgpLZMtAmu88f6c376sK4FVds1PYgSyAy8dB4oMXwxmDiuodEVKEMoWzaDci3fmpi2yE9eE2jQjHcvQ2ojqQLLcrH',  # nopep8
            'QWLvYMin1VjHqQuS33nq23CMmACUxBxAfYLmZXYaaGuJHjp5T5Lx',
            'dgub8w47WvdntarhHwzLtBqewC6xLj9XxFgotdtK2RTEHkv1wbo9NwWuGUUSvnSy57u82XKNZkxzfv6iNTcakj8VLzhtnhCpKLrdW4spZB7eosx',  # nopep8
            '03b5770cca42dd6159a22113a4f1970794d5db993a46a45bcd1c4ee6399003d394',  # nopep8
            'DNoz4kLEcUENEjUceiugpw6hGPgmFJoc7C'
        ]
        key = (self.master_key.get_child(0)
               .get_child(2147483647, True)
               .get_child(1)
               .get_child(2147483646, True)
               .get_child(2))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)
class _TestWalletVectorsLitecoin(_TestWalletVectorsBip32org):
    """Reduced Litecoin tests -- Litecoin has no official BIP32 vectors.

    These test values were generated using http://bip32.org.

    Fix: this text previously sat *after* the ``network`` assignment, where
    it was a no-op string expression rather than the class docstring; it is
    now the real docstring.
    """

    network = LitecoinMainNet

    @classmethod
    def setUpClass(cls):
        # One master key shared by every test method in the subclasses.
        cls.master_key = Wallet.deserialize(
            'Ltpv71G8qDifUiNetGsQje8NP1KYECbgKwSP2kugo4qN9GPfJ1KB8XMXxqBTYsA5'
            'PgwQaFV9u5PhKxosbjcnKKCPpPvaYVSUz24KMctXYig39Te',
            cls.network
        )
class TestWalletVectorsLitecoin1(_TestWalletVectorsLitecoin):
    """Litecoin chain m/0'/1/2'/2/1000000000 against bip32.org values.

    Each vector is (private b58, WIF, public b58, public key hex, address)
    -- the argument order of ``_test``.
    """

    def test_m_0p(self):
        """m/0' -- hardened child of the master key."""
        vector = [
            'Ltpv74V6By3UsgGzZw27UtyGEkYeGUUyP8DeDLwnVNwrkaUHxnai5mJbmAG6JHaKSnZhZMxXyhQXU4NTqqygJxKiNt1MdKgr7jEuDZ4uagqrKDa',  # nopep8
            'TAqpVhaoeiN17bd7keFxKc4nAhAXFaEVuXQcyRQvBh51LxPLkAAX',
            'Ltub2VfRnkU27poxBoiwjWTeKLNri3BATNnJHs3pAMi9gmFtZ9mnQgM2mmNMYdmG16ksFsF3NURRQBirSkAnNTr4gm7Mq85EBCUNCopnJieQvAr',  # nopep8
            '027bd1f86dcd5bab63040f8f334e56d206959031df9291e4721e018e7206dcf8a2',  # nopep8
            'Lf9q6hjcHBcyqLbFvUAeW8XrAVtmjksK2x'
        ]
        key = self.master_key.get_child(0, True)
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0p_1(self):
        """m/0'/1."""
        vector = [
            'Ltpv76dbn3DT9k1QZk4jobJ9U4i32rWgwg361gCWCBw6YpEmWd6njuHBnqD9GsP96ZuvPzohf53SwM2WCpd9tRAyxKZ98PHehciT676FoTUJFjG',  # nopep8
            'T3mHwrvvGQAQYgw6NkEAbvnEW7DuVhHAoHxbrvdrRnCH9ugE3d5W',
            'Ltub2XowNpdzPtYNBcma4CnXYeYFURCt1vbk6CJXsAhPV12N6zHs4pKcoSKQXHHb7cTDY9gUxQ95EBYxTTYE2cqjZmvw64uvXAqgx5f5i8DTtnd',  # nopep8
            '0391f78495549245157979b19b8c6ddad42f4092602819d85278ad22db87cb6730',  # nopep8
            'LPJZvWyD3i6JSMnwJtcLEMX7kYbPLbTqzE'
        ]
        key = (self.master_key.get_child(0, True)
               .get_child(1))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0p_1_2p(self):
        """m/0'/1/2'."""
        vector = [
            'Ltpv77EctZnoc1SgaSffnPGVABbenBaeYBsiiNquoszDNKphMw3i4AHfJ2NaKRgKV5mUjw9qez5qRF1HTZxXegUE3W1ebdbWVETdKDMxa587Gnk',  # nopep8
            'T9VTp2qTW18DYLN6Rc6a2XP2cWy2mfLqNAQZDLJm27TdsZUG9jL7',
            'Ltub2YQxVMDLr9yeCKNW2zksEmRsDkGqcSSNntwwUrkWJWcHxJEnP5L6JdUqZpdzFr7ijwz4xH3fL9E9jn5246F2nNHhPsSpWrk8Bu5zYDb5LTY',  # nopep8
            '03c0767a6c05d488b79465e973604eeeb008bfc8646877afb6237e483937beb788',  # nopep8
            'LdkUqtqarziHXTnC6borMadsfQesVzEJXx'
        ]
        key = (self.master_key.get_child(0, True)
               .get_child(1)
               .get_child(2, True))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0p_1_2p_2(self):
        """m/0'/1/2'/2."""
        vector = [
            'Ltpv7AHK5coBDoEZFnWC7WtcSJq58fWbJuM2FrmWVitWz9HoeQijqcW48v1N3aYHp8hkgYKHbzvqydDdPu6Lv6MykvxWhCfifdP5yAJQzHqWKeg',  # nopep8
            'T9tMP13KhuheJkJGFXaAM2REkFCDfHh5ebF1EHYDShTFG6t775gp',
            'Ltub2bTegQDiTwmWsfD2N8NzWtfHaECnP9ugLNsYAheovL5QEmupAXYV9X7dHvJmMptrs9dHRkfPxY7iTYz5Mp8nAPkR26GzqCMVkiV7bVPCn9b',  # nopep8
            '0227bbb5af873704535c17e7cf3cbd087760d0f4027ec5a44aa35afaba6e7d0266',  # nopep8
            'LgML9sstrXSnbxEv5UT9VYgp2dE7Je64FH'
        ]
        key = (self.master_key.get_child(0, True)
               .get_child(1)
               .get_child(2, True)
               .get_child(2))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0p_1_2p_2_1000000000(self):
        """m/0'/1/2'/2/1000000000."""
        vector = [
            'Ltpv7CNcMprvXc2ZzLAXxhuYeKL5N62NnpDsHNnS3gxFr7eJndZL3vKrXrvyMR2miDk7LSrRKQ7gSKWQwhCMLxuazuRFuCCZNbM5NXUBTrwAJwb',  # nopep8
            'TAefeF5TJVCTFRozizQtVJ9ku6gs1EdBKko3yea23o6w3ESVhQZY',
            'Ltub2dYwxcHTmkZXcCsNDKPviuAHoeiZs4nXMttTifiYnJRuNzkQNqNHYU3EbkUg3oz1WvkLs9vnNNGPa5vgHbeBjWZZ7YQbERi8E9GnAzXnVHw',  # nopep8
            '02145bf57dcfe571710c61143adb44e80dd2ca44910b89406862962545fa567c96',  # nopep8
            'LQWUfR2ybmJGyLSps2fVSTCa9zmr9p9RQi'
        ]
        key = (self.master_key.get_child(0, True)
               .get_child(1)
               .get_child(2, True)
               .get_child(2)
               .get_child(1000000000))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)
class TestWalletVectorsLitecoin2(_TestWalletVectorsLitecoin):
    """Litecoin chain m/0/2147483647'/1/2147483646'/2 (bip32.org values).

    Each vector is (private b58, WIF, public b58, public key hex, address)
    -- the argument order of ``_test``.

    Fix: ``test_m`` previously called ``_test_deserialize`` twice with
    identical arguments; the redundant call is removed.
    """

    def test_m(self):
        """The master key itself (no derivation)."""
        vector = [
            'Ltpv71G8qDifUiNetGsQje8NP1KYECbgKwSP2kugo4qN9GPfJ1KB8XMXxqBTYsA5PgwQaFV9u5PhKxosbjcnKKCPpPvaYVSUz24KMctXYig39Te',  # nopep8
            'T4HX1Wffx49Wbdfog3RF31m7P611LT8KPc17ZB4USQTMCZhazwNn',
            'Ltub2SSUS19CirucW9aEzFckTb9kfmHsQC137H1iU3bf5TBFtNWFTSPxySHioHCHEtCb3NPSZn1FJM6joFKevvxx6vV4ggaQcKiYzaNucXpRyY8',  # nopep8
            '03b3204919fa92d16d869fc39f3510e0bc7b2ce53c1bf6124448f2cbbbaf29db38',  # nopep8
            'Lbs921f129AWWyb5kfdtSefUgreidPwqAP'
        ]
        self._test(self.master_key, *vector)
        self._test_deserialize(self.master_key, *vector)

    def test_m_0(self):
        """m/0 -- first non-hardened child."""
        vector = [
            'Ltpv74V6By3LY1k2RyNdNhyBCtTgxbu6VrSDx8177z5g9phb8mmiJnC5dyGEL1AxCX4BWWJEcZBxep1j7wAPUp3jXqramror3Rdtg76ZNfwqeMr',  # nopep8
            'T9ARV5FUdiaXyp8boDPj9H2FjWxvdEUHaURrgsVeZjVFGTesNfku',
            'Ltub2VfRnkTsnAGz3r5TdKTZHUHuQAbHa6zt2e78nxqy61VBj8xndhEWeaNVaQ6N8SLEPiF8UxEXkFBbPyKiutX6FhbKJXnQAaTRXw56e8zm2qs',  # nopep8
            '03b18ba94530690859a3f6ebb2b866d151f8499b3164d027ba5b464e4ed71329aa',  # nopep8
            'LftGBdKxo8aXqvs74g71692apDXQaPz17Z'
        ]
        key = self.master_key.get_child(0)
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0_2147483647p(self):
        """m/0/2147483647'."""
        vector = [
            'Ltpv76h2CpMgsQfUbzphcJBPEBDEeP8U9WizJtRsxwTzq8LeSE6eVhZ5nzvPPxK8HifWH3GCpys6qWnzpBeFVVBaKQRki3tEN82PJUi3SdBQZcr',  # nopep8
            'T3kqK41soU9GEWrDYERqqHo8kcQvVhu9J8tmk8RRqshTi5FWUtBw',
            'Ltub2XsMobnE7ZCSDsXXrufmJm3T5wpfDmHePQXudvEHmK8F2bHipcbWoc2eeNsqqjXRCgLQgkV1jS83mAZBtHqyaqPu1hkyUU1bhoQ9Pd2Fry2',  # nopep8
            '03e89f5654eb8489c71bb68f9df7d28c6f48a0f46c6fc2bef5ae11bb5536cebbb7',  # nopep8
            'LbMKq3XN8iUCJnCfxM8SgC6YGjoMazcrz3'
        ]
        key = self.master_key.get_child(0).get_child(2147483647, True)
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0_2147483647p_1(self):
        """m/0/2147483647'/1."""
        vector = [
            'Ltpv78CxVKndj3P8PDBogzwnrxY7Pxtzn8vgExPZFVHeM42MGGNQ9wUJ2isvXPDHLGehGi8DdeFgNPQTGRMEKH72242jutfwB7PFbeKMhSA3pqk',  # nopep8
            'T3MWHKyN3BVcQ67wLR5H7rsJx3bhmbichmz3p38FeCrTjXE9Xbup',
            'Ltub2ZPJ67DAyBv615tdwcSAwYNKqXbBrPVLKUVavU3wHEowrdZUUrWj3KzBmoW21GQSDZBNjMs2SZ2bdBREZLH3HcHT7W2DT6DZiNsNtsEZfqF',  # nopep8
            '03b855e07eb1837015cbd465b921fb476f99b38cf575f61ec7f594f839f42b5057',  # nopep8
            'Lbqhtn3eQTiyYKWvktnxGsgSa3bEa4kNrH'
        ]
        key = (self.master_key.get_child(0).
               get_child(2147483647, True).
               get_child(1))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0_2147483647p_1_2147483646p(self):
        """m/0/2147483647'/1/2147483646'."""
        vector = [
            'Ltpv7A8P4c3YUGWoxQAsSeGJfm45zJW5fRcVqcvo9a1RifrR2z2EokC4JUnfb23mZDG1a5obbdA3KyfriaxfxdeiCCszjUwy4Tnx4NyZJhg2TXZ',  # nopep8
            'T3JRr9ymVwWqVhsed96q3hcmKVgmqw2SKGQWepCkCTEmS8bDGRiL',
            'Ltub2bJifPU5iR3maGshhFkgkLtJRsCGjgB9v92ppYmiere1dMDK8fEVK5tvqR9xBf37tXswdRY7T92jbT9L1borcpBXMhYe2cxiKB3HTCtKJf4',  # nopep8
            '031b189497b7661fb452af508ee3e014aaac34b366fc4ea178573bf9263a824bc6',  # nopep8
            'LPeSxqQM6qyFVAutnGwRDZbABLSMK7gtDD'
        ]
        key = (self.master_key.get_child(0)
               .get_child(2147483647, True)
               .get_child(1)
               .get_child(2147483646, True))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)

    def test_m_0_2147483647p_1_2147483646p_2(self):
        """m/0/2147483647'/1/2147483646'/2."""
        vector = [
            'Ltpv7B2Va6jguSVYkxsVaYVykvfVURqLTNQVn2pYs6MHFzVtFHPUAgCqbCMChCFPmvjDKfFJZQBmyztATaZTeLpaSvpP6zcaY5DJD5Qcr66MjTW',  # nopep8
            'T6u7ZdbVd4B8KWuiTdirhwJ5NNrHG73WWVatXtktmGayPERcFMMa',
            'Ltub2cCqAtAE9b2WNqaKq9zMqWVhuzXXXcy9rYvaY57aCBHUqeaYVbFGboTTwbhrJc6SezAA3mrUEKi3qey31HZHDnFfcwXYtkD3dbswWCRyKQu',  # nopep8
            '03b47c7d3f7eb51023206f636276fe6c3a0c51752360b12ec556b86849ca47b3fe',  # nopep8
            'Ld5QMVg5tych8UKBBs1Q2LxbbVFASfv3tf'
        ]
        key = (self.master_key.get_child(0)
               .get_child(2147483647, True)
               .get_child(1)
               .get_child(2147483646, True)
               .get_child(2))
        self._test(key, *vector)
        self._test_deserialize(key, *vector)
| |
#!/usr/bin/env python
# Instrument / phone configuration constants for RF tests.
# William Chang
# Test for git
# Instrument setting
Instrument_GPIB = 14  # GPIB address of the instrument
Average_times = 20  # average times for Txp and ACLR measurement
IMSI = '001010123456789'  # test-SIM subscriber identity
# Path-loss table (dict).  NOTE(review): keys look like frequencies in
# MHz and values like dB offsets -- confirm against the code that reads it.
path_loss = {700: -0.3, 1200: -0.3, 1500: -0.6, 2300: -0.6, 2500: -0.7, 2700: -0.7}
# Anritsu 8820C setting
Integrity = 1  # Integrity ON|OFF
# Phone settings
bUseQPST = 1  # bUseQPST = true to use QPST, FALSE to use QPHONEMS
Phone_Com_Port = 15  # Phone COM port
bSet_WCDMA_Waveform = 0  # For GLU(MSM8974), not to set WCDMA waveform
bSet_CDMA_Waveform = 1  # Reserve this parameter
PDM_init = 88  # Start PDM (High gain mode default PDM) (RTR6285:210, WTR1605:90)
PDM_low = 50  # PDM for -20dBm (Low gain mode PDM) (RTR6285:140, WTR1605:60)
PDM_max = 255  # PDM Max (RTR6285:255, WTR1605:127)
PDM_min = 0
iPArange_high = 0  # for high gain mode (RTR6285:3, WTR1605:0)
iPArange_low = 3  # for low gain mode (RTR6285:0, WTR1605:3)
# Per-band PA range override: Band=(HPM, LPM); bit meaning in the string
# below.
PA_range_map = dict(B7=(0, 1),
                    B20=(0, 3),
                    )
"""
0 - R0 = 0, R1 = 0,
1 - R0 = 1, R1 = 0,
2 - R0 = 0, R1 = 1,
3 - R0 = 1, R1 = 1
"""
SMPS_ON_init = 1  # SMPS ON(1)/OFF(0)
SMPS_init = 3400  # SMPS value (for High gain mode) (MSM8x25/RTR6285:380/511, MSM8x30/WTR1605:780, MSM8974/WTR1605:1000)
SMPS_low = 1000  # SMPS value for -20dBm (for Low gain mode) (MSM8x25/RTR6285:95, MSM8x30/WTR1605:230, MSM8974/WTR1605:1000)
# Tuning sweep: PDM values swept to hit TARGET_PWR (dBm).
TARGET_PWR = 23.5
PDM_start = 80
PDM_end = 90
# Tested Power Supply list.
# NOTE(review): "Suppply" is misspelled but the name is kept -- existing
# callers reference it verbatim.
Power_Suppply_list = ["GOOD WILL;PPT-1830;", "HEWLETT-PACKARD,E3631A,", "Agilent Technologies,66319D"]
# WCDMA attributes
# Channel list: three UARFCNs per band (low / mid / high channel).
WCDMA_B1_DL_ch = [10562, 10700, 10838]
WCDMA_B1_UL_ch = [9612, 9750, 9888]
WCDMA_B2_DL_ch = [9662, 9800, 9938]
WCDMA_B2_UL_ch = [9262, 9400, 9538]
WCDMA_B4_DL_ch = [1537, 1675, 1738]
WCDMA_B4_UL_ch = [1312, 1450, 1513]
WCDMA_B5_DL_ch = [4357, 4400, 4458]
WCDMA_B5_UL_ch = [4132, 4175, 4233]
WCDMA_B8_DL_ch = [2937, 3013, 3088]
WCDMA_B8_UL_ch = [2712, 2788, 2863]
WCDMA_B9_DL_ch = [9237, 9312, 9387]
WCDMA_B9_UL_ch = [8762, 8837, 8912]
WCDMA_B19_DL_ch = [712, 738, 763]
WCDMA_B19_UL_ch = [312, 338, 363]
# Band-name -> channel-list mapping.
Band_DL_ch_map = dict(B1=WCDMA_B1_DL_ch, B2=WCDMA_B2_DL_ch,
                      B4=WCDMA_B4_DL_ch, B5=WCDMA_B5_DL_ch,
                      B8=WCDMA_B8_DL_ch, B9=WCDMA_B9_DL_ch, B19=WCDMA_B19_DL_ch)
Band_UL_ch_map = dict(B1=WCDMA_B1_UL_ch, B2=WCDMA_B2_UL_ch,
                      B4=WCDMA_B4_UL_ch, B5=WCDMA_B5_UL_ch,
                      B8=WCDMA_B8_UL_ch, B9=WCDMA_B9_UL_ch, B19=WCDMA_B19_UL_ch)
# LTE attributes
# Channel list: three EARFCNs per band (low / mid / high) at 5 MHz BW.
LTE_B1_DL_ch_5M = [25, 300, 575]
LTE_B1_UL_ch_5M = [18025, 18300, 18575]
LTE_B2_DL_ch_5M = [625, 900, 1175]
LTE_B2_UL_ch_5M = [18625, 18900, 19175]
LTE_B3_DL_ch_5M = [1225, 1575, 1925]
LTE_B3_UL_ch_5M = [19225, 19575, 19925]
LTE_B4_DL_ch_5M = [1975, 2175, 2375]
LTE_B4_UL_ch_5M = [19975, 20175, 20375]
LTE_B5_DL_ch_5M = [2425, 2525, 2625]
LTE_B5_UL_ch_5M = [20425, 20525, 20625]
LTE_B7_DL_ch_5M = [2775, 3100, 3425]
LTE_B7_UL_ch_5M = [20775, 21100, 21425]
LTE_B8_DL_ch_5M = [3475, 3625, 3775]
LTE_B8_UL_ch_5M = [21475, 21625, 21775]
LTE_B11_DL_ch_5M = [4775, 4850, 4925]
LTE_B11_UL_ch_5M = [22775, 22850, 22925]
LTE_B13_DL_ch_5M = [5205, 5230, 5255]
LTE_B13_UL_ch_5M = [23205, 23230, 23255]
LTE_B17_DL_ch_5M = [5755, 5790, 5825]
LTE_B17_UL_ch_5M = [23755, 23790, 23825]
LTE_B19_DL_ch_5M = [6025, 6075, 6125]
LTE_B19_UL_ch_5M = [24025, 24075, 24125]
LTE_B20_DL_ch_5M = [6175, 6300, 6425]
LTE_B20_UL_ch_5M = [24175, 24300, 24425]
LTE_B21_DL_ch_5M = [6475, 6525, 6575]
LTE_B21_UL_ch_5M = [24475, 24525, 24575]
LTE_B25_DL_ch_5M = [8065, 8365, 8665]
LTE_B25_UL_ch_5M = [26065, 26365, 26665]
LTE_B26_DL_ch_5M = [8715, 8865, 9015]
LTE_B26_UL_ch_5M = [26715, 26865, 27015]
# Band 28 is split into two halves here: B28A (mapped as "B281") and
# B28B ("B282").
LTE_B28A_DL_ch_5M = [9235, 9335, 9434]
LTE_B28A_UL_ch_5M = [27235, 27335, 27434]
LTE_B28B_DL_ch_5M = [9435, 9535, 9635]
LTE_B28B_UL_ch_5M = [27435, 27535, 27635]
LTE_B30_DL_ch_5M = [9795, 9820, 9845]
LTE_B30_UL_ch_5M = [27685, 27710, 27735]
# Band-name -> channel-list mapping.
LTE_Band_DL_ch_map_5M = dict(B1=LTE_B1_DL_ch_5M, B2=LTE_B2_DL_ch_5M, B3=LTE_B3_DL_ch_5M, B4=LTE_B4_DL_ch_5M, B5=LTE_B5_DL_ch_5M, B7=LTE_B7_DL_ch_5M,
                             B8=LTE_B8_DL_ch_5M, B11=LTE_B11_DL_ch_5M, B13=LTE_B13_DL_ch_5M, B17=LTE_B17_DL_ch_5M,
                             B19=LTE_B19_DL_ch_5M, B20=LTE_B20_DL_ch_5M, B21=LTE_B21_DL_ch_5M, B25=LTE_B25_DL_ch_5M,
                             B26=LTE_B26_DL_ch_5M, B281=LTE_B28A_DL_ch_5M, B282=LTE_B28B_DL_ch_5M, B30=LTE_B30_DL_ch_5M)
LTE_Band_UL_ch_map_5M = dict(B1=LTE_B1_UL_ch_5M, B2=LTE_B2_UL_ch_5M, B3=LTE_B3_UL_ch_5M, B4=LTE_B4_UL_ch_5M, B5=LTE_B5_UL_ch_5M, B7=LTE_B7_UL_ch_5M,
                             B8=LTE_B8_UL_ch_5M, B11=LTE_B11_UL_ch_5M, B13=LTE_B13_UL_ch_5M, B17=LTE_B17_UL_ch_5M,
                             B19=LTE_B19_UL_ch_5M, B20=LTE_B20_UL_ch_5M, B21=LTE_B21_UL_ch_5M, B25=LTE_B25_UL_ch_5M,
                             B26=LTE_B26_UL_ch_5M, B281=LTE_B28A_UL_ch_5M, B282=LTE_B28B_UL_ch_5M, B30=LTE_B30_UL_ch_5M)
# GSM attributes
# Channel list: three ARFCNs per band (low / mid / high).  GSM uses the
# same ARFCN for UL and DL, so the UL names alias the DL list objects
# (shared, not copied).
GSM_EGSM_DL_ch = [975, 37, 124]
GSM_EGSM_UL_ch = GSM_EGSM_DL_ch
GSM_GSM850_DL_ch = [128, 190, 251]
GSM_GSM850_UL_ch = GSM_GSM850_DL_ch
GSM_DCS_DL_ch = [512, 700, 885]
GSM_DCS_UL_ch = GSM_DCS_DL_ch
GSM_PCS_DL_ch = [512, 661, 810]
GSM_PCS_UL_ch = GSM_PCS_DL_ch
# Band-name -> channel-list mapping (UL map aliases the DL map).
GSM_Band_DL_ch_map = dict(EGSM=GSM_EGSM_DL_ch, GSM850=GSM_GSM850_DL_ch, DCS=GSM_DCS_DL_ch, PCS=GSM_PCS_DL_ch)
GSM_Band_UL_ch_map = GSM_Band_DL_ch_map
# C2k attributes
# Channel list: three channels per band class; UL aliases DL.
CDMA_BC0_DL_ch = [1013, 384, 777]
CDMA_BC0_UL_ch = CDMA_BC0_DL_ch
CDMA_BC1_DL_ch = [25, 600, 1175]
CDMA_BC1_UL_ch = CDMA_BC1_DL_ch
CDMA_BC10_DL_ch = [476, 562, 684]
CDMA_BC10_UL_ch = CDMA_BC10_DL_ch
# Band-name -> channel-list mapping (UL map aliases the DL map).
CDMA_Band_DL_ch_map = dict(BC0=CDMA_BC0_DL_ch, BC1=CDMA_BC1_DL_ch, BC10=CDMA_BC10_DL_ch)
CDMA_Band_UL_ch_map = CDMA_Band_DL_ch_map
# Below is QMSL defined variable
# Just copy from QLib_Defines.h. It should be better way to include or reference, but I don't know at this moment.
# Definition of the COM port value that will be used to "auto detect" the COM port
QLIB_COM_AUTO_DETECT = 0xFFFF
# Phone modes
MODE_OFFLINE_A_F = 0  # Go to offline analog
MODE_OFFLINE_D_F = 1  # Go to offline digital
MODE_RESET_F = 2  # Reset. Only exit from offline
MODE_FTM_F = 3  # FTM mode
MODE_ONLINE_F = 4  # Go to Online
MODE_LPM_F = 5  # Low Power Mode (if supported)
MODE_POWER_OFF_F = 6  # Power off (if supported)
MODE_MAX_F = 7  # Last (and invalid) mode enum value
# Phone logging settings -- bit flags, combine with bitwise OR.
LOG_NOTHING = 0x0000  # log nothing
LOG_C_HIGH_LEVEL_START = 0x0200  # High level C function start, indicates the begining of a high level C function, which
                                 # calls other low level C functions internal to the library
LOG_C_HIGH_LEVEL_STOP = 0x4000  # High level C function stop
LOG_IO = 0x0001  # data IO (data bytes)
LOG_FN = 0x0002  # function calls with parameters
LOG_RET = 0x0004  # function return data
LOG_INF = 0x0008  # general information (nice to know)--do not use this one, as
                  # this space needs to be reserved for async messages
LOG_ASYNC = 0x0008  # asynchronous messages (shares the LOG_INF bit)
LOG_ERR = 0x0010  # critical error information
LOG_IO_AHDLC = 0x0020  # HDLC IO tracing (data bytes)
LOG_FN_AHDLC = 0x0040  # HDLC layer function calls
LOG_RET_AHDLC = 0x0080  # HDLC function return data
LOG_INF_AHDLC = 0x0100  # HDLC general information
LOG_ERR_AHDLC = LOG_INF_AHDLC  # HDLC Error info merged with LOG_INF_AHDLC, to free up the log bit
LOG_IO_DEV = 0x0400  # device IO tracing (data bytes)
LOG_FN_DEV = 0x0800  # device layer function calls
LOG_RET_DEV = 0x1000  # device function return data
LOG_INF_DEV = 0x2000  # device general information
LOG_ERR_DEV = LOG_INF_DEV  # device error information, merged with LOG_INF_DEV to free up the log bit
LOG_DEFAULT = (LOG_C_HIGH_LEVEL_START|LOG_C_HIGH_LEVEL_STOP|LOG_FN|LOG_IO|LOG_RET|LOG_ERR|LOG_ASYNC)  # default settings
LOG_ALL = 0xFFFF  # everything
# Set FTM Mode -- QMSL FTM subsystem identifiers.
FTM_MODE_ID_CDMA_1X = 0
FTM_MODE_ID_WCDMA = 1
FTM_MODE_ID_GSM = 2
FTM_MODE_ID_CDMA_1X_RX1 = 3
FTM_MODE_ID_BLUETOOTH = 4
FTM_MODE_ID_CDMA_1X_CALL = 7
FTM_MODE_ID_LOGGING = 9
FTM_MODE_ID_AGPS = 10
FTM_MODE_ID_PMIC = 11
FTM_MODE_GSM_BER = 13
FTM_MODE_ID_AUDIO = 14
FTM_MODE_ID_CAMERA = 15
FTM_MODE_WCDMA_BER = 16
FTM_MODE_ID_GSM_EXTENDED_C = 17
FTM_MODE_CDMA_API_V2 = 18
FTM_MODE_ID_MF_C = 19
FTM_MODE_RF_COMMON = 20
FTM_MODE_WCDMA_RX1 = 21
FTM_MODE_ID_LTE = 29  # LTE FTM Calibration
FTM_MODE_LTE_NS = 30  # LTE FTM Non-Signaling
FTM_MODE_CDMA_C2 = 32
FTM_MODE_CDMA_C3 = 40
FTM_MODE_CDMA_C4 = 45
FTM_MODE_ID_PRODUCTION = 0x8000
FTM_MODE_ID_LTM = 0x8001  # LTM
# For FTM Mode/Band setting -- QMSL per-band phone-mode identifiers.
# Fix: PHONE_MODE_LTE_B11 was defined twice (both times as 41); the
# duplicate assignment has been removed.  Spacing normalized to PEP 8.
PHONE_MODE_FM = 1  # (FM)
PHONE_MODE_SLEEP = 2  # (Sleep Mode)
PHONE_MODE_GPS = 3  # (GPS)
PHONE_MODE_GPS_SINAD = 4  # (GPS SINAD)
PHONE_MODE_CDMA_800 = 5  # (CDMA 800)
PHONE_MODE_CDMA_1900 = 6  # (CDMA 1900)
PHONE_MODE_CDMA_1800 = 8  # (CDMA 1800)
PHONE_MODE_J_CDMA = 14  # (JCDMA)
PHONE_MODE_CDMA_450 = 17  # (CDMA 450)
PHONE_MODE_CDMA_IMT = 19  # (CDMA IMT)
PHONE_MODE_CDMA_1900_EXT = 26  # Secndary CDMA 1900MHz Band, Band Class 14
PHONE_MODE_CDMA_450_EXT = 27  # CDMA BC 11 (450 Extension)
PHONE_MODE_CDMA_800_SEC = 33  # Secondary CDMA 800MHz Band, Band Class 10
PHONE_MODE_WCDMA_IMT = 9  # (WCDMA IMT, Band I)
PHONE_MODE_GSM_900 = 10  # (GSM 900)
PHONE_MODE_GSM_1800 = 11  # (GSM 1800)
PHONE_MODE_GSM_1900 = 12  # (GSM 1900)
PHONE_MODE_WCDMA_1900A = 15  # (WCDMA 1900 A, Band II Add)
PHONE_MODE_WCDMA_1900B = 16  # (WCDMA 1900 B, Band II Gen)
PHONE_MODE_GSM_850 = 18  # (GSM 850)
PHONE_MODE_WCDMA_800 = 22  # (WCDMA 800, Band V Gen)
PHONE_MODE_WCDMA_800A = 23  # (WCDMA 800, Band V Add)
PHONE_MODE_WCDMA_1800 = 25  # (WCDMA 1800, Band III)
PHONE_MODE_WCDMA_BC4 = 28  # (WCDMA BC4-used for both Band IV Gen and Band IV Add)
PHONE_MODE_WCDMA_BC8 = 29  # (WCDMA BC8, Band VIII)
PHONE_MODE_MF_700 = 30  # (MediaFLO)
PHONE_MODE_WCDMA_BC9 = 31  # (WCDMA BC9 (1750MHz & 1845MHz), Band IX)
PHONE_MODE_CDMA_BC15 = 32  # (CDMA Band Class 15)
PHONE_MODE_LTE_B1 = 34  # (LTE Band Class 1)
PHONE_MODE_LTE_B7 = 35  # (LTE Band Class 7)
PHONE_MODE_LTE_B4 = 42  # (LTE Band Class 4)
PHONE_MODE_LTE_B11 = 41  # (LTE Band Class 11)
PHONE_MODE_LTE_B13 = 36  # (LTE Band Class 13)
PHONE_MODE_LTE_B17 = 37  # (LTE Band Class 17)
PHONE_MODE_LTE_B38 = 38  # (LTE Band Class 38)
PHONE_MODE_LTE_B40 = 39  # (LTE Band Class 40)
PHONE_MODE_WCDMA_1500 = 40
PHONE_MODE_LTE_B2 = 43
PHONE_MODE_LTE_B3 = 44
PHONE_MODE_LTE_B5 = 45
PHONE_MODE_LTE_B6 = 46
PHONE_MODE_LTE_B8 = 47
PHONE_MODE_LTE_B9 = 48
PHONE_MODE_LTE_B10 = 49
PHONE_MODE_LTE_B12 = 50
PHONE_MODE_LTE_B14 = 51
PHONE_MODE_LTE_B15 = 52
PHONE_MODE_LTE_B16 = 53
PHONE_MODE_LTE_B18 = 54
PHONE_MODE_LTE_B19 = 55
PHONE_MODE_LTE_B20 = 56
PHONE_MODE_LTE_B21 = 57
PHONE_MODE_LTE_B22 = 58
PHONE_MODE_LTE_B23 = 59
PHONE_MODE_LTE_B24 = 60
PHONE_MODE_LTE_B25 = 61
PHONE_MODE_LTE_B26 = 62
PHONE_MODE_LTE_B27 = 63
PHONE_MODE_LTE_B28 = 64
PHONE_MODE_LTE_B29 = 65
PHONE_MODE_LTE_B30 = 66
PHONE_MODE_LTE_B31 = 67
PHONE_MODE_LTE_B32 = 68
PHONE_MODE_LTE_B33 = 69
PHONE_MODE_LTE_B34 = 70
PHONE_MODE_LTE_B35 = 71
PHONE_MODE_LTE_B36 = 72
PHONE_MODE_LTE_B37 = 73
PHONE_MODE_LTE_B39 = 74
PHONE_MODE_WCDMA_BC19 = 75
PHONE_MODE_LTE_B41 = 76
# TDSCDMA reserves 90 - 99
PHONE_MODE_TDSCDMA_B34 = 90
PHONE_MODE_TDSCDMA_B39 = 91
PHONE_MODE_TDSCDMA_B40 = 92
PHONE_MODE_MAX = 255  # (Last possible value, not a valid mode)
# LTE Bandwidth -- QMSL enum for the LTE channel bandwidth.
RFCOM_BW_LTE_1P4MHz = 0
RFCOM_BW_LTE_3MHz = 1
RFCOM_BW_LTE_5MHz = 2
RFCOM_BW_LTE_10MHz = 3
RFCOM_BW_LTE_15MHz = 4
RFCOM_BW_LTE_20MHz = 5
# GSM TX DataSources Enum
FTM_GSM_TX_DATA_SOURCE_PSDRND = 0  # Pseudorandom
FTM_GSM_TX_DATA_SOURCE_TONE = 1  # Single tone
FTM_GSM_TX_DATA_SOURCE_BUFFER = 2  # Buffer
FTM_GSM_TX_DATA_SOURCE_TWOTONE = 3  # 2 tone
# Band-QMSL variable mapping: test band name -> PHONE_MODE_* identifier.
Band_QMSL_map = dict(B1=PHONE_MODE_WCDMA_IMT, B2=PHONE_MODE_WCDMA_1900B,
                     B4=PHONE_MODE_WCDMA_BC4, B5=PHONE_MODE_WCDMA_800, B8=PHONE_MODE_WCDMA_BC8,
                     B9=PHONE_MODE_WCDMA_BC9, B19=PHONE_MODE_WCDMA_BC19)
# B281/B282 are the two halves of LTE band 28; both map to the same
# PHONE_MODE_LTE_B28 identifier.
LTE_Band_QMSL_map = dict(B1=PHONE_MODE_LTE_B1, B2=PHONE_MODE_LTE_B2, B3=PHONE_MODE_LTE_B3, B4=PHONE_MODE_LTE_B4, B5=PHONE_MODE_LTE_B5, B7=PHONE_MODE_LTE_B7,
                         B8=PHONE_MODE_LTE_B8, B11=PHONE_MODE_LTE_B11, B13=PHONE_MODE_LTE_B13, B17=PHONE_MODE_LTE_B17,
                         B19=PHONE_MODE_LTE_B19, B20=PHONE_MODE_LTE_B20, B21=PHONE_MODE_LTE_B21, B25=PHONE_MODE_LTE_B25,
                         B26=PHONE_MODE_LTE_B26, B281=PHONE_MODE_LTE_B28, B282=PHONE_MODE_LTE_B28, B30=PHONE_MODE_LTE_B30)
GSM_Band_QMSL_map = dict(EGSM=PHONE_MODE_GSM_900, GSM850=PHONE_MODE_GSM_850, DCS=PHONE_MODE_GSM_1800, PCS=PHONE_MODE_GSM_1900)
CDMA_Band_QMSL_map = dict(BC0=PHONE_MODE_CDMA_800, BC1=PHONE_MODE_CDMA_1900, BC10=PHONE_MODE_CDMA_800_SEC)
# Anritsu 8820C CALL Status codes.
ANRITSU_OFF = 0  # Call processing function set to Off
ANRITSU_IDLE = 1  # Idle state
ANRITSU_IDLE_REGIST = 2  # Idle( Regist ) Idle state (location registered)
ANRITSU_REGIST = 3  # Under location registration
ANRITSU_ORIGIN = 4  # Origination from a terminal
ANRITSU_TERMIN = 5  # Origination from the MT8815B/MT8820B (network)
ANRITSU_COMMUN = 6  # Under communication
ANRITSU_LOOP_1 = 7  # Loopback mode 1
ANRITSU_LOOP_1_OPEN = 8  # Loopback mode 1 open
ANRITSU_LOOP_1_CLOSE = 9  # Loopback mode 1 close
ANRITSU_LOOP_2 = 10  # Loopback mode 2
ANRITSU_LOOP_2_OPEN = 11  # Loopback mode 2 open
ANRITSU_LOOP_2_CLOSE = 12  # Loopback mode 2 close
ANRITSU_HAND = 13  # Under handover
ANRITSU_NW_RELEASE = 14  # Release by the MT8815B/MT8820B (network)
ANRITSU_UE_RELEASE = 15  # Release by a terminal
ANRITSU_OTHER = 16  # Other
| |
import unittest
import functools
import math
import mock
import numpy
from operator import mul
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import conv
import pooling_nd_helper
@testing.parameterize(*testing.product({
'dims': [(4,), (4, 3), (4, 3, 2)],
'cover_all': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestMaxPoolingND(unittest.TestCase):
def setUp(self):
self.ndim = len(self.dims)
self.ksize = (3,) * self.ndim
self.stride = (2,) * self.ndim
self.pad = (1,) * self.ndim
# Avoid unstability of numerical gradient
x_shape = (2, 3) + self.dims
self.x = numpy.arange(
functools.reduce(mul, x_shape), dtype=self.dtype).reshape(x_shape)
self.x = 2 * self.x / self.x.size - 1
outs = tuple(conv.get_conv_outsize(d, k, s, p, self.cover_all)
for (d, k, s, p)
in six.moves.zip(
self.dims, self.ksize, self.stride, self.pad))
gy_shape = (2, 3) + outs
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {
'atol': 1e-03, 'rtol': 1e-03}
def check_forward(self, x_data, use_cudnn='always'):
dims = self.dims
ksize = self.ksize
stride = self.stride
pad = self.pad
x = chainer.Variable(x_data)
with chainer.using_config('use_cudnn', use_cudnn):
y = functions.max_pooling_nd(x, ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
self.assertEqual(self.gy.shape, y_data.shape)
patches = pooling_nd_helper.pooling_patches(
dims, ksize, stride, pad, self.cover_all)
for k in six.moves.range(2):
for c in six.moves.range(3):
x = self.x[k, c]
expect = numpy.array([x[idx].max() for idx in patches])
expect = expect.reshape(y_data.shape[2:])
testing.assert_allclose(expect, y_data[k, c])
    @condition.retry(3)
    def test_forward_cpu(self):
        """Forward pass on CPU with cuDNN disabled."""
        self.check_forward(self.x, use_cudnn='never')
def test_forward_cpu_wide(self): # see #120
ndim = self.ndim
x_shape = (2, 3) + (15,) * ndim
x_data = numpy.random.rand(*x_shape).astype(self.dtype)
x = chainer.Variable(x_data)
ksize = stride = int(math.ceil(pow(32, 1.0 / ndim)))
functions.max_pooling_nd(x, ksize, stride=stride, pad=0)
    @attr.cudnn
    @condition.retry(3)
    def test_forward_gpu(self):
        """Forward pass on GPU with cuDNN allowed (default 'always')."""
        self.check_forward(cuda.to_gpu(self.x))
    @attr.cudnn
    @condition.retry(3)
    def test_forward_gpu_non_contiguous(self):
        """Forward pass on GPU with a Fortran-order (non-contiguous) input."""
        self.check_forward(cuda.cupy.asfortranarray(cuda.to_gpu(self.x)))
@attr.gpu
@condition.retry(3)
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), 'never')
def check_forward_consistency_regression(self, x_data, use_cudnn='always'):
# Regression test to max_pooling_2d.
if len(self.dims) != 2:
return
ksize = self.ksize
stride = self.stride
pad = self.pad
with chainer.using_config('use_cudnn', use_cudnn):
y_nd = functions.max_pooling_nd(self.x, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
y_2d = functions.max_pooling_2d(self.x, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
testing.assert_allclose(y_nd.data, y_2d.data)
@condition.retry(3)
def test_forward_consistency_regression_cpu(self):
self.check_forward_consistency_regression(self.x)
@attr.cudnn
@condition.retry(3)
def test_forward_consistency_regression_gpu(self):
self.check_forward_consistency_regression(cuda.to_gpu(self.x))
@attr.gpu
@condition.retry(3)
def test_forward_consistency_regression_no_cudnn(self):
self.check_forward_consistency_regression(cuda.to_gpu(self.x), 'never')
def check_backward(self, x_data, y_grad, use_cudnn='always'):
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_backward(
functions.MaxPoolingND(
self.ndim, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all),
x_data, y_grad, dtype='d', **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.cudnn
@condition.retry(3)
def test_backward_gpu_non_contiguous(self):
self.check_backward(
cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')
def check_backward_consistency_regression(self, x_data, gy_data,
use_cudnn='always'):
# Regression test to two-dimensional max pooling layer.
if len(self.dims) != 2:
return
ksize = self.ksize
stride = self.stride
pad = self.pad
xp = cuda.get_array_module(x_data)
# Backward computation for N-dimensional max pooling layer.
x_nd = chainer.Variable(xp.array(x_data))
with chainer.using_config('use_cudnn', use_cudnn):
func_nd = functions.MaxPoolingND(self.ndim, ksize, stride=stride,
pad=pad, cover_all=self.cover_all)
y_nd = func_nd(x_nd)
y_nd.grad = gy_data
y_nd.backward()
# Backward computation for two-dimensional max pooling layer.
x_2d = chainer.Variable(xp.array(x_data))
with chainer.using_config('use_cudnn', use_cudnn):
func_2d = functions.MaxPooling2D(ksize, stride=stride, pad=pad,
cover_all=self.cover_all)
y_2d = func_2d(x_2d)
y_2d.grad = gy_data
y_2d.backward()
# Test that the two result gradients are close enough.
testing.assert_allclose(x_nd.grad, x_2d.grad)
@condition.retry(3)
def test_backward_consistency_regression_cpu(self):
self.check_backward_consistency_regression(self.x, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_consistency_regression_gpu(self):
self.check_backward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_consistency_regression_no_cudnn(self):
self.check_backward_consistency_regression(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), use_cudnn='never')
def test_backward_cpu_more_than_once(self):
func = functions.MaxPoolingND(
self.ndim, self.ksize, stride=self.stride, pad=self.pad,
cover_all=self.cover_all)
func(self.x)
func.backward_cpu((self.x,), (self.gy,))
func.backward_cpu((self.x,), (self.gy,))
@testing.parameterize(*testing.product({
    'dims': [(4, 3, 2), (3, 2), (2,)],
    'use_cudnn': ['always', 'auto', 'never'],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestMaxPoolingNDCudnnCall(unittest.TestCase):
    """Verifies that cuDNN kernels are invoked exactly when the
    ``use_cudnn`` config says they should be (and only for ndim > 1)."""

    def setUp(self):
        self.ndim = len(self.dims)
        self.ksize = (3,) * self.ndim
        self.stride = (2,) * self.ndim
        self.pad = (1,) * self.ndim
        x_shape = (2, 3) + self.dims
        self.x = cuda.cupy.arange(functools.reduce(mul, x_shape),
                                  dtype=self.dtype).reshape(x_shape)
        gy_shape = (2, 3) + tuple(
            conv.get_conv_outsize(d, k, s, p)
            for (d, k, s, p)
            in six.moves.zip(self.dims, self.ksize, self.stride, self.pad))
        self.gy = cuda.cupy.random.uniform(-1, 1, gy_shape).astype(self.dtype)

    def forward(self):
        x = chainer.Variable(self.x)
        return functions.max_pooling_nd(
            x, self.ksize, self.stride, self.pad, cover_all=False)

    def test_call_cudnn_forward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with mock.patch('cupy.cudnn.cudnn.poolingForward') as func:
                self.forward()
                # cuDNN pooling is only used for ndim > 1.
                self.assertEqual(func.called,
                                 chainer.should_use_cudnn('>=auto') and
                                 self.ndim > 1)

    def test_call_cudnn_backward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            expect = chainer.should_use_cudnn('>=auto') and self.ndim > 1
            y = self.forward()
        # should be consistent to forward regardless of use_cudnn config
        y.grad = self.gy
        with mock.patch('cupy.cudnn.cudnn.poolingBackward') as func:
            y.backward()
            self.assertEqual(func.called, expect)
# Run this module's tests when it is executed directly.
testing.run_module(__name__, __file__)
| |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
from aenum import Enum
from .. import statics
from ..statics import long
class Traversal(object):
    """A lazily evaluated graph traversal.

    Iterating the traversal applies its strategies (which populate
    ``self.traversers``) and then streams result objects, expanding each
    traverser according to its bulk count.
    """

    def __init__(self, graph, traversal_strategies, bytecode):
        self.graph = graph
        self.traversal_strategies = traversal_strategies
        self.bytecode = bytecode
        self.side_effects = TraversalSideEffects()
        # Populated lazily by the strategies on first iteration.
        self.traversers = None
        self.last_traverser = None

    def __repr__(self):
        return str(self.bytecode)

    def __eq__(self, other):
        # Two traversals are equal iff their bytecode is equal.
        if isinstance(other, self.__class__):
            return self.bytecode == other.bytecode
        else:
            return False

    def __iter__(self):
        return self

    def __next__(self):
        if self.traversers is None:
            self.traversal_strategies.apply_strategies(self)
        if self.last_traverser is None:
            self.last_traverser = next(self.traversers)
        # Renamed from ``object`` to avoid shadowing the builtin.
        obj = self.last_traverser.object
        # A traverser with bulk n yields its object n times.
        self.last_traverser.bulk = self.last_traverser.bulk - 1
        if self.last_traverser.bulk <= 0:
            self.last_traverser = None
        return obj

    def toList(self):
        """Drain the traversal into a list."""
        return list(iter(self))

    def toSet(self):
        """Drain the traversal into a set (duplicates collapse)."""
        return set(iter(self))

    def iterate(self):
        """Exhaust the traversal for its side effects; returns self."""
        while True:
            try: self.nextTraverser()
            except StopIteration: return self

    def nextTraverser(self):
        """Return the next raw traverser (object + remaining bulk)."""
        if self.traversers is None:
            self.traversal_strategies.apply_strategies(self)
        if self.last_traverser is None:
            return next(self.traversers)
        else:
            temp = self.last_traverser
            self.last_traverser = None
            return temp

    def next(self, amount=None):
        """Return the next result, or up to ``amount`` results as a list."""
        if amount is None:
            return self.__next__()
        else:
            count = 0
            tempList = []
            while count < amount:
                count = count + 1
                try: temp = self.__next__()
                except StopIteration: return tempList
                tempList.append(temp)
            return tempList

    def promise(self, cb=None):
        """Asynchronously evaluate the traversal; returns a future.

        If ``cb`` is given it is applied to this traversal once results
        arrive and its return value resolves the future.
        """
        self.traversal_strategies.apply_async_strategies(self)
        # NOTE(review): ``remote_results`` is presumably attached by an async
        # strategy during apply_async_strategies — confirm with the remote
        # connection implementation.
        future_traversal = self.remote_results
        future = type(future_traversal)()

        def process(f):
            try:
                traversal = f.result()
            except Exception as e:
                future.set_exception(e)
            else:
                self.traversers = iter(traversal.traversers)
                self.side_effects = traversal.side_effects
                if cb:
                    try:
                        result = cb(self)
                    except Exception as e:
                        future.set_exception(e)
                    else:
                        future.set_result(result)
                else:
                    future.set_result(self)
        future_traversal.add_done_callback(process)
        return future
# Enum definitions mirroring the Gremlin language tokens.  Names that would
# shadow Python keywords or builtins carry a trailing underscore (list_,
# set_, global_, all_, and_, or_).  Each member is also registered with
# ``statics`` so DSL users can reference it unqualified (e.g. ``incr``
# instead of ``Order.incr``) after a star-import of statics.
Barrier = Enum('Barrier', ' normSack')
statics.add_static('normSack', Barrier.normSack)
Cardinality = Enum('Cardinality', ' list_ set_ single')
statics.add_static('single', Cardinality.single)
statics.add_static('list_', Cardinality.list_)
statics.add_static('set_', Cardinality.set_)
Column = Enum('Column', ' keys values')
statics.add_static('keys', Column.keys)
statics.add_static('values', Column.values)
Direction = Enum('Direction', ' BOTH IN OUT')
statics.add_static('OUT', Direction.OUT)
statics.add_static('IN', Direction.IN)
statics.add_static('BOTH', Direction.BOTH)
GraphSONVersion = Enum('GraphSONVersion', ' V1_0 V2_0 V3_0')
statics.add_static('V1_0', GraphSONVersion.V1_0)
statics.add_static('V2_0', GraphSONVersion.V2_0)
statics.add_static('V3_0', GraphSONVersion.V3_0)
GryoVersion = Enum('GryoVersion', ' V1_0 V3_0')
statics.add_static('V1_0', GryoVersion.V1_0)
statics.add_static('V3_0', GryoVersion.V3_0)
Operator = Enum('Operator', ' addAll and_ assign div max min minus mult or_ sum sumLong')
statics.add_static('sum', Operator.sum)
statics.add_static('minus', Operator.minus)
statics.add_static('mult', Operator.mult)
statics.add_static('div', Operator.div)
statics.add_static('min', Operator.min)
statics.add_static('max', Operator.max)
statics.add_static('assign', Operator.assign)
statics.add_static('and_', Operator.and_)
statics.add_static('or_', Operator.or_)
statics.add_static('addAll', Operator.addAll)
statics.add_static('sumLong', Operator.sumLong)
Order = Enum('Order', ' decr incr shuffle')
statics.add_static('incr', Order.incr)
statics.add_static('decr', Order.decr)
statics.add_static('shuffle', Order.shuffle)
Pick = Enum('Pick', ' any none')
statics.add_static('any', Pick.any)
statics.add_static('none', Pick.none)
Pop = Enum('Pop', ' all_ first last mixed')
statics.add_static('first', Pop.first)
statics.add_static('last', Pop.last)
statics.add_static('all_', Pop.all_)
statics.add_static('mixed', Pop.mixed)
Scope = Enum('Scope', ' global_ local')
statics.add_static('global_', Scope.global_)
statics.add_static('local', Scope.local)
T = Enum('T', ' id key label value')
statics.add_static('label', T.label)
statics.add_static('id', T.id)
statics.add_static('key', T.key)
statics.add_static('value', T.value)
class P(object):
    """A predicate used for filtering within traversal steps (e.g. has(),
    where()).

    Fix: the original class defined ``and_``, ``or_``, ``__eq__`` and
    ``__repr__`` twice with identical bodies; the duplicate block has been
    removed (the later definitions silently overrode the earlier ones).
    """

    def __init__(self, operator, value, other=None):
        self.operator = operator
        self.value = value
        # ``other`` is the second operand for binary predicates
        # (e.g. between, inside, outside); None for unary predicates.
        self.other = other

    @staticmethod
    def between(*args):
        return P("between", *args)

    @staticmethod
    def eq(*args):
        return P("eq", *args)

    @staticmethod
    def gt(*args):
        return P("gt", *args)

    @staticmethod
    def gte(*args):
        return P("gte", *args)

    @staticmethod
    def inside(*args):
        return P("inside", *args)

    @staticmethod
    def lt(*args):
        return P("lt", *args)

    @staticmethod
    def lte(*args):
        return P("lte", *args)

    @staticmethod
    def neq(*args):
        return P("neq", *args)

    @staticmethod
    def not_(*args):
        return P("not_", *args)

    @staticmethod
    def outside(*args):
        return P("outside", *args)

    @staticmethod
    def test(*args):
        return P("test", *args)

    @staticmethod
    def within(*args):
        return P("within", *args)

    @staticmethod
    def without(*args):
        return P("without", *args)

    def and_(self, arg):
        """Compose this predicate with another via logical AND."""
        return P("and", self, arg)

    def or_(self, arg):
        """Compose this predicate with another via logical OR."""
        return P("or", self, arg)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.operator == other.operator and self.value == other.value and self.other == other.other

    def __repr__(self):
        # Rendered as operator(value) or operator(value,other).
        return self.operator + "(" + str(self.value) + ")" if self.other is None else self.operator + "(" + str(self.value) + "," + str(self.other) + ")"
# Module-level convenience wrappers delegating to the corresponding ``P``
# static constructors.  Each is registered with ``statics`` so it can be used
# unqualified inside traversals (e.g. has('age', gt(30))).
def between(*args):
    return P.between(*args)
statics.add_static('between',between)
def eq(*args):
    return P.eq(*args)
statics.add_static('eq',eq)
def gt(*args):
    return P.gt(*args)
statics.add_static('gt',gt)
def gte(*args):
    return P.gte(*args)
statics.add_static('gte',gte)
def inside(*args):
    return P.inside(*args)
statics.add_static('inside',inside)
def lt(*args):
    return P.lt(*args)
statics.add_static('lt',lt)
def lte(*args):
    return P.lte(*args)
statics.add_static('lte',lte)
def neq(*args):
    return P.neq(*args)
statics.add_static('neq',neq)
def not_(*args):
    return P.not_(*args)
statics.add_static('not_',not_)
def outside(*args):
    return P.outside(*args)
statics.add_static('outside',outside)
def test(*args):
    return P.test(*args)
statics.add_static('test',test)
def within(*args):
    return P.within(*args)
statics.add_static('within',within)
def without(*args):
    return P.without(*args)
statics.add_static('without',without)
'''
TRAVERSER
'''
class Traverser(object):
    """Pairs a result object with its bulk — the number of identical
    results it stands for.  Equality ignores the bulk."""

    def __init__(self, object, bulk=None):
        # Default bulk is one (as the project's ``long`` type).
        self.object = object
        self.bulk = long(1) if bulk is None else bulk

    def __repr__(self):
        return str(self.object)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.object == other.object
'''
TRAVERSAL SIDE-EFFECTS
'''
class TraversalSideEffects(object):
    """Empty side-effect view; remote implementations provide real data by
    overriding ``keys`` and ``get``."""

    def keys(self):
        # Nothing is tracked locally.
        return set()

    def get(self, key):
        raise KeyError(key)

    def __getitem__(self, key):
        # Dict-style access delegates to get().
        return self.get(key)

    def __repr__(self):
        return "sideEffects[size:%s]" % len(self.keys())
'''
TRAVERSAL STRATEGIES
'''
class TraversalStrategies(object):
    """An ordered collection of strategies applied to a traversal before
    evaluation.

    Fix: ``__init__`` previously aliased the source object's strategy list;
    it now takes a defensive copy so in-place mutation of one object's list
    cannot leak into the other.
    """

    # Shared cache of strategies, keyed externally.
    global_cache = {}

    def __init__(self, traversal_strategies=None):
        if traversal_strategies is not None:
            # Copy, don't alias, the source's list.
            self.traversal_strategies = list(traversal_strategies.traversal_strategies)
        else:
            self.traversal_strategies = []

    def add_strategies(self, traversal_strategies):
        """Append the given strategies (a list) to this collection."""
        self.traversal_strategies = self.traversal_strategies + traversal_strategies

    def apply_strategies(self, traversal):
        """Synchronously apply every strategy to the traversal, in order."""
        for traversal_strategy in self.traversal_strategies:
            traversal_strategy.apply(traversal)

    def apply_async_strategies(self, traversal):
        """Asynchronously apply every strategy to the traversal, in order."""
        for traversal_strategy in self.traversal_strategies:
            traversal_strategy.apply_async(traversal)

    def __repr__(self):
        return str(self.traversal_strategies)
class TraversalStrategy(object):
    """Base strategy.  Subclasses override ``apply``/``apply_async`` to
    rewrite or evaluate a traversal.  Equality is class-based only; the
    hash comes from the strategy name."""

    def __init__(self, strategy_name=None, configuration=None):
        # Default the name to the concrete subclass name.
        if strategy_name is None:
            strategy_name = type(self).__name__
        self.strategy_name = strategy_name
        self.configuration = configuration if configuration is not None else {}

    def apply(self, traversal):
        # No-op by default.
        return

    def apply_async(self, traversal):
        # No-op by default.
        return

    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __hash__(self):
        return hash(self.strategy_name)

    def __repr__(self):
        return self.strategy_name
'''
BYTECODE
'''
class Bytecode(object):
    """Accumulates a traversal's source and step instructions along with the
    variable bindings discovered while converting their arguments."""

    def __init__(self, bytecode=None):
        self.source_instructions = []
        self.step_instructions = []
        self.bindings = {}
        if bytecode is not None:
            # Copy only the instruction lists of the template bytecode;
            # bindings are not copied (matches the copy semantics above).
            self.source_instructions = list(bytecode.source_instructions)
            self.step_instructions = list(bytecode.step_instructions)

    def add_source(self, source_name, *args):
        """Record a traversal-source instruction with converted arguments."""
        self.source_instructions.append(
            [source_name] + [self.__convertArgument(a) for a in args])

    def add_step(self, step_name, *args):
        """Record a step instruction with converted arguments."""
        self.step_instructions.append(
            [step_name] + [self.__convertArgument(a) for a in args])

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.source_instructions == other.source_instructions and
                self.step_instructions == other.step_instructions)

    def __convertArgument(self, arg):
        """Recursively normalize an instruction argument."""
        if isinstance(arg, Traversal):
            # Nested traversal: absorb its bindings, embed its bytecode.
            self.bindings.update(arg.bytecode.bindings)
            return arg.bytecode
        elif isinstance(arg, dict):
            return dict((self.__convertArgument(k), self.__convertArgument(v))
                        for k, v in arg.items())
        elif isinstance(arg, list):
            return [self.__convertArgument(item) for item in arg]
        elif isinstance(arg, set):
            return set(self.__convertArgument(item) for item in arg)
        elif isinstance(arg, tuple) and 2 == len(arg) and isinstance(arg[0], str):
            # A (key, value) pair becomes a named binding.
            self.bindings[arg[0]] = arg[1]
            return Binding(arg[0], self.__convertArgument(arg[1]))
        else:
            return arg

    def __repr__(self):
        sources = str(self.source_instructions) if len(self.source_instructions) > 0 else ""
        steps = str(self.step_instructions) if len(self.step_instructions) > 0 else ""
        return sources + steps
'''
BINDINGS
'''
class Bindings(object):
    """Produces (key, value) 2-tuples that Bytecode later converts into
    named Binding objects."""

    def of(self, key, value):
        # Only string keys are valid binding names.
        if isinstance(key, str):
            return (key, value)
        raise TypeError("Key must be str")
class Binding(object):
    """A named value binding; equality and hashing cover both key and
    value."""

    def __init__(self, key, value):
        self.key = key
        self.value = value

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.key == other.key and self.value == other.value

    def __hash__(self):
        return hash(self.key) + hash(self.value)

    def __repr__(self):
        return "binding[{}={}]".format(self.key, self.value)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class ProxyOnlyResource(msrest.serialization.Model):
    """A proxy only azure resource object.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ProxyOnlyResource, self).__init__(**kwargs)
        # id/name/type are read-only: the service fills them in, so they
        # always start as None on the client.
        self.id = None
        self.name = None
        self.type = None
class DiagnosticSettingsCategoryResource(ProxyOnlyResource):
    """The diagnostic settings category resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param category_type: The type of the diagnostic settings category. Possible values include:
     "Metrics", "Logs".
    :type category_type: str or ~$(python-base-namespace).v2017_05_01_preview.models.CategoryType
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'category_type': {'key': 'properties.categoryType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DiagnosticSettingsCategoryResource, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.category_type = kwargs.get('category_type')
class DiagnosticSettingsCategoryResourceCollection(msrest.serialization.Model):
    """Represents a collection of diagnostic setting category resources.

    :param value: The collection of diagnostic settings category resources.
    :type value:
     list[~$(python-base-namespace).v2017_05_01_preview.models.DiagnosticSettingsCategoryResource]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DiagnosticSettingsCategoryResource]'},
    }

    def __init__(self, **kwargs):
        super(DiagnosticSettingsCategoryResourceCollection, self).__init__(**kwargs)
        # Optional list of category resources.
        self.value = kwargs.get('value')
class DiagnosticSettingsResource(ProxyOnlyResource):
    """The diagnostic setting resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param storage_account_id: The resource ID of the storage account to which you would like to
     send Diagnostic Logs.
    :type storage_account_id: str
    :param service_bus_rule_id: The service bus rule Id of the diagnostic setting. This is here to
     maintain backwards compatibility.
    :type service_bus_rule_id: str
    :param event_hub_authorization_rule_id: The resource Id for the event hub authorization rule.
    :type event_hub_authorization_rule_id: str
    :param event_hub_name: The name of the event hub. If none is specified, the default event hub
     will be selected.
    :type event_hub_name: str
    :param metrics: The list of metric settings.
    :type metrics: list[~$(python-base-namespace).v2017_05_01_preview.models.MetricSettings]
    :param logs: The list of logs settings.
    :type logs: list[~$(python-base-namespace).v2017_05_01_preview.models.LogSettings]
    :param workspace_id: The full ARM resource ID of the Log Analytics workspace to which you would
     like to send Diagnostic Logs. Example:
     /subscriptions/4b9e8510-67ab-4e9a-95a9-e2f1e570ea9c/resourceGroups/insights-integration/providers/Microsoft.OperationalInsights/workspaces/viruela2.
    :type workspace_id: str
    :param log_analytics_destination_type: A string indicating whether the export to Log Analytics
     should use the default destination type, i.e. AzureDiagnostics, or use a destination type
     constructed as follows: :code:`<normalized service identity>`_:code:`<normalized category
     name>`. Possible values are: Dedicated and null (null is default.).
    :type log_analytics_destination_type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'storage_account_id': {'key': 'properties.storageAccountId', 'type': 'str'},
        'service_bus_rule_id': {'key': 'properties.serviceBusRuleId', 'type': 'str'},
        'event_hub_authorization_rule_id': {'key': 'properties.eventHubAuthorizationRuleId', 'type': 'str'},
        'event_hub_name': {'key': 'properties.eventHubName', 'type': 'str'},
        'metrics': {'key': 'properties.metrics', 'type': '[MetricSettings]'},
        'logs': {'key': 'properties.logs', 'type': '[LogSettings]'},
        'workspace_id': {'key': 'properties.workspaceId', 'type': 'str'},
        'log_analytics_destination_type': {'key': 'properties.logAnalyticsDestinationType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(DiagnosticSettingsResource, self).__init__(**kwargs)
        # Every diagnostic-setting property is optional; default to None.
        for prop in ('storage_account_id', 'service_bus_rule_id',
                     'event_hub_authorization_rule_id', 'event_hub_name',
                     'metrics', 'logs', 'workspace_id',
                     'log_analytics_destination_type'):
            setattr(self, prop, kwargs.get(prop))
class DiagnosticSettingsResourceCollection(msrest.serialization.Model):
    """Represents a collection of alert rule resources.

    :param value: The collection of diagnostic settings resources;.
    :type value:
     list[~$(python-base-namespace).v2017_05_01_preview.models.DiagnosticSettingsResource]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[DiagnosticSettingsResource]'},
    }

    def __init__(self, **kwargs):
        super(DiagnosticSettingsResourceCollection, self).__init__(**kwargs)
        # Optional list of diagnostic settings resources.
        self.value = kwargs.get('value')
class ErrorResponse(msrest.serialization.Model):
    """Describes the format of Error response.

    :param code: Error code.
    :type code: str
    :param message: Error message indicating why the operation failed.
    :type message: str
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ErrorResponse, self).__init__(**kwargs)
        # Both fields are optional in an error payload.
        self.code = kwargs.get('code')
        self.message = kwargs.get('message')
class LocalizableString(msrest.serialization.Model):
    """The localizable string class.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. the invariant value.
    :type value: str
    :param localized_value: the locale specific value.
    :type localized_value: str
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LocalizableString, self).__init__(**kwargs)
        # 'value' is required: a missing key raises KeyError here.
        self.value = kwargs['value']
        self.localized_value = kwargs.get('localized_value')
class LogSettings(msrest.serialization.Model):
    """Part of MultiTenantDiagnosticSettings. Specifies the settings for a particular log.

    All required parameters must be populated in order to send to Azure.

    :param category: Name of a Diagnostic Log category for a resource type this setting is applied
     to. To obtain the list of Diagnostic Log categories for a resource, first perform a GET
     diagnostic settings operation.
    :type category: str
    :param enabled: Required. a value indicating whether this log is enabled.
    :type enabled: bool
    :param retention_policy: the retention policy for this log.
    :type retention_policy: ~$(python-base-namespace).v2017_05_01_preview.models.RetentionPolicy
    """

    _validation = {
        'enabled': {'required': True},
    }

    _attribute_map = {
        'category': {'key': 'category', 'type': 'str'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'retention_policy': {'key': 'retentionPolicy', 'type': 'RetentionPolicy'},
    }

    def __init__(self, **kwargs):
        super(LogSettings, self).__init__(**kwargs)
        self.category = kwargs.get('category')
        # 'enabled' is required: a missing key raises KeyError here.
        self.enabled = kwargs['enabled']
        self.retention_policy = kwargs.get('retention_policy')
class MetadataValue(msrest.serialization.Model):
    """Represents a metric metadata value.

    :param name: the name of the metadata.
    :type name: ~$(python-base-namespace).v2017_05_01_preview.models.LocalizableString
    :param value: the value of the metadata.
    :type value: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'LocalizableString'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(MetadataValue, self).__init__(**kwargs)
        # Both fields are optional.
        self.name = kwargs.get('name')
        self.value = kwargs.get('value')
class Metric(msrest.serialization.Model):
    """The result data of a query.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. the metric Id.
    :type id: str
    :param type: Required. the resource type of the metric resource.
    :type type: str
    :param name: Required. the name and the display name of the metric, i.e. it is localizable
     string.
    :type name: ~$(python-base-namespace).v2017_05_01_preview.models.LocalizableString
    :param display_description: Detailed description of this metric.
    :type display_description: str
    :param error_code: 'Success' or the error details on query failures for this metric.
    :type error_code: str
    :param error_message: Error message encountered querying this specific metric.
    :type error_message: str
    :param unit: Required. the unit of the metric. Possible values include: "Count", "Bytes",
     "Seconds", "CountPerSecond", "BytesPerSecond", "Percent", "MilliSeconds", "ByteSeconds",
     "Unspecified", "Cores", "MilliCores", "NanoCores", "BitsPerSecond".
    :type unit: str or ~$(python-base-namespace).v2017_05_01_preview.models.Unit
    :param timeseries: Required. the time series returned when a data query is performed.
    :type timeseries: list[~$(python-base-namespace).v2017_05_01_preview.models.TimeSeriesElement]
    """

    _validation = {
        'id': {'required': True},
        'type': {'required': True},
        'name': {'required': True},
        'unit': {'required': True},
        'timeseries': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'LocalizableString'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'error_code': {'key': 'errorCode', 'type': 'str'},
        'error_message': {'key': 'errorMessage', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'timeseries': {'key': 'timeseries', 'type': '[TimeSeriesElement]'},
    }

    def __init__(self, **kwargs):
        super(Metric, self).__init__(**kwargs)
        # Required fields: missing keys raise KeyError.
        self.id = kwargs['id']
        self.type = kwargs['type']
        self.name = kwargs['name']
        self.unit = kwargs['unit']
        self.timeseries = kwargs['timeseries']
        # Optional fields default to None.
        self.display_description = kwargs.get('display_description')
        self.error_code = kwargs.get('error_code')
        self.error_message = kwargs.get('error_message')
class MetricAvailability(msrest.serialization.Model):
    """Metric availability specifies the time grain (aggregation interval or frequency) and the retention period for that time grain.

    :param time_grain: the time grain specifies the aggregation interval for the metric. Expressed
     as a duration 'PT1M', 'P1D', etc.
    :type time_grain: ~datetime.timedelta
    :param retention: the retention period for the metric at the specified timegrain. Expressed as
     a duration 'PT1M', 'P1D', etc.
    :type retention: ~datetime.timedelta
    """

    _attribute_map = {
        'time_grain': {'key': 'timeGrain', 'type': 'duration'},
        'retention': {'key': 'retention', 'type': 'duration'},
    }

    def __init__(self, **kwargs):
        super(MetricAvailability, self).__init__(**kwargs)
        # Both fields are optional ISO-8601 durations.
        self.time_grain = kwargs.get('time_grain')
        self.retention = kwargs.get('retention')
class MetricDefinition(msrest.serialization.Model):
    """Metric definition class specifies the metadata for a metric.

    :param is_dimension_required: Flag to indicate whether the dimension is required.
    :type is_dimension_required: bool
    :param resource_id: the resource identifier of the resource that emitted the metric.
    :type resource_id: str
    :param name: the name and the display name of the metric, i.e. it is a localizable string.
    :type name: ~$(python-base-namespace).v2017_05_01_preview.models.LocalizableString
    :param display_description: Detailed description of this metric.
    :type display_description: str
    :param category: Custom category name for this metric.
    :type category: str
    :param unit: the unit of the metric. Possible values include: "Count", "Bytes", "Seconds",
     "CountPerSecond", "BytesPerSecond", "Percent", "MilliSeconds", "ByteSeconds", "Unspecified",
     "Cores", "MilliCores", "NanoCores", "BitsPerSecond".
    :type unit: str or ~$(python-base-namespace).v2017_05_01_preview.models.Unit
    :param primary_aggregation_type: the primary aggregation type value defining how to use the
     values for display. Possible values include: "None", "Average", "Count", "Minimum", "Maximum",
     "Total".
    :type primary_aggregation_type: str or
     ~$(python-base-namespace).v2017_05_01_preview.models.AggregationType
    :param metric_availabilities: the collection of what aggregation intervals are available to be
     queried.
    :type metric_availabilities:
     list[~$(python-base-namespace).v2017_05_01_preview.models.MetricAvailability]
    :param id: the resource identifier of the metric definition.
    :type id: str
    :param dimensions: the name and the display name of the dimension, i.e. it is a localizable
     string.
    :type dimensions: list[~$(python-base-namespace).v2017_05_01_preview.models.LocalizableString]
    """

    _attribute_map = {
        'is_dimension_required': {'key': 'isDimensionRequired', 'type': 'bool'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'name': {'key': 'name', 'type': 'LocalizableString'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'category': {'key': 'category', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'primary_aggregation_type': {'key': 'primaryAggregationType', 'type': 'str'},
        'metric_availabilities': {'key': 'metricAvailabilities', 'type': '[MetricAvailability]'},
        'id': {'key': 'id', 'type': 'str'},
        'dimensions': {'key': 'dimensions', 'type': '[LocalizableString]'},
    }

    def __init__(self, **kwargs):
        super(MetricDefinition, self).__init__(**kwargs)
        # Every property of a metric definition is optional.
        for prop in ('is_dimension_required', 'resource_id', 'name',
                     'display_description', 'category', 'unit',
                     'primary_aggregation_type', 'metric_availabilities',
                     'id', 'dimensions'):
            setattr(self, prop, kwargs.get(prop))
class MetricDefinitionCollection(msrest.serialization.Model):
    """Represents collection of metric definitions.

    All required parameters must be populated in order to send to Azure.

    :param value: Required. the values for the metric definitions.
    :type value: list[~$(python-base-namespace).v2017_05_01_preview.models.MetricDefinition]
    """

    _validation = {
        'value': {'required': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[MetricDefinition]'},
    }

    def __init__(self, **kwargs):
        super(MetricDefinitionCollection, self).__init__(**kwargs)
        # 'value' is required: a missing argument raises KeyError here
        # instead of silently producing a half-initialized model.
        self.value = kwargs['value']
class MetricSettings(msrest.serialization.Model):
    """Part of MultiTenantDiagnosticSettings. Specifies the settings for a particular metric.

    All required parameters must be populated in order to send to Azure.

    :param time_grain: the timegrain of the metric in ISO8601 format.
    :type time_grain: ~datetime.timedelta
    :param category: Name of a Diagnostic Metric category for a resource type this setting is
     applied to. To obtain the list of Diagnostic metric categories for a resource, first perform a
     GET diagnostic settings operation.
    :type category: str
    :param enabled: Required. a value indicating whether this category is enabled.
    :type enabled: bool
    :param retention_policy: the retention policy for this category.
    :type retention_policy: ~$(python-base-namespace).v2017_05_01_preview.models.RetentionPolicy
    """

    _validation = {
        'enabled': {'required': True},
    }

    _attribute_map = {
        'time_grain': {'key': 'timeGrain', 'type': 'duration'},
        'category': {'key': 'category', 'type': 'str'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'retention_policy': {'key': 'retentionPolicy', 'type': 'RetentionPolicy'},
    }

    def __init__(self, **kwargs):
        super(MetricSettings, self).__init__(**kwargs)
        # Optional settings default to None when absent.
        for optional in ('time_grain', 'category'):
            setattr(self, optional, kwargs.get(optional))
        # 'enabled' is required: raise KeyError when it is missing.
        self.enabled = kwargs['enabled']
        self.retention_policy = kwargs.get('retention_policy')
class MetricValue(msrest.serialization.Model):
    """Represents a metric value.

    All required parameters must be populated in order to send to Azure.

    :param time_stamp: Required. the timestamp for the metric value in ISO 8601 format.
    :type time_stamp: ~datetime.datetime
    :param average: the average value in the time range.
    :type average: float
    :param minimum: the least value in the time range.
    :type minimum: float
    :param maximum: the greatest value in the time range.
    :type maximum: float
    :param total: the sum of all of the values in the time range.
    :type total: float
    :param count: the number of samples in the time range. Can be used to determine the number of
     values that contributed to the average value.
    :type count: long
    """

    _validation = {
        'time_stamp': {'required': True},
    }

    _attribute_map = {
        'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
        'average': {'key': 'average', 'type': 'float'},
        'minimum': {'key': 'minimum', 'type': 'float'},
        'maximum': {'key': 'maximum', 'type': 'float'},
        'total': {'key': 'total', 'type': 'float'},
        'count': {'key': 'count', 'type': 'long'},
    }

    def __init__(self, **kwargs):
        super(MetricValue, self).__init__(**kwargs)
        # 'time_stamp' is required: raise KeyError when it is missing.
        self.time_stamp = kwargs['time_stamp']
        # All of the aggregation fields are optional and default to None.
        for optional in ('average', 'minimum', 'maximum', 'total', 'count'):
            setattr(self, optional, kwargs.get(optional))
class Response(msrest.serialization.Model):
    """The response to a metrics query.

    All required parameters must be populated in order to send to Azure.

    :param cost: The integer value representing the relative cost of the query.
    :type cost: int
    :param timespan: Required. The timespan for which the data was retrieved. Its value consists of
     two datetimes concatenated, separated by '/'. This may be adjusted in the future and returned
     back from what was originally requested.
    :type timespan: str
    :param interval: The interval (window size) for which the metric data was returned in. This
     may be adjusted in the future and returned back from what was originally requested. This is
     not present if a metadata request was made.
    :type interval: ~datetime.timedelta
    :param value: Required. the value of the collection.
    :type value: list[~$(python-base-namespace).v2017_05_01_preview.models.Metric]
    """

    _validation = {
        'cost': {'minimum': 0},
        'timespan': {'required': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'cost': {'key': 'cost', 'type': 'int'},
        'timespan': {'key': 'timespan', 'type': 'str'},
        'interval': {'key': 'interval', 'type': 'duration'},
        'value': {'key': 'value', 'type': '[Metric]'},
    }

    def __init__(self, **kwargs):
        super(Response, self).__init__(**kwargs)
        self.cost = kwargs.get('cost')
        self.timespan = kwargs['timespan']   # required
        self.interval = kwargs.get('interval')
        self.value = kwargs['value']         # required
class RetentionPolicy(msrest.serialization.Model):
    """Specifies the retention policy for the log.

    All required parameters must be populated in order to send to Azure.

    :param enabled: Required. a value indicating whether the retention policy is enabled.
    :type enabled: bool
    :param days: Required. the number of days for the retention in days. A value of 0 will retain
     the events indefinitely.
    :type days: int
    """

    _validation = {
        'enabled': {'required': True},
        'days': {'required': True, 'minimum': 0},
    }

    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'days': {'key': 'days', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(RetentionPolicy, self).__init__(**kwargs)
        # Both fields are required: raise KeyError when either is missing.
        self.enabled = kwargs['enabled']
        self.days = kwargs['days']
class SubscriptionProxyOnlyResource(msrest.serialization.Model):
    """A proxy only azure resource object.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: Location of the resource.
    :type location: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SubscriptionProxyOnlyResource, self).__init__(**kwargs)
        # id/name/type are read-only and populated by the server, so they
        # always start out as None and any caller-supplied value is ignored.
        self.id = None
        self.name = None
        self.type = None
        self.location = kwargs.get('location')
class SubscriptionDiagnosticSettingsResource(SubscriptionProxyOnlyResource):
    """The subscription diagnostic setting resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Azure resource Id.
    :vartype id: str
    :ivar name: Azure resource name.
    :vartype name: str
    :ivar type: Azure resource type.
    :vartype type: str
    :param location: Location of the resource.
    :type location: str
    :param storage_account_id: The resource ID of the storage account to which you would like to
     send Diagnostic Logs.
    :type storage_account_id: str
    :param service_bus_rule_id: The service bus rule Id of the diagnostic setting. This is here to
     maintain backwards compatibility.
    :type service_bus_rule_id: str
    :param event_hub_authorization_rule_id: The resource Id for the event hub authorization rule.
    :type event_hub_authorization_rule_id: str
    :param event_hub_name: The name of the event hub. If none is specified, the default event hub
     will be selected.
    :type event_hub_name: str
    :param logs: The list of logs settings.
    :type logs: list[~$(python-base-namespace).v2017_05_01_preview.models.SubscriptionLogSettings]
    :param workspace_id: The full ARM resource ID of the Log Analytics workspace to which you would
     like to send Diagnostic Logs. Example:
     /subscriptions/4b9e8510-67ab-4e9a-95a9-e2f1e570ea9c/resourceGroups/insights-integration/providers/Microsoft.OperationalInsights/workspaces/viruela2.
    :type workspace_id: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'storage_account_id': {'key': 'properties.storageAccountId', 'type': 'str'},
        'service_bus_rule_id': {'key': 'properties.serviceBusRuleId', 'type': 'str'},
        'event_hub_authorization_rule_id': {'key': 'properties.eventHubAuthorizationRuleId', 'type': 'str'},
        'event_hub_name': {'key': 'properties.eventHubName', 'type': 'str'},
        'logs': {'key': 'properties.logs', 'type': '[SubscriptionLogSettings]'},
        'workspace_id': {'key': 'properties.workspaceId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SubscriptionDiagnosticSettingsResource, self).__init__(**kwargs)
        # All diagnostic-setting properties are optional; default to None.
        for optional in (
            'storage_account_id',
            'service_bus_rule_id',
            'event_hub_authorization_rule_id',
            'event_hub_name',
            'logs',
            'workspace_id',
        ):
            setattr(self, optional, kwargs.get(optional))
class SubscriptionDiagnosticSettingsResourceCollection(msrest.serialization.Model):
    """Represents a collection of subscription diagnostic settings resources.

    :param value: The collection of subscription diagnostic settings resources.
    :type value:
     list[~$(python-base-namespace).v2017_05_01_preview.models.SubscriptionDiagnosticSettingsResource]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[SubscriptionDiagnosticSettingsResource]'},
    }

    def __init__(self, **kwargs):
        super(SubscriptionDiagnosticSettingsResourceCollection, self).__init__(**kwargs)
        # 'value' is optional for this collection; default to None.
        self.value = kwargs.get('value')
class SubscriptionLogSettings(msrest.serialization.Model):
    """Part of Subscription diagnostic setting. Specifies the settings for a particular log.

    All required parameters must be populated in order to send to Azure.

    :param category: Name of a Subscription Diagnostic Log category for a resource type this
     setting is applied to.
    :type category: str
    :param enabled: Required. a value indicating whether this log is enabled.
    :type enabled: bool
    """

    _validation = {
        'enabled': {'required': True},
    }

    _attribute_map = {
        'category': {'key': 'category', 'type': 'str'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(SubscriptionLogSettings, self).__init__(**kwargs)
        self.category = kwargs.get('category')
        # 'enabled' is required: raise KeyError when it is missing.
        self.enabled = kwargs['enabled']
class TimeSeriesElement(msrest.serialization.Model):
    """A time series result type. The discriminator value is always TimeSeries in this case.

    :param metadatavalues: the metadata values returned if $filter was specified in the call.
    :type metadatavalues: list[~$(python-base-namespace).v2017_05_01_preview.models.MetadataValue]
    :param data: An array of data points representing the metric values. This is only returned if
     a result type of data is specified.
    :type data: list[~$(python-base-namespace).v2017_05_01_preview.models.MetricValue]
    """

    _attribute_map = {
        'metadatavalues': {'key': 'metadatavalues', 'type': '[MetadataValue]'},
        'data': {'key': 'data', 'type': '[MetricValue]'},
    }

    def __init__(self, **kwargs):
        super(TimeSeriesElement, self).__init__(**kwargs)
        # Both fields are optional; default to None when not supplied.
        for optional in ('metadatavalues', 'data'):
            setattr(self, optional, kwargs.get(optional))
| |
# ******************************************************************************
# pysimm.forcefield.tip3p module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2016 Michael E. Fortunato, Coray M. Colina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from itertools import permutations, combinations
from . import gasteiger
from ..system import Angle, Dihedral, Improper
from .forcefield import Forcefield
class Tip3p(Forcefield):
    """pysimm.forcefield.Tip3p

    Forcefield object with typing rules for the TIP3P water model.
    By default reads data file in forcefields subdirectory.

    Attributes:
        ff_name: tip3p
        pair_style: lj
        ff_class: 1
    """
    def __init__(self, db_file=None):
        # db_file=False explicitly skips loading a parameter database;
        # db_file=None (the default) loads the packaged tip3p.xml.
        if not db_file and db_file is not False:
            db_file = os.path.join(
                os.path.dirname(
                    os.path.realpath(__file__)
                ),
                os.pardir, 'data', 'forcefields', 'tip3p.xml'
            )
        Forcefield.__init__(self, db_file)
        self.name = 'tip3p'
        self.pair_style = 'lj'
        self.ff_class = '1'

    def assign_ptypes(self, s):
        """pysimm.forcefield.Tip3p.assign_ptypes

        Tip3p specific particle typing rules.
        Requires :class:`~pysimm.system.System` object :class:`~pysimm.system.Particle` objects have bonds defined.
        *** use System.add_particle_bonding() to ensure this ***

        Args:
            s: :class:`~pysimm.system.System`

        Returns:
            None (the particle is returned early if it cannot be typed)
        """
        all_types = set()
        s.pair_style = self.pair_style
        # Cache per-particle bonding information used by the typing rules.
        for p in s.particles:
            p.bonded_to = [x.a if p is x.b else x.b for x in p.bonds]
            p.bond_orders = [x.order for x in p.bonds]
            if None in p.bond_orders:
                # Bug fix: error_print() is never imported in this module, so
                # the old call raised NameError here; report the problem the
                # same way the other diagnostics in this class do.
                print('error: bond orders are not set')
            p.bond_elements = [x.a.elem if p is x.b else x.b.elem for x in
                               p.bonds]
            p.nbonds = len(p.bond_elements)
        for p in s.particles:
            # TIP3P water only has two particle types: hydrogen and oxygen.
            if p.elem == 'H':
                p.type_name = 'hw'
            elif p.elem == 'O':
                p.type_name = 'ow'
            else:
                print('cant type particle %s' % p.tag)
                return p
            type_ = self.particle_types.get(p.type_name)
            if not type_:
                print(p.tag, p.elem, p.type_name)
            # Reuse the lookup above instead of querying the dictionary twice.
            all_types.add(type_[0])
        # Copy the forcefield types into the system so the system owns them.
        for pt in all_types:
            pt = pt.copy()
            s.particle_types.add(pt)
        for p in s.particles:
            pt = s.particle_types.get(p.type_name)
            if pt:
                p.type = pt[0]

    def assign_btypes(self, s):
        """pysimm.forcefield.Tip3p.assign_btypes

        Tip3p specific bond typing rules.
        Requires :class:`~pysimm.system.System` object :class:`~pysimm.system.Particle` objects have type and type.name defined.
        *** use after assign_ptypes ***

        Args:
            s: :class:`~pysimm.system.System`

        Returns:
            None (the bond is returned early if it cannot be typed)
        """
        all_types = set()
        for b in s.bonds:
            bt = self.bond_types.get('%s,%s' % (b.a.type.name, b.b.type.name))
            if bt:
                b.type_name = bt[0].name
            else:
                print('couldnt type this bond %s,%s'
                      % (b.a.type.name, b.b.type.name))
                return b
            all_types.add(self.bond_types.get(b.type_name)[0])
        # Copy the forcefield types into the system so the system owns them.
        for bt in all_types:
            bt = bt.copy()
            s.bond_types.add(bt)
        for b in s.bonds:
            bt = s.bond_types.get(b.type_name)
            if bt:
                b.type = bt[0]

    def assign_atypes(self, s):
        """pysimm.forcefield.Tip3p.assign_atypes

        Tip3p specific angle typing rules.
        Requires :class:`~pysimm.system.System` object :class:`~pysimm.system.Particle` objects have bonds, type and type.name defined.
        *** use after assign_ptypes ***

        Args:
            s: :class:`~pysimm.system.System`

        Returns:
            None
        """
        all_types = set()
        for p in s.particles:
            p.bonded_to = [x.a if p is x.b else x.b for x in p.bonds]
            # Consider every ordered pair of neighbors around p as a
            # candidate angle p1-p-p2, skipping pairs already recorded
            # (in either orientation).
            for p1 in p.bonded_to:
                for p2 in p.bonded_to:
                    if p1 is not p2:
                        unique = True
                        for a in s.angles:
                            if ((a.a is p1 and a.b is p and a.c is p2) or
                                    (a.a is p2 and a.b is p and a.c is p1)):
                                unique = False
                        if unique:
                            at = self.angle_types.get('%s,%s,%s'
                                                      % (p1.type.name,
                                                         p.type.name,
                                                         p2.type.name))
                            if at:
                                s.angles.add(Angle(type_name=at[0].name,
                                                   a=p1, b=p, c=p2))
                                all_types.add(at[0])
                            else:
                                print('I cant type this angle %s,%s,%s'
                                      % (p1.type.name,
                                         p.type.name,
                                         p2.type.name))
        # Copy the forcefield types into the system so the system owns them.
        for at in all_types:
            at = at.copy()
            s.angle_types.add(at)
        for a in s.angles:
            at = s.angle_types.get(a.type_name)
            if at:
                a.type = at[0]

    def assign_dtypes(self, s):
        """pysimm.forcefield.Tip3p.assign_dtypes

        Tip3p specific dihedral typing rules.
        There are none (a single water molecule has no dihedrals).

        Args:
            s: :class:`~pysimm.system.System`

        Returns:
            None
        """
        pass

    def assign_itypes(self, s):
        """pysimm.forcefield.Tip3p.assign_itypes

        Tip3p specific improper typing rules.
        There are none (a single water molecule has no impropers).

        Args:
            s: :class:`~pysimm.system.System`

        Returns:
            None
        """
        pass

    def assign_charges(self, s, charges='default'):
        """pysimm.forcefield.Tip3p.assign_charges

        Tip3p specific charge assignment.

        Args:
            s: :class:`~pysimm.system.System`
            charges: 'default' applies the bond-increment charges from the
                forcefield database; 'gasteiger' uses Gasteiger charges

        Returns:
            None
        """
        if charges == 'gasteiger':
            print('adding gasteiger charges')
            gasteiger.set_charges(s)
        elif charges == 'default':
            print('adding default TIP3P charges')
            for p in s.particles:
                p.charge = 0
            # Accumulate bond-increment charges: each typed bond contributes
            # q1 to the particle matching the first name in the type and q2
            # to the other, so orientation of the stored name matters.
            for b in s.bonds:
                n1 = b.a.type.eq_bond or b.a.type.name
                n2 = b.b.type.eq_bond or b.b.type.name
                btype = self.bond_types.get('%s,%s' % (n1, n2))
                if btype:
                    btype = btype[0]
                    if btype.name == '%s,%s' % (n1, n2):
                        b.a.charge += float(btype.q1)
                        b.b.charge += float(btype.q2)
                    elif btype.name == '%s,%s' % (n2, n1):
                        b.a.charge += float(btype.q2)
                        b.b.charge += float(btype.q1)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from .data import MessageHELLO, MessageRA, MessageLSU, MessageLSR, \
isCompatibleVersion, getIdAndVersion
from .hello import HelloProtocol
from .link import LinkStateEngine
from .path import PathEngine
from .node import NodeTracker
from .message import Message
from traceback import format_exc, extract_stack
import time
##
# Import the Dispatch adapters from the environment. If they are not found
# (i.e. we are in a test bench, etc.), load the stub versions.
##
from ..dispatch import IoAdapter, LogAdapter, LOG_TRACE, LOG_INFO, LOG_ERROR, LOG_WARNING, LOG_STACK_LIMIT
from ..dispatch import TREATMENT_MULTICAST_FLOOD
class RouterEngine(object):
    """
    The Python control-plane engine for a router: owns the hello,
    link-state, path and node-tracking sub-engines and relays control
    messages between them and the C-level I/O adapters.
    """

    def __init__(self, router_adapter, router_id, area, max_routers, config_override=None):
        """
        Initialize an instance of a router for a domain.

        Fix: config_override previously defaulted to a shared mutable ``{}``
        (the classic mutable-default pitfall); ``None`` is now the default
        and is normalized to a fresh dict per instance.
        """
        if config_override is None:
            config_override = {}
        ##
        # Record important information about this router instance
        ##
        self.domain = "domain"
        self.router_adapter = router_adapter
        self._config = None  # Not yet loaded; see the `config` property.
        self._log_hello = LogAdapter("ROUTER_HELLO")
        self._log_ls = LogAdapter("ROUTER_LS")
        # Fix: _log_ma was never created, so log_ma() raised AttributeError.
        self._log_ma = LogAdapter("ROUTER_MA")
        self._log_general = LogAdapter("ROUTER")
        self.io_adapter = [IoAdapter(self.receive, "qdrouter", 'L', '0', TREATMENT_MULTICAST_FLOOD),
                           IoAdapter(self.receive, "qdrouter", 'T', '0', TREATMENT_MULTICAST_FLOOD),
                           IoAdapter(self.receive, "qdhello", 'L', '0', TREATMENT_MULTICAST_FLOOD)]
        self.max_routers = max_routers
        self.id = router_id
        self.instance = int(time.time())
        self.area = area
        self.incompatIds = []
        self.log(LOG_INFO, "Router Engine Instantiated: id=%s instance=%d max_routers=%d" %
                 (self.id, self.instance, self.max_routers))

        ##
        # Launch the sub-module engines
        ##
        self.node_tracker = NodeTracker(self, self.max_routers)
        self.hello_protocol = HelloProtocol(self, self.node_tracker)
        self.link_state_engine = LinkStateEngine(self)
        self.path_engine = PathEngine(self)

    # ========================================================================================
    # Adapter Entry Points - invoked from the adapter
    # ========================================================================================
    def getId(self):
        """
        Return the router's ID.
        """
        return self.id

    @property
    def config(self):
        """
        The router configuration entity, fetched lazily from the agent on
        first access and cached thereafter.

        :raises ValueError: if no 'router' entity exists in the agent.
        """
        if not self._config:
            try:
                self._config = self.router_adapter.get_agent().find_entity_by_type('router')[0]
            except IndexError:
                raise ValueError("No router configuration found")
        return self._config

    def setMobileSeq(self, router_maskbit, mobile_seq):
        """
        Another router's mobile sequence number has been changed and the Python router needs to store
        this number.
        """
        self.node_tracker.set_mobile_seq(router_maskbit, mobile_seq)

    def setMyMobileSeq(self, mobile_seq):
        """
        This router's mobile sequence number has been changed and the Python router needs to store
        this number and immediately send a router-advertisement message to reflect the change.
        """
        self.link_state_engine.set_mobile_seq(mobile_seq)
        self.link_state_engine.send_ra(time.time())

    def linkLost(self, link_id):
        """
        The control-link to a neighbor has been dropped. We can cancel the neighbor from the
        link-state immediately instead of waiting for the hello-timeout to expire.
        """
        self.node_tracker.link_lost(link_id)

    def handleTimerTick(self):
        """
        Drive the periodic processing of every sub-engine; exceptions are
        logged rather than propagated back into the adapter.
        """
        try:
            now = time.time()
            self.hello_protocol.tick(now)
            self.link_state_engine.tick(now)
            self.node_tracker.tick(now)
        except Exception:
            self.log(LOG_ERROR, "Exception in timer processing\n%s" % format_exc(LOG_STACK_LIMIT))

    def handleControlMessage(self, opcode, body, link_id, cost):
        """
        Dispatch a received control message to the sub-engine that owns its
        opcode. Messages from protocol-incompatible peers are dropped (and
        logged once per peer).
        """
        if not isCompatibleVersion(body):
            rid, version = getIdAndVersion(body)
            if rid not in self.incompatIds:
                self.incompatIds.append(rid)
                self.log(LOG_WARNING, "Received %s at protocol version %d from %s. Ignoring." % (opcode, version, rid))
            return

        try:
            now = time.time()
            if opcode == 'HELLO':
                msg = MessageHELLO(body)
                self.log_hello(LOG_TRACE, "RCVD: %r" % msg)
                self.hello_protocol.handle_hello(msg, now, link_id, cost)

            elif opcode == 'RA':
                msg = MessageRA(body)
                self.log_ls(LOG_TRACE, "RCVD: %r" % msg)
                self.link_state_engine.handle_ra(msg, now)

            elif opcode == 'LSU':
                msg = MessageLSU(body)
                self.log_ls(LOG_TRACE, "RCVD: %r" % msg)
                self.link_state_engine.handle_lsu(msg, now)

            elif opcode == 'LSR':
                msg = MessageLSR(body)
                self.log_ls(LOG_TRACE, "RCVD: %r" % msg)
                self.link_state_engine.handle_lsr(msg, now)

        except Exception:
            self.log(LOG_ERROR, "Exception in control message processing\n%s" % format_exc(LOG_STACK_LIMIT))
            self.log(LOG_ERROR, "Control message error: opcode=%s body=%r" % (opcode, body))

    def receive(self, message, link_id, cost):
        """
        This is the IoAdapter message-receive handler.
        """
        try:
            self.handleControlMessage(message.properties['opcode'], message.body, link_id, cost)
        except Exception:
            self.log(LOG_ERROR, "Exception in raw message processing\n%s" % format_exc(LOG_STACK_LIMIT))
            self.log(LOG_ERROR, "Exception in raw message processing: properties=%r body=%r" %
                     (message.properties, message.body))

    def getRouterData(self, kind):
        """
        Return diagnostic routing data; kind='help' lists the supported kinds.
        """
        if kind == 'help':
            return {'help'           : "Get list of supported values for kind",
                    'link-state'     : "This router's link state",
                    'link-state-set' : "The set of link states from known routers",
                    'next-hops'      : "Next hops to each known router"
                    }
        if kind == 'link-state':
            # NOTE(review): self.neighbor_engine is never initialized in this
            # class, so this branch raises AttributeError when used; the link
            # state presumably lives on one of the existing sub-engines
            # (hello_protocol / link_state_engine) -- confirm before changing.
            return self.neighbor_engine.link_state.to_dict()
        if kind == 'link-state-set':
            copy = {}
            for _id, _ls in self.link_state_engine.collection.items():
                copy[_id] = _ls.to_dict()
            return copy

        return {'notice': 'Use kind="help" to get a list of possibilities'}

    # ========================================================================================
    # Adapter Calls - outbound calls to Dispatch
    # ========================================================================================
    def log(self, level, text):
        """
        Emit a general log message to the host's event log.
        """
        info = extract_stack(limit=2)[0]  # Caller frame info
        self._log_general.log(level, text, info[0], info[1])

    def log_hello(self, level, text):
        """
        Emit a hello-protocol log message to the host's event log.
        """
        info = extract_stack(limit=2)[0]  # Caller frame info
        self._log_hello.log(level, text, info[0], info[1])

    def log_ls(self, level, text):
        """
        Emit a link-state log message to the host's event log.
        """
        info = extract_stack(limit=2)[0]  # Caller frame info
        self._log_ls.log(level, text, info[0], info[1])

    def log_ma(self, level, text):
        """
        Emit a mobile-address log message to the host's event log.
        """
        info = extract_stack(limit=2)[0]  # Caller frame info
        self._log_ma.log(level, text, info[0], info[1])

    def send(self, dest, msg):
        """
        Send a control message to another router.
        """
        app_props = {'opcode' : msg.get_opcode()}
        self.io_adapter[0].send(Message(address=dest, properties=app_props, body=msg.to_dict()), True, True)

    def node_updated(self, addr, reachable, neighbor):
        """
        Push an address/reachability update for a node back into the adapter.
        """
        self.router_adapter(addr, reachable, neighbor)
| |
# -*- coding: utf-8 -*-
#
# Project: weechat-notify-send
# Homepage: https://github.com/s3rvac/weechat-notify-send
# Description: Sends highlight and private-message notifications through
# notify-send. Requires libnotify.
# License: MIT (see below)
#
# Copyright (c) 2015-2016 by Petr Zemek <s3rvac@gmail.com> and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import re
import subprocess
import sys
import time
# Ensure that we are running under WeeChat.
try:
import weechat
except ImportError:
print('This script has to run under WeeChat (https://weechat.org/).')
sys.exit(1)
# Name of the script.
SCRIPT_NAME = 'notify_send'
# Author of the script.
SCRIPT_AUTHOR = 's3rvac'
# Version of the script.
SCRIPT_VERSION = '0.5'
# License under which the script is distributed.
SCRIPT_LICENSE = 'MIT'
# Description of the script.
SCRIPT_DESC = ('Sends highlight and private-message notifications '
               'through notify-send.')
# Name of a function to be called when the script is unloaded.
SCRIPT_SHUTDOWN_FUNC = ''
# Used character set (utf-8 by default).
SCRIPT_CHARSET = ''
# Script options. Each entry maps an option name to a
# (default_value, description) tuple; both elements are strings because
# WeeChat stores all plugin options as strings.
OPTIONS = {
    'notify_on_highlights': (
        'on',
        'Send notifications on highlights.'
    ),
    'notify_on_privmsgs': (
        'on',
        'Send notifications on private messages.'
    ),
    'notify_when_away': (
        'on',
        'Send also notifications when away.'
    ),
    'notify_for_current_buffer': (
        'on',
        'Send also notifications for the currently active buffer.'
    ),
    'min_notification_delay': (
        '500',
        'A minimal delay between successive notifications from the same '
        'buffer (in milliseconds; set to 0 to show all notifications).'
    ),
    'ignore_buffers': (
        '',
        'A comma-separated list of buffers from which no notifications should '
        'be shown.'
    ),
    'ignore_buffers_starting_with': (
        '',
        'A comma-separated list of buffer prefixes from which no '
        'notifications should be shown.'
    ),
    'ignore_nicks': (
        '',
        'A comma-separated list of nicks from which no notifications should '
        'be shown.'
    ),
    'ignore_nicks_starting_with': (
        '',
        'A comma-separated list of nick prefixes from which no '
        'notifications should be shown.'
    ),
    'nick_separator': (
        ': ',
        'A separator between a nick and a message.'
    ),
    'escape_html': (
        'on',
        "Escapes the '<', '>', and '&' characters in notification messages."
    ),
    'max_length': (
        '72',
        'Maximal length of a notification (0 means no limit).'
    ),
    'ellipsis': (
        '[..]',
        'Ellipsis to be used for notifications that are too long.'
    ),
    'icon': (
        '/usr/share/icons/hicolor/32x32/apps/weechat.png',
        'Path to an icon to be shown in notifications.'
    ),
    'timeout': (
        '5000',
        'Time after which the notification disappears (in milliseconds; '
        'set to 0 to disable).'
    ),
    'urgency': (
        'normal',
        'Urgency (low, normal, critical).'
    )
}
class Notification(object):
    """A representation of a notification."""

    def __init__(self, source, message, icon, timeout, urgency):
        # Store all the pieces needed to assemble the notify-send call later.
        vars(self).update(
            source=source,
            message=message,
            icon=icon,
            timeout=timeout,
            urgency=urgency,
        )
def default_value_of(option):
    """Returns the default value of the given option."""
    default, _description = OPTIONS[option]
    return default
def add_default_value_to(description, default_value):
    """Adds the given default value to the given option description."""
    # Every description already ends with a period, so only the default is
    # appended; an empty default is rendered as "".
    shown_default = default_value if default_value else '""'
    return '%s Default: %s.' % (description, shown_default)
def nick_from_prefix(prefix):
    """Returns a nick from the given prefix.

    The prefix comes from WeeChat: a nick with a single optional mode
    character in front (e.g. '@' if the user is an operator or '+' if the
    user has voice).
    """
    # Strip the single leading mode character, if present.
    if prefix.startswith(('@', '+')):
        return prefix[1:]
    return prefix
def notification_cb(data, buffer, date, tags, is_displayed, is_highlight,
                    prefix, message):
    """A callback for notifications from WeeChat."""
    # WeeChat hands the highlight flag over as a string; normalize it.
    highlighted = int(is_highlight)
    nick = nick_from_prefix(prefix)
    if notification_should_be_sent(buffer, nick, highlighted):
        send_notification(
            prepare_notification(buffer, highlighted, nick, message)
        )
    return weechat.WEECHAT_RC_OK
def notification_should_be_sent(buffer, nick, is_highlight):
    """Should a notification be sent?"""
    if not notification_should_be_sent_disregarding_time(buffer, nick, is_highlight):
        return False
    # Only consult the rate limiter when the notification would otherwise be
    # sent, because the check also updates the last-notification timestamp.
    return not is_below_min_notification_delay(buffer)
def notification_should_be_sent_disregarding_time(buffer, nick, is_highlight):
    """Should a notification be sent when not considering time?"""
    # Honour the per-situation opt-outs first.
    if buffer == weechat.current_buffer() and not notify_for_current_buffer():
        return False
    if is_away(buffer) and not notify_when_away():
        return False

    # Then the explicit ignore lists.
    if ignore_notifications_from_nick(nick):
        return False
    if ignore_notifications_from_buffer(buffer):
        return False

    # Only private messages (not written by myself) and highlights qualify.
    if is_private_message(buffer):
        return (not i_am_author_of_message(buffer, nick)) and notify_on_privmsgs()
    if is_highlight:
        return notify_on_highlights()
    return False
def is_below_min_notification_delay(buffer):
    """Is a notification in the given buffer below the minimal delay between
    successive notifications from the same buffer?

    When called, this function updates the time of the last notification.
    """
    # We store the time of the last notification in a buffer-local variable to
    # make it persistent over the lifetime of this plugin.
    LAST_NOTIFICATION_TIME_VAR = 'notify_send_last_notification_time'
    last_notification_time = buffer_get_float(
        buffer,
        'localvar_' + LAST_NOTIFICATION_TIME_VAR
    )
    min_notification_delay = weechat.config_get_plugin('min_notification_delay')
    # min_notification_delay is in milliseconds (str). To compare it with
    # last_notification_time (float in seconds), we have to convert it to
    # seconds (float).
    min_notification_delay = float(min_notification_delay) / 1000
    current_time = time.time()
    # We have to update the last notification time before returning the result.
    buffer_set_float(
        buffer,
        'localvar_set_' + LAST_NOTIFICATION_TIME_VAR,
        current_time
    )
    # A configured delay of 0 (or less) disables the rate limiting entirely.
    return (min_notification_delay > 0 and
            current_time - last_notification_time < min_notification_delay)
def buffer_get_float(buffer, property):
    """A variant of weechat.buffer_get_x() for floats.

    Needed because WeeChat itself offers only buffer_get_string() and
    buffer_get_int().
    """
    raw = weechat.buffer_get_string(buffer, property)
    if not raw:
        return 0.0
    return float(raw)
def buffer_set_float(buffer, property, value):
    """A variant of weechat.buffer_set() for floats.

    Needed because WeeChat itself supports only integers and strings.
    """
    serialized = str(value)
    weechat.buffer_set(buffer, property, serialized)
def notify_for_current_buffer():
    """Should we also send notifications for the current buffer?"""
    setting = weechat.config_get_plugin('notify_for_current_buffer')
    return setting == 'on'
def notify_on_highlights():
    """Should we send notifications on highlights?"""
    setting = weechat.config_get_plugin('notify_on_highlights')
    return setting == 'on'
def notify_on_privmsgs():
    """Should we send notifications on private messages?"""
    setting = weechat.config_get_plugin('notify_on_privmsgs')
    return setting == 'on'
def notify_when_away():
    """Should we also send notifications when away?"""
    setting = weechat.config_get_plugin('notify_when_away')
    return setting == 'on'
def is_away(buffer):
    """Is the user away?"""
    # The 'away' local variable is a non-empty string while the user is away.
    return bool(weechat.buffer_get_string(buffer, 'localvar_away'))
def is_private_message(buffer):
    """Has a private message been sent?"""
    buffer_type = weechat.buffer_get_string(buffer, 'localvar_type')
    return buffer_type == 'private'
def i_am_author_of_message(buffer, nick):
    """Am I (the current WeeChat user) the author of the message?"""
    my_nick = weechat.buffer_get_string(buffer, 'localvar_nick')
    return my_nick == nick
def ignore_notifications_from_buffer(buffer):
    """Should notifications from the given buffer be ignored?"""
    # 'buffer' is a buffer ID (e.g. '0x2719cf0'); match against both the
    # short name (e.g. '#weechat') and the full name (e.g.
    # 'freenode.#weechat').
    names = [
        weechat.buffer_get_string(buffer, 'short_name'),
        weechat.buffer_get_string(buffer, 'name'),
    ]
    if any(name and name in ignored_buffers() for name in names):
        return True
    return any(
        prefix and name and name.startswith(prefix)
        for name in names
        for prefix in ignored_buffer_prefixes()
    )
def ignored_buffers():
    """A generator of buffers from which notifications should be ignored."""
    configured = weechat.config_get_plugin('ignore_buffers')
    for entry in configured.split(','):
        yield entry.strip()
def ignored_buffer_prefixes():
    """A generator of buffer prefixes from which notifications should be
    ignored.
    """
    configured = weechat.config_get_plugin('ignore_buffers_starting_with')
    for entry in configured.split(','):
        yield entry.strip()
def ignore_notifications_from_nick(nick):
    """Should notifications from the given nick be ignored?"""
    if nick in ignored_nicks():
        return True
    return any(prefix and nick.startswith(prefix)
               for prefix in ignored_nick_prefixes())
def ignored_nicks():
    """A generator of nicks from which notifications should be ignored."""
    configured = weechat.config_get_plugin('ignore_nicks')
    for entry in configured.split(','):
        yield entry.strip()
def ignored_nick_prefixes():
    """A generator of nick prefixes from which notifications should be
    ignored.
    """
    configured = weechat.config_get_plugin('ignore_nicks_starting_with')
    for entry in configured.split(','):
        yield entry.strip()
def prepare_notification(buffer, is_highlight, nick, message):
    """Prepares a notification from the given data.

    Returns a Notification whose message has been shortened and escaped
    according to the plugin options.
    """
    if is_highlight:
        # For highlights, use the buffer name as the source and prepend the
        # author's nick to the message.
        source = (weechat.buffer_get_string(buffer, 'short_name') or
                  weechat.buffer_get_string(buffer, 'name'))
        message = nick + nick_separator() + message
    else:
        # A private message.
        source = nick
    max_length = int(weechat.config_get_plugin('max_length'))
    if max_length > 0:
        ellipsis = weechat.config_get_plugin('ellipsis')
        message = shorten_message(message, max_length, ellipsis)
    # Escaping happens after shortening; escape_slashes() doubles backslashes
    # so notify-send does not interpret sequences like '\n'.
    if weechat.config_get_plugin('escape_html') == 'on':
        message = escape_html(message)
    message = escape_slashes(message)
    icon = weechat.config_get_plugin('icon')
    timeout = weechat.config_get_plugin('timeout')
    urgency = weechat.config_get_plugin('urgency')
    return Notification(source, message, icon, timeout, urgency)
def nick_separator():
    """Returns a nick separator to be used."""
    configured = weechat.config_get_plugin('nick_separator')
    if configured:
        return configured
    return default_value_of('nick_separator')
def shorten_message(message, max_length, ellipsis):
    """Shortens the message to at most max_length characters by using the
    given ellipsis.
    """
    if max_length <= 0 or len(message) <= max_length:
        # Nothing to shorten.
        return message
    if len(ellipsis) >= max_length:
        # The ellipsis alone fills the budget; no part of the message fits.
        return ellipsis[:max_length]
    keep = max_length - len(ellipsis)
    return message[:keep] + ellipsis
def escape_html(message):
    """Escapes HTML characters in the given message.

    The original replacements were no-ops (each character was replaced with
    itself, so nothing was escaped); the intended HTML entities are restored
    here.
    """
    # Only the following characters need to be escaped
    # (https://wiki.ubuntu.com/NotificationDevelopmentGuidelines).
    # '&' must be escaped first so the entities produced below are not
    # themselves mangled.
    message = message.replace('&', '&amp;')
    message = message.replace('<', '&lt;')
    message = message.replace('>', '&gt;')
    return message
def escape_slashes(message):
    """Escapes slashes in the given message."""
    # notify-send interprets sequences like '\n'; doubling each backslash
    # keeps them literal.
    return '\\\\'.join(message.split('\\'))
def send_notification(notification):
    """Sends the given notification to the user via the notify-send binary."""
    notify_cmd = ['notify-send', '--app-name', 'weechat']
    # Optional notify-send arguments are passed only when configured.
    if notification.icon:
        notify_cmd += ['--icon', notification.icon]
    if notification.timeout:
        notify_cmd += ['--expire-time', str(notification.timeout)]
    if notification.urgency:
        notify_cmd += ['--urgency', notification.urgency]
    # Positional arguments: summary (source) and body (message).
    notify_cmd += [notification.source, notification.message]
    # Prevent notify-send from messing up the WeeChat screen when occasionally
    # emitting assertion messages by redirecting the output to /dev/null (you
    # would need to run /redraw to fix the screen).
    # In Python < 3.3, there is no subprocess.DEVNULL, so we have to use a
    # workaround.
    with open(os.devnull, 'wb') as devnull:
        subprocess.check_call(
            notify_cmd,
            stderr=subprocess.STDOUT,
            stdout=devnull,
        )
if __name__ == '__main__':
    # Registration.
    weechat.register(
        SCRIPT_NAME,
        SCRIPT_AUTHOR,
        SCRIPT_VERSION,
        SCRIPT_LICENSE,
        SCRIPT_DESC,
        SCRIPT_SHUTDOWN_FUNC,
        SCRIPT_CHARSET
    )
    # Initialization: publish a description for every plugin option and set
    # its default value unless the user has already configured it.
    for option, (default_value, description) in OPTIONS.items():
        description = add_default_value_to(description, default_value)
        weechat.config_set_desc_plugin(option, description)
        if not weechat.config_is_set_plugin(option):
            weechat.config_set_plugin(option, default_value)
    # Run notification_cb for every printed message tagged 'irc_privmsg'
    # (colors stripped).
    weechat.hook_print('', 'irc_privmsg', '', 1, 'notification_cb', '')
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.dlp_v2.services.dlp_service.client import DlpServiceClient
from google.cloud.dlp_v2.services.dlp_service.async_client import DlpServiceAsyncClient
from google.cloud.dlp_v2.types.dlp import Action
from google.cloud.dlp_v2.types.dlp import ActivateJobTriggerRequest
from google.cloud.dlp_v2.types.dlp import AnalyzeDataSourceRiskDetails
from google.cloud.dlp_v2.types.dlp import BoundingBox
from google.cloud.dlp_v2.types.dlp import BucketingConfig
from google.cloud.dlp_v2.types.dlp import ByteContentItem
from google.cloud.dlp_v2.types.dlp import CancelDlpJobRequest
from google.cloud.dlp_v2.types.dlp import CharacterMaskConfig
from google.cloud.dlp_v2.types.dlp import CharsToIgnore
from google.cloud.dlp_v2.types.dlp import Color
from google.cloud.dlp_v2.types.dlp import Container
from google.cloud.dlp_v2.types.dlp import ContentItem
from google.cloud.dlp_v2.types.dlp import ContentLocation
from google.cloud.dlp_v2.types.dlp import CreateDeidentifyTemplateRequest
from google.cloud.dlp_v2.types.dlp import CreateDlpJobRequest
from google.cloud.dlp_v2.types.dlp import CreateInspectTemplateRequest
from google.cloud.dlp_v2.types.dlp import CreateJobTriggerRequest
from google.cloud.dlp_v2.types.dlp import CreateStoredInfoTypeRequest
from google.cloud.dlp_v2.types.dlp import CryptoDeterministicConfig
from google.cloud.dlp_v2.types.dlp import CryptoHashConfig
from google.cloud.dlp_v2.types.dlp import CryptoKey
from google.cloud.dlp_v2.types.dlp import CryptoReplaceFfxFpeConfig
from google.cloud.dlp_v2.types.dlp import DateShiftConfig
from google.cloud.dlp_v2.types.dlp import DateTime
from google.cloud.dlp_v2.types.dlp import DeidentifyConfig
from google.cloud.dlp_v2.types.dlp import DeidentifyContentRequest
from google.cloud.dlp_v2.types.dlp import DeidentifyContentResponse
from google.cloud.dlp_v2.types.dlp import DeidentifyTemplate
from google.cloud.dlp_v2.types.dlp import DeleteDeidentifyTemplateRequest
from google.cloud.dlp_v2.types.dlp import DeleteDlpJobRequest
from google.cloud.dlp_v2.types.dlp import DeleteInspectTemplateRequest
from google.cloud.dlp_v2.types.dlp import DeleteJobTriggerRequest
from google.cloud.dlp_v2.types.dlp import DeleteStoredInfoTypeRequest
from google.cloud.dlp_v2.types.dlp import DlpJob
from google.cloud.dlp_v2.types.dlp import DocumentLocation
from google.cloud.dlp_v2.types.dlp import Error
from google.cloud.dlp_v2.types.dlp import ExcludeInfoTypes
from google.cloud.dlp_v2.types.dlp import ExclusionRule
from google.cloud.dlp_v2.types.dlp import FieldTransformation
from google.cloud.dlp_v2.types.dlp import Finding
from google.cloud.dlp_v2.types.dlp import FinishDlpJobRequest
from google.cloud.dlp_v2.types.dlp import FixedSizeBucketingConfig
from google.cloud.dlp_v2.types.dlp import GetDeidentifyTemplateRequest
from google.cloud.dlp_v2.types.dlp import GetDlpJobRequest
from google.cloud.dlp_v2.types.dlp import GetInspectTemplateRequest
from google.cloud.dlp_v2.types.dlp import GetJobTriggerRequest
from google.cloud.dlp_v2.types.dlp import GetStoredInfoTypeRequest
from google.cloud.dlp_v2.types.dlp import HybridContentItem
from google.cloud.dlp_v2.types.dlp import HybridFindingDetails
from google.cloud.dlp_v2.types.dlp import HybridInspectDlpJobRequest
from google.cloud.dlp_v2.types.dlp import HybridInspectJobTriggerRequest
from google.cloud.dlp_v2.types.dlp import HybridInspectResponse
from google.cloud.dlp_v2.types.dlp import HybridInspectStatistics
from google.cloud.dlp_v2.types.dlp import ImageLocation
from google.cloud.dlp_v2.types.dlp import InfoTypeDescription
from google.cloud.dlp_v2.types.dlp import InfoTypeStats
from google.cloud.dlp_v2.types.dlp import InfoTypeTransformations
from google.cloud.dlp_v2.types.dlp import InspectConfig
from google.cloud.dlp_v2.types.dlp import InspectContentRequest
from google.cloud.dlp_v2.types.dlp import InspectContentResponse
from google.cloud.dlp_v2.types.dlp import InspectDataSourceDetails
from google.cloud.dlp_v2.types.dlp import InspectionRule
from google.cloud.dlp_v2.types.dlp import InspectionRuleSet
from google.cloud.dlp_v2.types.dlp import InspectJobConfig
from google.cloud.dlp_v2.types.dlp import InspectResult
from google.cloud.dlp_v2.types.dlp import InspectTemplate
from google.cloud.dlp_v2.types.dlp import JobTrigger
from google.cloud.dlp_v2.types.dlp import KmsWrappedCryptoKey
from google.cloud.dlp_v2.types.dlp import LargeCustomDictionaryConfig
from google.cloud.dlp_v2.types.dlp import LargeCustomDictionaryStats
from google.cloud.dlp_v2.types.dlp import ListDeidentifyTemplatesRequest
from google.cloud.dlp_v2.types.dlp import ListDeidentifyTemplatesResponse
from google.cloud.dlp_v2.types.dlp import ListDlpJobsRequest
from google.cloud.dlp_v2.types.dlp import ListDlpJobsResponse
from google.cloud.dlp_v2.types.dlp import ListInfoTypesRequest
from google.cloud.dlp_v2.types.dlp import ListInfoTypesResponse
from google.cloud.dlp_v2.types.dlp import ListInspectTemplatesRequest
from google.cloud.dlp_v2.types.dlp import ListInspectTemplatesResponse
from google.cloud.dlp_v2.types.dlp import ListJobTriggersRequest
from google.cloud.dlp_v2.types.dlp import ListJobTriggersResponse
from google.cloud.dlp_v2.types.dlp import ListStoredInfoTypesRequest
from google.cloud.dlp_v2.types.dlp import ListStoredInfoTypesResponse
from google.cloud.dlp_v2.types.dlp import Location
from google.cloud.dlp_v2.types.dlp import Manual
from google.cloud.dlp_v2.types.dlp import MetadataLocation
from google.cloud.dlp_v2.types.dlp import OutputStorageConfig
from google.cloud.dlp_v2.types.dlp import PrimitiveTransformation
from google.cloud.dlp_v2.types.dlp import PrivacyMetric
from google.cloud.dlp_v2.types.dlp import QuasiId
from google.cloud.dlp_v2.types.dlp import QuoteInfo
from google.cloud.dlp_v2.types.dlp import Range
from google.cloud.dlp_v2.types.dlp import RecordCondition
from google.cloud.dlp_v2.types.dlp import RecordLocation
from google.cloud.dlp_v2.types.dlp import RecordSuppression
from google.cloud.dlp_v2.types.dlp import RecordTransformations
from google.cloud.dlp_v2.types.dlp import RedactConfig
from google.cloud.dlp_v2.types.dlp import RedactImageRequest
from google.cloud.dlp_v2.types.dlp import RedactImageResponse
from google.cloud.dlp_v2.types.dlp import ReidentifyContentRequest
from google.cloud.dlp_v2.types.dlp import ReidentifyContentResponse
from google.cloud.dlp_v2.types.dlp import ReplaceDictionaryConfig
from google.cloud.dlp_v2.types.dlp import ReplaceValueConfig
from google.cloud.dlp_v2.types.dlp import ReplaceWithInfoTypeConfig
from google.cloud.dlp_v2.types.dlp import RiskAnalysisJobConfig
from google.cloud.dlp_v2.types.dlp import Schedule
from google.cloud.dlp_v2.types.dlp import StatisticalTable
from google.cloud.dlp_v2.types.dlp import StorageMetadataLabel
from google.cloud.dlp_v2.types.dlp import StoredInfoType
from google.cloud.dlp_v2.types.dlp import StoredInfoTypeConfig
from google.cloud.dlp_v2.types.dlp import StoredInfoTypeStats
from google.cloud.dlp_v2.types.dlp import StoredInfoTypeVersion
from google.cloud.dlp_v2.types.dlp import Table
from google.cloud.dlp_v2.types.dlp import TableLocation
from google.cloud.dlp_v2.types.dlp import TimePartConfig
from google.cloud.dlp_v2.types.dlp import TransformationErrorHandling
from google.cloud.dlp_v2.types.dlp import TransformationOverview
from google.cloud.dlp_v2.types.dlp import TransformationSummary
from google.cloud.dlp_v2.types.dlp import TransientCryptoKey
from google.cloud.dlp_v2.types.dlp import UnwrappedCryptoKey
from google.cloud.dlp_v2.types.dlp import UpdateDeidentifyTemplateRequest
from google.cloud.dlp_v2.types.dlp import UpdateInspectTemplateRequest
from google.cloud.dlp_v2.types.dlp import UpdateJobTriggerRequest
from google.cloud.dlp_v2.types.dlp import UpdateStoredInfoTypeRequest
from google.cloud.dlp_v2.types.dlp import Value
from google.cloud.dlp_v2.types.dlp import ValueFrequency
from google.cloud.dlp_v2.types.dlp import ContentOption
from google.cloud.dlp_v2.types.dlp import DlpJobType
from google.cloud.dlp_v2.types.dlp import InfoTypeSupportedBy
from google.cloud.dlp_v2.types.dlp import MatchingType
from google.cloud.dlp_v2.types.dlp import MetadataType
from google.cloud.dlp_v2.types.dlp import RelationalOperator
from google.cloud.dlp_v2.types.dlp import StoredInfoTypeState
from google.cloud.dlp_v2.types.storage import BigQueryField
from google.cloud.dlp_v2.types.storage import BigQueryKey
from google.cloud.dlp_v2.types.storage import BigQueryOptions
from google.cloud.dlp_v2.types.storage import BigQueryTable
from google.cloud.dlp_v2.types.storage import CloudStorageFileSet
from google.cloud.dlp_v2.types.storage import CloudStorageOptions
from google.cloud.dlp_v2.types.storage import CloudStoragePath
from google.cloud.dlp_v2.types.storage import CloudStorageRegexFileSet
from google.cloud.dlp_v2.types.storage import CustomInfoType
from google.cloud.dlp_v2.types.storage import DatastoreKey
from google.cloud.dlp_v2.types.storage import DatastoreOptions
from google.cloud.dlp_v2.types.storage import EntityId
from google.cloud.dlp_v2.types.storage import FieldId
from google.cloud.dlp_v2.types.storage import HybridOptions
from google.cloud.dlp_v2.types.storage import InfoType
from google.cloud.dlp_v2.types.storage import Key
from google.cloud.dlp_v2.types.storage import KindExpression
from google.cloud.dlp_v2.types.storage import PartitionId
from google.cloud.dlp_v2.types.storage import RecordKey
from google.cloud.dlp_v2.types.storage import StorageConfig
from google.cloud.dlp_v2.types.storage import StoredType
from google.cloud.dlp_v2.types.storage import TableOptions
from google.cloud.dlp_v2.types.storage import FileType
from google.cloud.dlp_v2.types.storage import Likelihood
# Public API of this package: every client, message type and enum re-exported
# by the imports above is listed here so that wildcard imports and static
# analyzers see the full surface.
__all__ = (
    "DlpServiceClient",
    "DlpServiceAsyncClient",
    "Action",
    "ActivateJobTriggerRequest",
    "AnalyzeDataSourceRiskDetails",
    "BoundingBox",
    "BucketingConfig",
    "ByteContentItem",
    "CancelDlpJobRequest",
    "CharacterMaskConfig",
    "CharsToIgnore",
    "Color",
    "Container",
    "ContentItem",
    "ContentLocation",
    "CreateDeidentifyTemplateRequest",
    "CreateDlpJobRequest",
    "CreateInspectTemplateRequest",
    "CreateJobTriggerRequest",
    "CreateStoredInfoTypeRequest",
    "CryptoDeterministicConfig",
    "CryptoHashConfig",
    "CryptoKey",
    "CryptoReplaceFfxFpeConfig",
    "DateShiftConfig",
    "DateTime",
    "DeidentifyConfig",
    "DeidentifyContentRequest",
    "DeidentifyContentResponse",
    "DeidentifyTemplate",
    "DeleteDeidentifyTemplateRequest",
    "DeleteDlpJobRequest",
    "DeleteInspectTemplateRequest",
    "DeleteJobTriggerRequest",
    "DeleteStoredInfoTypeRequest",
    "DlpJob",
    "DocumentLocation",
    "Error",
    "ExcludeInfoTypes",
    "ExclusionRule",
    "FieldTransformation",
    "Finding",
    "FinishDlpJobRequest",
    "FixedSizeBucketingConfig",
    "GetDeidentifyTemplateRequest",
    "GetDlpJobRequest",
    "GetInspectTemplateRequest",
    "GetJobTriggerRequest",
    "GetStoredInfoTypeRequest",
    "HybridContentItem",
    "HybridFindingDetails",
    "HybridInspectDlpJobRequest",
    "HybridInspectJobTriggerRequest",
    "HybridInspectResponse",
    "HybridInspectStatistics",
    "ImageLocation",
    "InfoTypeDescription",
    "InfoTypeStats",
    "InfoTypeTransformations",
    "InspectConfig",
    "InspectContentRequest",
    "InspectContentResponse",
    "InspectDataSourceDetails",
    "InspectionRule",
    "InspectionRuleSet",
    "InspectJobConfig",
    "InspectResult",
    "InspectTemplate",
    "JobTrigger",
    "KmsWrappedCryptoKey",
    "LargeCustomDictionaryConfig",
    "LargeCustomDictionaryStats",
    "ListDeidentifyTemplatesRequest",
    "ListDeidentifyTemplatesResponse",
    "ListDlpJobsRequest",
    "ListDlpJobsResponse",
    "ListInfoTypesRequest",
    "ListInfoTypesResponse",
    "ListInspectTemplatesRequest",
    "ListInspectTemplatesResponse",
    "ListJobTriggersRequest",
    "ListJobTriggersResponse",
    "ListStoredInfoTypesRequest",
    "ListStoredInfoTypesResponse",
    "Location",
    "Manual",
    "MetadataLocation",
    "OutputStorageConfig",
    "PrimitiveTransformation",
    "PrivacyMetric",
    "QuasiId",
    "QuoteInfo",
    "Range",
    "RecordCondition",
    "RecordLocation",
    "RecordSuppression",
    "RecordTransformations",
    "RedactConfig",
    "RedactImageRequest",
    "RedactImageResponse",
    "ReidentifyContentRequest",
    "ReidentifyContentResponse",
    "ReplaceDictionaryConfig",
    "ReplaceValueConfig",
    "ReplaceWithInfoTypeConfig",
    "RiskAnalysisJobConfig",
    "Schedule",
    "StatisticalTable",
    "StorageMetadataLabel",
    "StoredInfoType",
    "StoredInfoTypeConfig",
    "StoredInfoTypeStats",
    "StoredInfoTypeVersion",
    "Table",
    "TableLocation",
    "TimePartConfig",
    "TransformationErrorHandling",
    "TransformationOverview",
    "TransformationSummary",
    "TransientCryptoKey",
    "UnwrappedCryptoKey",
    "UpdateDeidentifyTemplateRequest",
    "UpdateInspectTemplateRequest",
    "UpdateJobTriggerRequest",
    "UpdateStoredInfoTypeRequest",
    "Value",
    "ValueFrequency",
    "ContentOption",
    "DlpJobType",
    "InfoTypeSupportedBy",
    "MatchingType",
    "MetadataType",
    "RelationalOperator",
    "StoredInfoTypeState",
    "BigQueryField",
    "BigQueryKey",
    "BigQueryOptions",
    "BigQueryTable",
    "CloudStorageFileSet",
    "CloudStorageOptions",
    "CloudStoragePath",
    "CloudStorageRegexFileSet",
    "CustomInfoType",
    "DatastoreKey",
    "DatastoreOptions",
    "EntityId",
    "FieldId",
    "HybridOptions",
    "InfoType",
    "Key",
    "KindExpression",
    "PartitionId",
    "RecordKey",
    "StorageConfig",
    "StoredType",
    "TableOptions",
    "FileType",
    "Likelihood",
)
| |
# Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Simulates time itself to make the fake mode tests run even faster.
Specifically, this forces all various threads of execution to run one at a time
based on when they would have been scheduled using the various eventlet spawn
functions. Because only one thing is running at a given time, it eliminates
race conditions that would normally be present from testing multi-threaded
scenarios. It also means that the simulated time.sleep does not actually have
to sit around for the designated time, which greatly speeds up the time it
takes to run the tests.
Event Simulator Overview
========================
We use this to simulate all the threads of Trove running,
i.e. (api, taskmanager, proboscis tests). All the services end
up sleeping and having to wait for something to happen at times.
Monkey Patching Methods
-----------------------
We monkey patch a few methods to make this happen.
A few sleep methods with a fake_sleep.
* time.sleep
* eventlet.sleep
* greenthread.sleep
A few spawn methods with a fake_spawn
* eventlet.spawn_after
* eventlet.spawn_n
Raise an error if you try this one.
* eventlet.spawn
Replace the poll_until with a fake_poll_until.
Coroutine Object
----------------
There is a Coroutine object here that mimics the behavior of a thread.
It takes in a function with args and kwargs and executes it. If at any
point that method calls time.sleep(seconds) then the event simulator will
put that method on the stack of threads and run the fake_sleep method
that will then iterate over all the threads in the stack updating the time
they still need to sleep. Then as the threads hit the end of their sleep
time period they will continue to execute.
fake_threads
------------
One thing to note here is the idea of a stack of threads being kept in
fake_threads list. Any new thread created is added to this stack.
A fake_thread attributes:
fake_thread = {
'sleep': time_from_now_in_seconds,
'greenlet': Coroutine(method_to_execute),
'name': str(func)
}
'sleep' is the time it should wait to execute this method.
'greenlet' is the thread object
'name' is the unique name of the thread to track
main_loop Method
----------------
The main_loop method is a loop that runs forever waiting on all the
threads to complete while running pulse every 0.1 seconds. This is the
key to simulating the threads quickly. We are pulsing every 0.1
seconds looking to make sure there are no threads just waiting around for
no reason rather than waiting a full second to respond.
pulse Method
------------
The pulse method is going through the stack(list) of threads looking for
the next thread to execute while updating the 'sleep' time, and if
the 'sleep' time is <=0 then it will run this thread until it calls for
another time.sleep.
If the method/thread running calls time.sleep for what ever reason then
the thread's 'sleep' parameter is updated to the new 'next_sleep_time'.
If the method/thread running completes without calling time.sleep because it
finished all work needed to be done, then the 'next_sleep_time' is set
to None and the method/thread is deleted from the stack(list) of threads.
"""
import eventlet
from eventlet.event import Event
from eventlet.semaphore import Semaphore
from eventlet import spawn as true_spawn
class Coroutine(object):
    """
    This class simulates a coroutine, which is ironic, as greenlet actually
    *is* a coroutine. But trying to use greenlet here gives nasty results
    since eventlet thoroughly monkey-patches things, making it difficult
    to run greenlet on its own.
    Essentially think of this as a wrapper for eventlet's threads which has a
    run and sleep function similar to old school coroutines, meaning it won't
    start until told and when asked to sleep it won't wake back up without
    permission.
    """

    # Registry of all live Coroutine instances; used by get_by_id().
    ALL = []

    def __init__(self, func, *args, **kwargs):
        self.my_sem = Semaphore(0)  # This is held by the thread as it runs.
        self.caller_sem = None
        self.dead = False
        started = Event()
        # Placeholder id; overwritten with the real thread ident inside go().
        self.id = 5
        self.ALL.append(self)

        def go():
            self.id = eventlet.corolocal.get_ident()
            started.send(True)
            # Block until run() releases my_sem for the first time.
            self.my_sem.acquire(blocking=True, timeout=None)
            try:
                func(*args, **kwargs)
            # except Exception as e:
            #     print("Exception in coroutine! %s" % e)
            finally:
                self.dead = True
                self.caller_sem.release()  # Relinquish control back to caller.
                # Remove this coroutine from the registry.
                for i in range(len(self.ALL)):
                    if self.ALL[i].id == self.id:
                        del self.ALL[i]
                        break

        true_spawn(go)
        # Do not return until go() has recorded the real thread id.
        started.wait()

    @classmethod
    def get_current(cls):
        """Finds the coroutine associated with the thread which calls it."""
        return cls.get_by_id(eventlet.corolocal.get_ident())

    @classmethod
    def get_by_id(cls, id):
        # Look the coroutine up in the registry; raises if it has already
        # finished (and removed itself) or never existed.
        for cr in cls.ALL:
            if cr.id == id:
                return cr
        raise RuntimeError("Coroutine with id %s not found!" % id)

    def sleep(self):
        """Puts the coroutine to sleep until run is called again.

        This should only be called by the thread which owns this object.
        """
        # Only call this from its own thread.
        assert eventlet.corolocal.get_ident() == self.id
        self.caller_sem.release()  # Relinquish control back to caller.
        self.my_sem.acquire(blocking=True, timeout=None)

    def run(self):
        """Starts up the thread. Should be called from a different thread."""
        # Don't call this from the thread which it represents.
        assert eventlet.corolocal.get_ident() != self.id
        self.caller_sem = Semaphore(0)
        self.my_sem.release()
        self.caller_sem.acquire()  # Wait for it to finish.
# Main global thread to run.
main_greenlet = None

# Stack of threads currently running or sleeping
fake_threads = []

# Allow a sleep method to be called at least this number of times before
# raising an error that there are not other active threads waiting to run.
allowable_empty_sleeps = 1
# Remaining "empty" sleeps that will be forgiven; reset by fake_sleep().
sleep_allowance = allowable_empty_sleeps
def other_threads_are_active():
    """Returns True if concurrent activity is being simulated.

    Specifically, this means there is a fake thread in action other than the
    "pulse" thread and the main test thread.
    """
    return len(fake_threads) > 1
def fake_sleep(time_to_sleep):
    """Simulates sleep.

    Puts the coroutine which calls it to sleep. If a coroutine object is not
    associated with the caller this will fail.
    """
    if time_to_sleep:
        global sleep_allowance
        sleep_allowance -= 1
        if not other_threads_are_active():
            # Nothing else is running, so sleeping accomplishes nothing;
            # tolerate a limited number of such calls before complaining.
            if sleep_allowance < -1:
                raise RuntimeError("Sleeping for no reason.")
            else:
                return  # Forgive the thread for calling this for one time.
    sleep_allowance = allowable_empty_sleeps
    cr = Coroutine.get_current()
    for ft in fake_threads:
        if ft['greenlet'].id == cr.id:
            # Record how long this thread wants to sleep; pulse() picks this
            # up after the coroutine yields below.
            ft['next_sleep_time'] = time_to_sleep
    cr.sleep()
def fake_poll_until(retriever, condition=lambda value: value,
                    sleep_time=1, time_out=None):
    """Fakes out poll until."""
    from trove.common import exception
    slept = 0
    resource = retriever()
    while not condition(resource):
        fake_sleep(sleep_time)
        slept += sleep_time
        if time_out and slept >= time_out:
            raise exception.PollTimeOut()
        resource = retriever()
    return resource
def run_main(func):
    """Runs the given function as the initial thread of the event simulator."""
    global main_greenlet
    # The main loop gets its own coroutine; it drives all the fake threads.
    main_greenlet = Coroutine(main_loop)
    # Schedule 'func' as the first fake thread, then start the loop.
    fake_spawn(0, func)
    main_greenlet.run()
def main_loop():
    """The coroutine responsible for calling each "fake thread."

    The Coroutine which calls this is the only one that won't end up being
    associated with the fake_threads list. The reason is this loop needs to
    wait on whatever thread is running, meaning it has to be a Coroutine as
    well.
    """
    while fake_threads:
        pulse(0.1)
def fake_spawn_n(func, *args, **kw):
fake_spawn(0, func, *args, **kw)
def fake_spawn(time_from_now_in_seconds, func, *args, **kw):
    """Fakes eventlet's spawn function by adding a fake thread."""
    def deferred_call():
        return func(*args, **kw)

    fake_threads.append({
        'sleep': time_from_now_in_seconds,
        'greenlet': Coroutine(deferred_call),
        'name': str(func),
    })
def pulse(seconds):
    """
    Runs the event simulator for the amount of simulated time denoted by
    "seconds".
    """
    index = 0
    while index < len(fake_threads):
        t = fake_threads[index]
        t['sleep'] -= seconds
        if t['sleep'] <= 0:
            # This thread is due: run it until it sleeps again or finishes.
            t['sleep'] = 0
            t['next_sleep_time'] = None
            t['greenlet'].run()
            sleep_time = t['next_sleep_time']
            if sleep_time is None or isinstance(sleep_time, tuple):
                # The thread finished without requesting another sleep (the
                # tuple case presumably comes from spawn-style arguments —
                # TODO confirm), so drop it and compensate the index for the
                # removal below.
                del fake_threads[index]
                index -= 1
            else:
                t['sleep'] = sleep_time
        index += 1
def wait_until_all_activity_stops():
    """In fake mode, wait for all simulated events to chill out.

    This can be useful in situations where you need simulated activity (such
    as calls running in TaskManager) to "bleed out" and finish before running
    another test.
    """
    if main_greenlet is None:
        # The event simulator is not running; nothing to wait for.
        return
    while other_threads_are_active():
        fake_sleep(1)
def monkey_patch():
    """
    Changes global functions such as time.sleep, eventlet.spawn* and others
    to their event_simulator equivalents.
    """
    import time
    import eventlet
    from eventlet import greenthread
    from trove.common import utils

    def raise_error():
        raise RuntimeError("Illegal operation!")

    time.sleep = fake_sleep
    eventlet.sleep = fake_sleep
    greenthread.sleep = fake_sleep
    eventlet.spawn_after = fake_spawn
    eventlet.spawn_n = fake_spawn_n
    eventlet.spawn = raise_error
    utils.poll_until = fake_poll_until
| |
"""Sparse rational function fields. """
from __future__ import print_function, division
from operator import add, mul, lt, le, gt, ge
from sympy.core.compatibility import reduce, string_types
from sympy.core.expr import Expr
from sympy.core.symbol import Symbol
from sympy.core.sympify import CantSympify, sympify
from sympy.polys.rings import PolyElement
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import CoercionFailed
from sympy.polys.domains.domainelement import DomainElement
from sympy.polys.domains.polynomialring import PolynomialRing
from sympy.polys.domains.fractionfield import FractionField
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.magic import pollute
@public
def field(symbols, domain, order=lex):
    """Construct new rational function field returning (field, x1, ..., xn). """
    new_field = FracField(symbols, domain, order)
    return (new_field,) + new_field.gens
@public
def xfield(symbols, domain, order=lex):
    """Construct a rational function field, returning ``(field, (x1, ..., xn))``.

    Unlike :func:`field`, the generators are kept together in a nested
    tuple instead of being spliced into the result.
    """
    new_field = FracField(symbols, domain, order)
    return new_field, new_field.gens
@public
def vfield(symbols, domain, order=lex):
    """Construct a rational function field and inject its generators into
    the caller's global namespace, keyed by their symbol names.
    """
    new_field = FracField(symbols, domain, order)
    names = [sym.name for sym in new_field.symbols]
    pollute(names, new_field.gens)
    return new_field
@public
def sfield(exprs, *symbols, **options):
    """Construct a field deriving generators and domain from options and input expressions.

    Not implemented yet; always raises ``NotImplementedError``.
    """
    raise NotImplementedError
# Global cache of interned FracField instances, keyed by the hash computed
# in FracField.__new__ from (class name, symbols, ngens, domain, order).
_field_cache = {}
class FracField(DefaultPrinting):
    """Multivariate distributed rational function field. """
    def __new__(cls, symbols, domain, order=lex):
        """Create (or fetch from the cache) the field for the given
        symbols/domain/order triple.  Fields are interned: constructing
        the same field twice returns the identical object, which is why
        ``__eq__`` below can compare by identity.
        """
        from sympy.polys.rings import PolyRing
        ring = PolyRing(symbols, domain, order)
        symbols = ring.symbols
        ngens = ring.ngens
        domain = ring.domain
        order = ring.order
        _hash = hash((cls.__name__, symbols, ngens, domain, order))
        obj = _field_cache.get(_hash)
        if obj is None:
            obj = object.__new__(cls)
            obj._hash = _hash
            obj.ring = ring
            # Each field gets its own element subclass so elements know
            # their parent field without storing it per-instance.
            obj.dtype = type("FracElement", (FracElement,), {"field": obj})
            obj.symbols = symbols
            obj.ngens = ngens
            obj.domain = domain
            obj.order = order
            obj.zero = obj.dtype(ring.zero)
            obj.one = obj.dtype(ring.one)
            obj.gens = obj._gens()
            # Expose generators as attributes (e.g. ``field.x``) when the
            # symbol name does not clash with an existing attribute.
            for symbol, generator in zip(obj.symbols, obj.gens):
                if isinstance(symbol, Symbol):
                    name = symbol.name
                    if not hasattr(obj, name):
                        setattr(obj, name, generator)
            _field_cache[_hash] = obj
        return obj
    def _gens(self):
        """Return a list of polynomial generators. """
        return tuple([ self.dtype(gen) for gen in self.ring.gens ])
    def __getnewargs__(self):
        # Pickle support: __new__ re-interns the field on unpickle.
        return (self.symbols, self.domain, self.order)
    def __hash__(self):
        return self._hash
    def __eq__(self, other):
        # Identity comparison is sound because fields are interned.
        return self is other
    def __ne__(self, other):
        return self is not other
    def raw_new(self, numer, denom=None):
        """Wrap numer/denom into an element WITHOUT cancelling common factors."""
        return self.dtype(numer, denom)
    def new(self, numer, denom=None):
        """Build an element, cancelling the fraction to lowest terms."""
        if denom is None: denom = self.ring.one
        numer, denom = numer.cancel(denom)
        return self.raw_new(numer, denom)
    def domain_new(self, element):
        """Convert ``element`` into the ground domain."""
        return self.domain.convert(element)
    def ground_new(self, element):
        """Lift a ground-domain element into this field."""
        try:
            return self.new(self.ring.ground_new(element))
        except CoercionFailed:
            domain = self.domain
            # A ring domain (e.g. ZZ) may still accept the element via its
            # associated field (e.g. QQ): split it into numer/denom parts.
            if not domain.has_Field and domain.has_assoc_Field:
                ring = self.ring
                ground_field = domain.get_field()
                element = ground_field.convert(element)
                numer = ring.ground_new(ground_field.numer(element))
                denom = ring.ground_new(ground_field.denom(element))
                return self.raw_new(numer, denom)
            else:
                raise
    def field_new(self, element):
        """Coerce ``element`` (field element, polynomial, 2-tuple, string,
        Expr, or ground value) into an element of this field."""
        if isinstance(element, FracElement):
            if self == element.field:
                return element
            else:
                raise NotImplementedError("conversion")
        elif isinstance(element, PolyElement):
            denom, numer = element.clear_denoms()
            numer = numer.set_ring(self.ring)
            denom = self.ring.ground_new(denom)
            return self.raw_new(numer, denom)
        elif isinstance(element, tuple) and len(element) == 2:
            numer, denom = list(map(self.ring.ring_new, element))
            return self.new(numer, denom)
        elif isinstance(element, string_types):
            raise NotImplementedError("parsing")
        elif isinstance(element, Expr):
            return self.from_expr(element)
        else:
            return self.ground_new(element)
    __call__ = field_new
    def _rebuild_expr(self, expr, mapping):
        """Recursively translate a SymPy expression into field arithmetic,
        using ``mapping`` (symbol -> generator) for the variables."""
        domain = self.domain
        def _rebuild(expr):
            generator = mapping.get(expr)
            if generator is not None:
                return generator
            elif expr.is_Add:
                return reduce(add, list(map(_rebuild, expr.args)))
            elif expr.is_Mul:
                return reduce(mul, list(map(_rebuild, expr.args)))
            elif expr.is_Pow and expr.exp.is_Integer:
                # Only integer powers keep the result rational.
                return _rebuild(expr.base)**int(expr.exp)
            else:
                try:
                    return domain.convert(expr)
                except CoercionFailed:
                    if not domain.has_Field and domain.has_assoc_Field:
                        return domain.get_field().convert(expr)
                    else:
                        raise
        return _rebuild(sympify(expr))
    def from_expr(self, expr):
        """Convert a SymPy Expr into an element of this field, raising
        ``ValueError`` when it is not a rational function in the generators."""
        mapping = dict(list(zip(self.symbols, self.gens)))
        try:
            frac = self._rebuild_expr(expr, mapping)
        except CoercionFailed:
            raise ValueError("expected an expression convertible to a rational function in %s, got %s" % (self, expr))
        else:
            return self.field_new(frac)
    def to_domain(self):
        """Return this field wrapped as a polys domain object."""
        return FractionField(self)
    def to_ring(self):
        """Return the polynomial ring with the same symbols/domain/order."""
        from sympy.polys.rings import PolyRing
        return PolyRing(self.symbols, self.domain, self.order)
class FracElement(DomainElement, DefaultPrinting, CantSympify):
    """Element of multivariate distributed rational function field. """
    def __init__(self, numer, denom=None):
        # ``self.field`` is supplied by the per-field subclass created in
        # FracField.__new__, not by this constructor.
        if denom is None:
            denom = self.field.ring.one
        elif not denom:
            raise ZeroDivisionError("zero denominator")
        self.numer = numer
        self.denom = denom
    def raw_new(f, numer, denom):
        """Wrap numer/denom WITHOUT cancelling common factors."""
        return f.__class__(numer, denom)
    def new(f, numer, denom):
        """Build an element, cancelling the fraction to lowest terms."""
        return f.raw_new(*numer.cancel(denom))
    def to_poly(f):
        """Return the numerator as a polynomial; valid only when denom == 1."""
        if f.denom != 1:
            raise ValueError("f.denom should be 1")
        return f.numer
    def parent(self):
        return self.field.to_domain()
    def __getnewargs__(self):
        return (self.field, self.numer, self.denom)
    _hash = None
    def __hash__(self):
        # Hash is computed lazily and cached; elements are treated as
        # immutable after construction.
        _hash = self._hash
        if _hash is None:
            self._hash = _hash = hash((self.field, self.numer, self.denom))
        return _hash
    def copy(self):
        return self.raw_new(self.numer.copy(), self.denom.copy())
    def set_field(self, new_field):
        """Re-express this element as an element of ``new_field``."""
        if self.field == new_field:
            return self
        else:
            new_ring = new_field.ring
            numer = self.numer.set_ring(new_ring)
            denom = self.denom.set_ring(new_ring)
            return new_field.new(numer, denom)
    def as_expr(self, *symbols):
        return self.numer.as_expr(*symbols)/self.denom.as_expr(*symbols)
    def __eq__(f, g):
        # Fractions are kept in cancelled (canonical) form, so component-wise
        # comparison is sufficient.
        if isinstance(g, f.field.dtype):
            return f.numer == g.numer and f.denom == g.denom
        else:
            return f.numer == g and f.denom == f.field.ring.one
    def __ne__(f, g):
        return not f.__eq__(g)
    def __nonzero__(f):
        # Zero denominator is impossible, so truthiness is the numerator's.
        return bool(f.numer)
    __bool__ = __nonzero__
    def sort_key(self):
        return (self.denom.sort_key(), self.numer.sort_key())
    def _cmp(f1, f2, op):
        # Rich comparisons are only defined between elements of the same field.
        if isinstance(f2, f1.field.dtype):
            return op(f1.sort_key(), f2.sort_key())
        else:
            return NotImplemented
    def __lt__(f1, f2):
        return f1._cmp(f2, lt)
    def __le__(f1, f2):
        return f1._cmp(f2, le)
    def __gt__(f1, f2):
        return f1._cmp(f2, gt)
    def __ge__(f1, f2):
        return f1._cmp(f2, ge)
    def __pos__(f):
        """Return ``f`` unchanged (unary plus). """
        return f.raw_new(f.numer, f.denom)
    def __neg__(f):
        """Negate all coefficients in ``f``. """
        return f.raw_new(-f.numer, f.denom)
    def _extract_ground(self, element):
        """Try to interpret ``element`` as a ground value.

        Returns one of:
          ``(1, coeff, None)``   -- element of the ground domain,
          ``(-1, numer, denom)`` -- element of the domain's associated field,
          ``(0, None, None)``    -- not convertible.
        """
        domain = self.field.domain
        try:
            element = domain.convert(element)
        except CoercionFailed:
            if not domain.has_Field and domain.has_assoc_Field:
                ground_field = domain.get_field()
                try:
                    element = ground_field.convert(element)
                except CoercionFailed:
                    pass
                else:
                    return -1, ground_field.numer(element), ground_field.denom(element)
            return 0, None, None
        else:
            return 1, element, None
    def __add__(f, g):
        """Add rational functions ``f`` and ``g``. """
        field = f.field
        if not g:
            return f
        elif not f:
            return g
        elif isinstance(g, field.dtype):
            # Same field: use a common denominator (shortcut when equal).
            if f.denom == g.denom:
                return f.new(f.numer + g.numer, f.denom)
            else:
                return f.new(f.numer*g.denom + f.denom*g.numer, f.denom*g.denom)
        elif isinstance(g, field.ring.dtype):
            return f.new(f.numer + f.denom*g, f.denom)
        else:
            # Mixed parents: defer to whichever operand's domain contains the
            # other, otherwise fall through to ground coercion via __radd__.
            if isinstance(g, FracElement):
                if isinstance(field.domain, FractionField) and field.domain.field == g.field:
                    pass
                elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
                    return g.__radd__(f)
                else:
                    return NotImplemented
            elif isinstance(g, PolyElement):
                if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
                    pass
                else:
                    return g.__radd__(f)
        return f.__radd__(g)
    def __radd__(f, c):
        if isinstance(c, f.field.ring.dtype):
            return f.new(f.numer + f.denom*c, f.denom)
        op, g_numer, g_denom = f._extract_ground(c)
        if op == 1:
            return f.new(f.numer + f.denom*g_numer, f.denom)
        elif not op:
            return NotImplemented
        else:
            return f.new(f.numer*g_denom + f.denom*g_numer, f.denom*g_denom)
    def __sub__(f, g):
        """Subtract rational functions ``f`` and ``g``. """
        field = f.field
        if not g:
            return f
        elif not f:
            return -g
        elif isinstance(g, field.dtype):
            if f.denom == g.denom:
                return f.new(f.numer - g.numer, f.denom)
            else:
                return f.new(f.numer*g.denom - f.denom*g.numer, f.denom*g.denom)
        elif isinstance(g, field.ring.dtype):
            return f.new(f.numer - f.denom*g, f.denom)
        else:
            # Mixed parents: same dispatch strategy as __add__.
            if isinstance(g, FracElement):
                if isinstance(field.domain, FractionField) and field.domain.field == g.field:
                    pass
                elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
                    return g.__rsub__(f)
                else:
                    return NotImplemented
            elif isinstance(g, PolyElement):
                if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
                    pass
                else:
                    return g.__rsub__(f)
        op, g_numer, g_denom = f._extract_ground(g)
        if op == 1:
            return f.new(f.numer - f.denom*g_numer, f.denom)
        elif not op:
            return NotImplemented
        else:
            return f.new(f.numer*g_denom - f.denom*g_numer, f.denom*g_denom)
    def __rsub__(f, c):
        if isinstance(c, f.field.ring.dtype):
            return f.new(-f.numer + f.denom*c, f.denom)
        op, g_numer, g_denom = f._extract_ground(c)
        if op == 1:
            return f.new(-f.numer + f.denom*g_numer, f.denom)
        elif not op:
            return NotImplemented
        else:
            return f.new(-f.numer*g_denom + f.denom*g_numer, f.denom*g_denom)
    def __mul__(f, g):
        """Multiply rational functions ``f`` and ``g``. """
        field = f.field
        if not f or not g:
            return field.zero
        elif isinstance(g, field.dtype):
            return f.new(f.numer*g.numer, f.denom*g.denom)
        elif isinstance(g, field.ring.dtype):
            return f.new(f.numer*g, f.denom)
        else:
            # Mixed parents: same dispatch strategy as __add__.
            if isinstance(g, FracElement):
                if isinstance(field.domain, FractionField) and field.domain.field == g.field:
                    pass
                elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
                    return g.__rmul__(f)
                else:
                    return NotImplemented
            elif isinstance(g, PolyElement):
                if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
                    pass
                else:
                    return g.__rmul__(f)
        return f.__rmul__(g)
    def __rmul__(f, c):
        if isinstance(c, f.field.ring.dtype):
            return f.new(f.numer*c, f.denom)
        op, g_numer, g_denom = f._extract_ground(c)
        if op == 1:
            return f.new(f.numer*g_numer, f.denom)
        elif not op:
            return NotImplemented
        else:
            return f.new(f.numer*g_numer, f.denom*g_denom)
    def __truediv__(f, g):
        """Computes quotient of fractions ``f`` and ``g``. """
        field = f.field
        if not g:
            raise ZeroDivisionError
        elif isinstance(g, field.dtype):
            return f.new(f.numer*g.denom, f.denom*g.numer)
        elif isinstance(g, field.ring.dtype):
            return f.new(f.numer, f.denom*g)
        else:
            # Mixed parents: same dispatch strategy as __add__.
            if isinstance(g, FracElement):
                if isinstance(field.domain, FractionField) and field.domain.field == g.field:
                    pass
                elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
                    return g.__rtruediv__(f)
                else:
                    return NotImplemented
            elif isinstance(g, PolyElement):
                if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
                    pass
                else:
                    return g.__rtruediv__(f)
        op, g_numer, g_denom = f._extract_ground(g)
        if op == 1:
            return f.new(f.numer, f.denom*g_numer)
        elif not op:
            return NotImplemented
        else:
            return f.new(f.numer*g_denom, f.denom*g_numer)
    __div__ = __truediv__
    def __rtruediv__(f, c):
        if not f:
            raise ZeroDivisionError
        elif isinstance(c, f.field.ring.dtype):
            return f.new(f.denom*c, f.numer)
        op, g_numer, g_denom = f._extract_ground(c)
        if op == 1:
            return f.new(f.denom*g_numer, f.numer)
        elif not op:
            return NotImplemented
        else:
            return f.new(f.denom*g_numer, f.numer*g_denom)
    __rdiv__ = __rtruediv__
    def __pow__(f, n):
        """Raise ``f`` to the integer power ``n``; negative ``n`` inverts ``f``. """
        if n >= 0:
            # f is already cancelled, so powers need no further cancellation.
            return f.raw_new(f.numer**n, f.denom**n)
        elif not f:
            raise ZeroDivisionError
        else:
            return f.raw_new(f.denom**-n, f.numer**-n)
    def diff(f, x):
        """Computes partial derivative in ``x``.
        Examples
        --------
        >>> from sympy.polys.fields import field
        >>> from sympy.polys.domains import ZZ
        >>> _, x, y, z = field("x,y,z", ZZ)
        >>> ((x**2 + y)/(z + 1)).diff(x)
        2*x/(z + 1)
        """
        x = x.to_poly()
        # Quotient rule: (n/d)' = (n'*d - n*d') / d**2.
        return f.new(f.numer.diff(x)*f.denom - f.numer*f.denom.diff(x), f.denom**2)
    def __call__(f, *values):
        """Evaluate ``f`` at the given generator values (full or partial)."""
        if 0 < len(values) <= f.field.ngens:
            return f.evaluate(list(zip(f.field.gens, values)))
        else:
            raise ValueError("expected at least 1 and at most %s values, got %s" % (f.field.ngens, len(values)))
    def evaluate(f, x, a=None):
        """Evaluate at ``x = a``, or at a list of ``(gen, value)`` pairs."""
        if isinstance(x, list) and a is None:
            x = [ (X.to_poly(), a) for X, a in x ]
            numer, denom = f.numer.evaluate(x), f.denom.evaluate(x)
        else:
            x = x.to_poly()
            numer, denom = f.numer.evaluate(x, a), f.denom.evaluate(x, a)
        field = numer.ring.to_field()
        return field.new(numer, denom)
    def subs(f, x, a=None):
        """Substitute ``x -> a`` (or a list of pairs), staying in this field."""
        if isinstance(x, list) and a is None:
            x = [ (X.to_poly(), a) for X, a in x ]
            numer, denom = f.numer.subs(x), f.denom.subs(x)
        else:
            x = x.to_poly()
            numer, denom = f.numer.subs(x, a), f.denom.subs(x, a)
        return f.new(numer, denom)
    def compose(f, x, a=None):
        # Functional composition is not implemented for fraction elements.
        raise NotImplementedError
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import json
from builtins import str
from base64 import b64encode
from cassandra.util import Date, Time, SortedSet, OrderedMapSerializedKey
from datetime import datetime
from decimal import Decimal
from six import text_type, binary_type, PY3
from tempfile import NamedTemporaryFile
from uuid import UUID
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.hooks.cassandra_hook import CassandraHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class CassandraToGoogleCloudStorageOperator(BaseOperator):
    """
    Copy data from Cassandra to Google cloud storage in JSON format
    Note: Arrays of arrays are not supported.
    """
    # Fields rendered by the Jinja templating engine before execution;
    # a .cql file path passed as ``cql`` is read and templated too.
    template_fields = ('cql', 'bucket', 'filename', 'schema_filename',)
    template_ext = ('.cql',)
    ui_color = '#a0e08c'
    @apply_defaults
    def __init__(self,
                 cql,
                 bucket,
                 filename,
                 schema_filename=None,
                 approx_max_file_size_bytes=1900000000,
                 cassandra_conn_id='cassandra_default',
                 google_cloud_storage_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        """
        :param cql: The CQL to execute on the Cassandra table.
        :type cql: string
        :param bucket: The bucket to upload to.
        :type bucket: string
        :param filename: The filename to use as the object name when uploading
            to Google cloud storage. A {} should be specified in the filename
            to allow the operator to inject file numbers in cases where the
            file is split due to size.
        :type filename: string
        :param schema_filename: If set, the filename to use as the object name
            when uploading a .json file containing the BigQuery schema fields
            for the table that was dumped from Cassandra.
        :type schema_filename: string
        :param approx_max_file_size_bytes: This operator supports the ability
            to split large table dumps into multiple files (see notes in the
            filename param docs above). Google cloud storage allows for files
            to be a maximum of 4GB. This param allows developers to specify the
            file size of the splits.
        :type approx_max_file_size_bytes: long
        :param cassandra_conn_id: Reference to a specific Cassandra hook.
        :type cassandra_conn_id: string
        :param google_cloud_storage_conn_id: Reference to a specific Google
            cloud storage hook.
        :type google_cloud_storage_conn_id: string
        :param delegate_to: The account to impersonate, if any. For this to
            work, the service account making the request must have domain-wide
            delegation enabled.
        :type delegate_to: string
        """
        super(CassandraToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
        self.cql = cql
        self.bucket = bucket
        self.filename = filename
        self.schema_filename = schema_filename
        self.approx_max_file_size_bytes = approx_max_file_size_bytes
        self.cassandra_conn_id = cassandra_conn_id
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to
        # Set lazily in _query_cassandra so execute() can shut the cluster down.
        self.hook = None
    # Default Cassandra to BigQuery type mapping
    CQL_TYPE_MAP = {
        'BytesType': 'BYTES',
        'DecimalType': 'FLOAT',
        'UUIDType': 'BYTES',
        'BooleanType': 'BOOL',
        'ByteType': 'INTEGER',
        'AsciiType': 'STRING',
        'FloatType': 'FLOAT',
        'DoubleType': 'FLOAT',
        'LongType': 'INTEGER',
        'Int32Type': 'INTEGER',
        'IntegerType': 'INTEGER',
        'InetAddressType': 'STRING',
        'CounterColumnType': 'INTEGER',
        'DateType': 'TIMESTAMP',
        'SimpleDateType': 'DATE',
        'TimestampType': 'TIMESTAMP',
        'TimeUUIDType': 'BYTES',
        'ShortType': 'INTEGER',
        'TimeType': 'TIME',
        'DurationType': 'INTEGER',
        'UTF8Type': 'STRING',
        'VarcharType': 'STRING',
    }
    def execute(self, context):
        """Dump the CQL results to temp files, upload them to GCS, clean up."""
        cursor = self._query_cassandra()
        files_to_upload = self._write_local_data_files(cursor)
        # If a schema is set, create a BQ schema JSON file.
        if self.schema_filename:
            files_to_upload.update(self._write_local_schema_file(cursor))
        # Flush all files before uploading
        for file_handle in files_to_upload.values():
            file_handle.flush()
        self._upload_to_gcs(files_to_upload)
        # Close all temp file handles.
        for file_handle in files_to_upload.values():
            file_handle.close()
        # Close all sessions and connection associated with this Cassandra cluster
        self.hook.shutdown_cluster()
    def _query_cassandra(self):
        """
        Queries cassandra and returns a cursor to the results.
        """
        self.hook = CassandraHook(cassandra_conn_id=self.cassandra_conn_id)
        session = self.hook.get_conn()
        cursor = session.execute(self.cql)
        return cursor
    def _write_local_data_files(self, cursor):
        """
        Takes a cursor, and writes results to a local file.
        :return: A dictionary where keys are filenames to be used as object
            names in GCS, and values are file handles to local files that
            contain the data for the GCS objects.
        """
        file_no = 0
        tmp_file_handle = NamedTemporaryFile(delete=True)
        tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}
        for row in cursor:
            # cassandra-driver rows are namedtuple-like; _fields gives columns.
            row_dict = self.generate_data_dict(row._fields, row)
            s = json.dumps(row_dict)
            if PY3:
                s = s.encode('utf-8')
            tmp_file_handle.write(s)
            # Append newline to make dumps BigQuery compatible.
            tmp_file_handle.write(b'\n')
            # Roll over to a new temp file once the size budget is exceeded.
            if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
                file_no += 1
                tmp_file_handle = NamedTemporaryFile(delete=True)
                tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
        return tmp_file_handles
    def _write_local_schema_file(self, cursor):
        """
        Takes a cursor, and writes the BigQuery schema for the results to a
        local file system.
        :return: A dictionary where key is a filename to be used as an object
            name in GCS, and values are file handles to local files that
            contains the BigQuery schema fields in .json format.
        """
        schema = []
        tmp_schema_file_handle = NamedTemporaryFile(delete=True)
        for name, type in zip(cursor.column_names, cursor.column_types):
            schema.append(self.generate_schema_dict(name, type))
        json_serialized_schema = json.dumps(schema)
        if PY3:
            json_serialized_schema = json_serialized_schema.encode('utf-8')
        tmp_schema_file_handle.write(json_serialized_schema)
        return {self.schema_filename: tmp_schema_file_handle}
    def _upload_to_gcs(self, files_to_upload):
        """Upload each temp file to GCS under its target object name."""
        hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
            delegate_to=self.delegate_to)
        # NOTE(review): the loop variable 'object' shadows the builtin;
        # rename (e.g. to 'object_name') when convenient.
        for object, tmp_file_handle in files_to_upload.items():
            hook.upload(self.bucket, object, tmp_file_handle.name, 'application/json')
    @classmethod
    def generate_data_dict(cls, names, values):
        """Zip column names with converted values into a JSON-ready dict."""
        row_dict = {}
        for name, value in zip(names, values):
            row_dict.update({name: cls.convert_value(name, value)})
        return row_dict
    @classmethod
    def convert_value(cls, name, value):
        """Convert a single Cassandra value to a JSON/BigQuery-safe value."""
        # NOTE(review): any falsy value (0, '', False, (), Decimal(0)) is
        # returned unconverted here; Decimal(0) would then fail json.dumps —
        # confirm whether `value is None` was intended.
        if not value:
            return value
        elif isinstance(value, (text_type, int, float, bool, dict)):
            return value
        elif isinstance(value, binary_type):
            return b64encode(value).decode('ascii')
        elif isinstance(value, UUID):
            return b64encode(value.bytes).decode('ascii')
        elif isinstance(value, (datetime, Date)):
            return str(value)
        elif isinstance(value, Decimal):
            return float(value)
        elif isinstance(value, Time):
            # Drop sub-second precision for BigQuery TIME compatibility.
            return str(value).split('.')[0]
        elif isinstance(value, (list, SortedSet)):
            return cls.convert_array_types(name, value)
        elif hasattr(value, '_fields'):
            return cls.convert_user_type(name, value)
        elif isinstance(value, tuple):
            return cls.convert_tuple_type(name, value)
        elif isinstance(value, OrderedMapSerializedKey):
            return cls.convert_map_type(name, value)
        else:
            raise AirflowException('unexpected value: ' + str(value))
    @classmethod
    def convert_array_types(cls, name, value):
        """Convert each element of a list/set recursively."""
        return [cls.convert_value(name, nested_value) for nested_value in value]
    @classmethod
    def convert_user_type(cls, name, value):
        """
        Converts a user type to RECORD that contains n fields, where n is the
        number of attributes. Each element in the user type class will be converted to its
        corresponding data type in BQ.
        """
        names = value._fields
        values = [cls.convert_value(name, getattr(value, name)) for name in names]
        return cls.generate_data_dict(names, values)
    @classmethod
    def convert_tuple_type(cls, name, value):
        """
        Converts a tuple to RECORD that contains n fields, each will be converted
        to its corresponding data type in bq and will be named 'field_<index>', where
        index is determined by the order of the tuple elments defined in cassandra.
        """
        names = ['field_' + str(i) for i in range(len(value))]
        values = [cls.convert_value(name, value) for name, value in zip(names, value)]
        return cls.generate_data_dict(names, values)
    @classmethod
    def convert_map_type(cls, name, value):
        """
        Converts a map to a repeated RECORD that contains two fields: 'key' and 'value',
        each will be converted to its corresopnding data type in BQ.
        """
        converted_map = []
        for k, v in zip(value.keys(), value.values()):
            converted_map.append({
                'key': cls.convert_value('key', k),
                'value': cls.convert_value('value', v)
            })
        return converted_map
    @classmethod
    def generate_schema_dict(cls, name, type):
        """Build one BigQuery schema field dict (name/type/mode[/fields])."""
        field_schema = dict()
        field_schema.update({'name': name})
        field_schema.update({'type': cls.get_bq_type(type)})
        field_schema.update({'mode': cls.get_bq_mode(type)})
        fields = cls.get_bq_fields(name, type)
        if fields:
            field_schema.update({'fields': fields})
        return field_schema
    @classmethod
    def get_bq_fields(cls, name, type):
        """Return nested schema fields for record-like Cassandra types."""
        fields = []
        if not cls.is_simple_type(type):
            names, types = [], []
            if cls.is_array_type(type) and cls.is_record_type(type.subtypes[0]):
                names = type.subtypes[0].fieldnames
                types = type.subtypes[0].subtypes
            elif cls.is_record_type(type):
                names = type.fieldnames
                types = type.subtypes
            # Tuples have no field names in Cassandra; synthesize field_<i>.
            if types and not names and type.cassname == 'TupleType':
                names = ['field_' + str(i) for i in range(len(types))]
            # Maps are modeled as repeated {key, value} records.
            elif types and not names and type.cassname == 'MapType':
                names = ['key', 'value']
            for name, type in zip(names, types):
                field = cls.generate_schema_dict(name, type)
                fields.append(field)
        return fields
    @classmethod
    def is_simple_type(cls, type):
        # A scalar type with a direct entry in the CQL -> BQ mapping.
        return type.cassname in CassandraToGoogleCloudStorageOperator.CQL_TYPE_MAP
    @classmethod
    def is_array_type(cls, type):
        return type.cassname in ['ListType', 'SetType']
    @classmethod
    def is_record_type(cls, type):
        return type.cassname in ['UserType', 'TupleType', 'MapType']
    @classmethod
    def get_bq_type(cls, type):
        """Map a Cassandra column type to a BigQuery type name."""
        if cls.is_simple_type(type):
            return CassandraToGoogleCloudStorageOperator.CQL_TYPE_MAP[type.cassname]
        elif cls.is_record_type(type):
            return 'RECORD'
        elif cls.is_array_type(type):
            # Arrays take their element's type; REPEATED mode marks the array.
            return cls.get_bq_type(type.subtypes[0])
        else:
            raise AirflowException('Not a supported type: ' + type.cassname)
    @classmethod
    def get_bq_mode(cls, type):
        """Map a Cassandra column type to a BigQuery field mode."""
        if cls.is_array_type(type) or type.cassname == 'MapType':
            return 'REPEATED'
        elif cls.is_record_type(type) or cls.is_simple_type(type):
            return 'NULLABLE'
        else:
            raise AirflowException('Not a supported type: ' + type.cassname)
| |
# -*- encoding: utf-8 -*-
#
# Author: John Tran <jhtran@att.com>
# Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from __future__ import absolute_import
import datetime
import eventlet
import operator
import os
import types
from sqlalchemy import and_
from sqlalchemy import desc
from sqlalchemy import func
from sqlalchemy.orm import aliased
from sqlalchemy import pool
from ceilometer.openstack.common.db import exception as dbexc
import ceilometer.openstack.common.db.sqlalchemy.session as sqlalchemy_session
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import log
from ceilometer.openstack.common import timeutils
from ceilometer.storage import base
from ceilometer.storage import models as api_models
from ceilometer.storage.sqlalchemy import migration
from ceilometer.storage.sqlalchemy import models
from ceilometer import utils
LOG = log.getLogger(__name__)
class SQLAlchemyStorage(base.StorageEngine):
    """Put the data into a SQLAlchemy database.
    Tables::
    - user
      - { id: user uuid }
    - source
      - { id: source id }
    - project
      - { id: project uuid }
    - meter
      - the raw incoming data
      - { id: meter id
          counter_name: counter name
          user_id: user uuid            (->user.id)
          project_id: project uuid      (->project.id)
          resource_id: resource uuid    (->resource.id)
          resource_metadata: metadata dictionaries
          counter_type: counter type
          counter_unit: counter unit
          counter_volume: counter volume
          timestamp: datetime
          message_signature: message signature
          message_id: message uuid
          }
    - resource
      - the metadata for resources
      - { id: resource uuid
          resource_metadata: metadata dictionaries
          project_id: project uuid      (->project.id)
          user_id: user uuid            (->user.id)
          }
    - sourceassoc
      - the relationships
      - { meter_id: meter id            (->meter.id)
          project_id: project uuid      (->project.id)
          resource_id: resource uuid    (->resource.id)
          user_id: user uuid            (->user.id)
          source_id: source id          (->source.id)
          }
    """
    @staticmethod
    def get_connection(conf):
        """Return a Connection instance based on the configuration settings.

        :param conf: configuration object carrying database settings
        """
        return Connection(conf)
# Maps the Python type of a metadata value to the SQLAlchemy model used to
# store values of that type.  The Python-2-only names (unicode, long,
# types.NoneType) are deliberate: this module targets Python 2.
META_TYPE_MAP = {bool: models.MetaBool,
                 str: models.MetaText,
                 unicode: models.MetaText,
                 types.NoneType: models.MetaText,
                 int: models.MetaBigInt,
                 long: models.MetaBigInt,
                 float: models.MetaFloat}
def apply_metaquery_filter(session, query, metaquery):
    """Narrow *query* to rows whose metadata satisfies every
    ``metadata.<key> == value`` pair in *metaquery*.

    :param session: session used for original query
    :param query: Query instance
    :param metaquery: dict with metadata to match on.
    """
    for full_key, value in metaquery.iteritems():
        meta_key = full_key[9:]  # drop the leading 'metadata.' prefix
        value_model = META_TYPE_MAP.get(type(value))
        if value_model is None:
            raise NotImplementedError(_('Query on %(key)s is of %(value)s '
                                        'type and is not supported') %
                                      {"key": full_key, "value": type(value)})
        # One typed subquery per key/value pair; each further restricts
        # the result set (conditions are ANDed together).
        subq = session.query(value_model).filter(
            and_(value_model.meta_key == meta_key,
                 value_model.value == value)).subquery()
        query = query.filter_by(id=subq.c.id)
    return query
def make_query_from_filter(session, query, sample_filter, require_meter=True):
    """Return a query dictionary based on the settings in the filter.
    :param session: session used for metadata subqueries
    :param query: Query instance to refine
    :param sample_filter: SampleFilter instance
    :param require_meter: If true and the filter does not have a meter,
                          raise an error.
    """
    if sample_filter.meter:
        query = query.filter(models.Meter.counter_name == sample_filter.meter)
    elif require_meter:
        raise RuntimeError(_('Missing required meter specifier'))
    if sample_filter.source:
        query = query.filter(models.Meter.sources.any(id=sample_filter.source))
    if sample_filter.start:
        ts_start = sample_filter.start
        # 'gt' means strictly-after; any other op defaults to >=.
        if sample_filter.start_timestamp_op == 'gt':
            query = query.filter(models.Meter.timestamp > ts_start)
        else:
            query = query.filter(models.Meter.timestamp >= ts_start)
    if sample_filter.end:
        ts_end = sample_filter.end
        # 'le' means up-to-and-including; any other op is strictly-before.
        if sample_filter.end_timestamp_op == 'le':
            query = query.filter(models.Meter.timestamp <= ts_end)
        else:
            query = query.filter(models.Meter.timestamp < ts_end)
    if sample_filter.user:
        query = query.filter_by(user_id=sample_filter.user)
    if sample_filter.project:
        query = query.filter_by(project_id=sample_filter.project)
    if sample_filter.resource:
        query = query.filter_by(resource_id=sample_filter.resource)
    if sample_filter.metaquery:
        query = apply_metaquery_filter(session, query,
                                       sample_filter.metaquery)
    return query
class Connection(base.Connection):
"""SqlAlchemy connection."""
    def __init__(self, conf):
        """Set up the SQLAlchemy session and an optional greenthread pool.

        :param conf: configuration object with a ``database.connection`` URL
        """
        url = conf.database.connection
        if url == 'sqlite://':
            # Bare in-memory sqlite is a test configuration; let the test
            # runner redirect it through CEILOMETER_TEST_SQL_URL.
            conf.database.connection = \
                os.environ.get('CEILOMETER_TEST_SQL_URL', url)
        session = sqlalchemy_session.get_session()
        engine = session.get_bind()
        if isinstance(engine.pool, pool.QueuePool):
            # Size the greenthread pool to the DB connection pool so we
            # never spawn more concurrent writers than connections.
            poolsize = engine.pool.size() + engine.pool._max_overflow
            self.pool = eventlet.GreenPool(poolsize)
        else:
            # e.g. sqlite's NullPool: record samples synchronously instead.
            self.pool = None
def upgrade(self):
session = sqlalchemy_session.get_session()
migration.db_sync(session.get_bind())
def clear(self):
session = sqlalchemy_session.get_session()
engine = session.get_bind()
for table in reversed(models.Base.metadata.sorted_tables):
engine.execute(table.delete())
    @staticmethod
    def _create_or_update(session, model_class, _id, source=None, **kwargs):
        """Fetch or create the ``model_class`` row with primary key ``_id``,
        associate ``source`` with it (if given) and set extra attributes.

        Returns the model instance, or None when no id was provided.
        """
        if not _id:
            return None
        try:
            # create a nested session for the case of two call of
            # record_metering_data run in parallel to not fail the
            # record of this sample
            # (except for sqlite, that doesn't support nested
            # transaction and doesn't have concurrency problem)
            nested = session.connection().dialect.name != 'sqlite'
            # raise dbexc.DBDuplicateEntry manually for sqlite
            # to not break the current session
            if not nested and session.query(model_class).get(str(_id)):
                raise dbexc.DBDuplicateEntry()
            with session.begin(nested=nested,
                               subtransactions=not nested):
                obj = model_class(id=str(_id))
                session.add(obj)
        except dbexc.DBDuplicateEntry:
            # requery the object from the db if this is an other
            # parallel/previous call of record_metering_data that
            # have successfully created this object
            obj = session.query(model_class).get(str(_id))
        # update the object
        # NOTE: Py2 truthy filter() — only append when not already linked.
        if source and not filter(lambda x: x.id == source.id, obj.sources):
            obj.sources.append(source)
        for k in kwargs:
            setattr(obj, k, kwargs[k])
        return obj
def record_metering_data(self, data):
if self.pool:
if self.pool.waiting() > 0:
LOG.warn(_("Sqlalchemy connection pool is full, "
"perhaps pool_size should be increased"))
self.pool.spawn(self._real_record_metering_data, data)
else:
self._real_record_metering_data(data)
    @classmethod
    def _real_record_metering_data(cls, data):
        """Write the data to the backend storage system.
        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter
        """
        session = sqlalchemy_session.get_session()
        with session.begin():
            # Record the updated resource metadata
            rmetadata = data['resource_metadata']
            # Ensure the referenced source/user/project/resource rows exist
            # before attaching the meter row to them.
            source = cls._create_or_update(session, models.Source,
                                           data['source'])
            user = cls._create_or_update(session, models.User, data['user_id'],
                                         source)
            project = cls._create_or_update(session, models.Project,
                                            data['project_id'], source)
            resource = cls._create_or_update(session, models.Resource,
                                             data['resource_id'], source,
                                             user=user, project=project,
                                             resource_metadata=rmetadata)
            # Record the raw data for the meter.
            meter = models.Meter(counter_type=data['counter_type'],
                                 counter_unit=data['counter_unit'],
                                 counter_name=data['counter_name'],
                                 resource=resource)
            session.add(meter)
            # NOTE: Py2 truthy filter() — link the source only once.
            if not filter(lambda x: x.id == source.id, meter.sources):
                meter.sources.append(source)
            meter.project = project
            meter.user = user
            meter.timestamp = data['timestamp']
            meter.resource_metadata = rmetadata
            meter.counter_volume = data['counter_volume']
            meter.message_signature = data['message_signature']
            meter.message_id = data['message_id']
            # Flush so meter.id is assigned before the Meta* rows reference it.
            session.flush()
            if rmetadata:
                if isinstance(rmetadata, dict):
                    # Flatten nested metadata into dotted key paths; each value
                    # goes into the typed Meta* table so it can be queried.
                    for key, v in utils.dict_to_keyval(rmetadata):
                        try:
                            _model = META_TYPE_MAP[type(v)]
                        except KeyError:
                            LOG.warn(_("Unknown metadata type. Key (%s) will "
                                       "not be queryable."), key)
                        else:
                            session.add(_model(id=meter.id,
                                               meta_key=key,
                                               value=v))
    @staticmethod
    def clear_expired_metering_data(ttl):
        """Clear expired data from the backend storage system according to the
        time-to-live.

        :param ttl: Number of seconds to keep records for.
        """
        session = sqlalchemy_session.get_session()
        with session.begin():
            # Delete every sample older than the TTL cutoff.
            end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
            meter_query = session.query(models.Meter)\
                .filter(models.Meter.timestamp < end)
            for meter_obj in meter_query.all():
                session.delete(meter_obj)

            # Garbage-collect users no longer referenced by any sample,
            # alarm, or alarm-change row.
            query = session.query(models.User).filter(
                ~models.User.id.in_(session.query(models.Meter.user_id)
                                    .group_by(models.Meter.user_id)),
                ~models.User.id.in_(session.query(models.Alarm.user_id)
                                    .group_by(models.Alarm.user_id)),
                ~models.User.id.in_(session.query(models.AlarmChange.user_id)
                                    .group_by(models.AlarmChange.user_id))
            )
            for user_obj in query.all():
                session.delete(user_obj)

            # Garbage-collect projects no longer referenced anywhere,
            # including as the on_behalf_of project of an alarm change.
            query = session.query(models.Project)\
                .filter(~models.Project.id.in_(
                    session.query(models.Meter.project_id)
                    .group_by(models.Meter.project_id)),
                    ~models.Project.id.in_(
                        session.query(models.Alarm.project_id)
                        .group_by(models.Alarm.project_id)),
                    ~models.Project.id.in_(
                        session.query(models.AlarmChange.project_id)
                        .group_by(models.AlarmChange.project_id)),
                    ~models.Project.id.in_(
                        session.query(models.AlarmChange.on_behalf_of)
                        .group_by(models.AlarmChange.on_behalf_of))
                )
            for project_obj in query.all():
                session.delete(project_obj)

            # Garbage-collect resources with no remaining samples.
            query = session.query(models.Resource)\
                .filter(~models.Resource.id.in_(
                    session.query(models.Meter.resource_id).group_by(
                        models.Meter.resource_id)))
            for res_obj in query.all():
                session.delete(res_obj)
@staticmethod
def get_users(source=None):
"""Return an iterable of user id strings.
:param source: Optional source filter.
"""
session = sqlalchemy_session.get_session()
query = session.query(models.User.id)
if source is not None:
query = query.filter(models.User.sources.any(id=source))
return (x[0] for x in query.all())
@staticmethod
def get_projects(source=None):
"""Return an iterable of project id strings.
:param source: Optional source filter.
"""
session = sqlalchemy_session.get_session()
query = session.query(models.Project.id)
if source:
query = query.filter(models.Project.sources.any(id=source))
return (x[0] for x in query.all())
@staticmethod
def get_resources(user=None, project=None, source=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None,
metaquery={}, resource=None, pagination=None):
"""Return an iterable of api_models.Resource instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param source: Optional source filter.
:param start_timestamp: Optional modified timestamp start range.
:param start_timestamp_op: Optonal start time operator, like gt, ge.
:param end_timestamp: Optional modified timestamp end range.
:param end_timestamp_op: Optional end time operator, like lt, le.
:param metaquery: Optional dict with metadata to match on.
:param resource: Optional resource filter.
:param pagination: Optional pagination query.
"""
# We probably want to raise these early, since we don't know from here
# if they will be handled. We don't want extra wait or work for it to
# just fail.
if pagination:
raise NotImplementedError(_('Pagination not implemented'))
# (thomasm) We need to get the max timestamp first, since that's the
# most accurate. We also need to filter down in the subquery to
# constrain what we have to JOIN on later.
session = sqlalchemy_session.get_session()
ts_subquery = session.query(
models.Meter.resource_id,
func.max(models.Meter.timestamp).label("max_ts"),
func.min(models.Meter.timestamp).label("min_ts")
).group_by(models.Meter.resource_id)
# Here are the basic 'eq' operation filters for the sample data.
for column, value in [(models.Meter.resource_id, resource),
(models.Meter.user_id, user),
(models.Meter.project_id, project)]:
if value:
ts_subquery = ts_subquery.filter(column == value)
if source:
ts_subquery = ts_subquery.filter(
models.Meter.sources.any(id=source))
if metaquery:
ts_subquery = apply_metaquery_filter(session,
ts_subquery,
metaquery)
# Here we limit the samples being used to a specific time period,
# if requested.
if start_timestamp:
if start_timestamp_op == 'gt':
ts_subquery = ts_subquery.filter(
models.Meter.timestamp > start_timestamp)
else:
ts_subquery = ts_subquery.filter(
models.Meter.timestamp >= start_timestamp)
if end_timestamp:
if end_timestamp_op == 'le':
ts_subquery = ts_subquery.filter(
models.Meter.timestamp <= end_timestamp)
else:
ts_subquery = ts_subquery.filter(
models.Meter.timestamp < end_timestamp)
ts_subquery = ts_subquery.subquery()
# Now we need to get the max Meter.id out of the leftover results, to
# break any ties.
agg_subquery = session.query(
func.max(models.Meter.id).label("max_id"),
ts_subquery
).filter(
models.Meter.resource_id == ts_subquery.c.resource_id,
models.Meter.timestamp == ts_subquery.c.max_ts
).group_by(
ts_subquery.c.resource_id,
ts_subquery.c.max_ts,
ts_subquery.c.min_ts
).subquery()
query = session.query(
models.Meter,
agg_subquery.c.min_ts,
agg_subquery.c.max_ts
).filter(
models.Meter.id == agg_subquery.c.max_id
)
for meter, first_ts, last_ts in query.all():
yield api_models.Resource(
resource_id=meter.resource_id,
project_id=meter.project_id,
first_sample_timestamp=first_ts,
last_sample_timestamp=last_ts,
source=meter.sources[0].id,
user_id=meter.user_id,
metadata=meter.resource_metadata,
)
@staticmethod
def get_meters(user=None, project=None, resource=None, source=None,
metaquery={}, pagination=None):
"""Return an iterable of api_models.Meter instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional ID of the resource.
:param source: Optional source filter.
:param metaquery: Optional dict with metadata to match on.
:param pagination: Optional pagination query.
"""
if pagination:
raise NotImplementedError(_('Pagination not implemented'))
session = sqlalchemy_session.get_session()
# Meter table will store large records and join with resource
# will be very slow.
# subquery_meter is used to reduce meter records
# by selecting a record for each (resource_id, counter_name).
# max() is used to choice a meter record, so the latest record
# is selected for each (resource_id, counter_name).
#
subquery_meter = session.query(func.max(models.Meter.id).label('id'))\
.group_by(models.Meter.resource_id,
models.Meter.counter_name).subquery()
# The SQL of query_meter is essentially:
#
# SELECT meter.* FROM meter INNER JOIN
# (SELECT max(meter.id) AS id FROM meter
# GROUP BY meter.resource_id, meter.counter_name) AS anon_2
# ON meter.id = anon_2.id
#
query_meter = session.query(models.Meter).\
join(subquery_meter, models.Meter.id == subquery_meter.c.id)
if metaquery:
query_meter = apply_metaquery_filter(session,
query_meter,
metaquery)
alias_meter = aliased(models.Meter, query_meter.subquery())
query = session.query(models.Resource, alias_meter).join(
alias_meter, models.Resource.id == alias_meter.resource_id)
if user is not None:
query = query.filter(models.Resource.user_id == user)
if source is not None:
query = query.filter(models.Resource.sources.any(id=source))
if resource:
query = query.filter(models.Resource.id == resource)
if project is not None:
query = query.filter(models.Resource.project_id == project)
for resource, meter in query.all():
yield api_models.Meter(
name=meter.counter_name,
type=meter.counter_type,
unit=meter.counter_unit,
resource_id=resource.id,
project_id=resource.project_id,
source=resource.sources[0].id,
user_id=resource.user_id)
    @staticmethod
    def get_samples(sample_filter, limit=None):
        """Return an iterable of api_models.Samples.

        :param sample_filter: Filter.
        :param limit: Maximum number of results to return.
        """
        if limit == 0:
            # An explicit limit of zero yields no samples at all.
            return
        session = sqlalchemy_session.get_session()
        query = session.query(models.Meter)
        query = make_query_from_filter(session, query, sample_filter,
                                       require_meter=False)
        if limit:
            query = query.limit(limit)
        # NOTE(review): the limit is applied before this ordering, so rows
        # are selected first and only then sorted by timestamp; confirm a
        # "latest N samples" semantic is not expected here.
        samples = query.from_self()\
            .order_by(desc(models.Meter.timestamp)).all()

        for s in samples:
            # Remove the id generated by the database when
            # the sample was inserted. It is an implementation
            # detail that should not leak outside of the driver.
            yield api_models.Sample(
                # Replace 'sources' with 'source' to meet the caller's
                # expectation, Meter.sources contains one and only one
                # source in the current implementation.
                source=s.sources[0].id,
                counter_name=s.counter_name,
                counter_type=s.counter_type,
                counter_unit=s.counter_unit,
                counter_volume=s.counter_volume,
                user_id=s.user_id,
                project_id=s.project_id,
                resource_id=s.resource_id,
                timestamp=s.timestamp,
                resource_metadata=s.resource_metadata,
                message_id=s.message_id,
                message_signature=s.message_signature,
            )
@staticmethod
def _make_stats_query(sample_filter, groupby):
select = [
models.Meter.counter_unit.label('unit'),
func.min(models.Meter.timestamp).label('tsmin'),
func.max(models.Meter.timestamp).label('tsmax'),
func.avg(models.Meter.counter_volume).label('avg'),
func.sum(models.Meter.counter_volume).label('sum'),
func.min(models.Meter.counter_volume).label('min'),
func.max(models.Meter.counter_volume).label('max'),
func.count(models.Meter.counter_volume).label('count'),
]
session = sqlalchemy_session.get_session()
if groupby:
group_attributes = [getattr(models.Meter, g) for g in groupby]
select.extend(group_attributes)
query = session.query(*select)
if groupby:
query = query.group_by(*group_attributes)
return make_query_from_filter(session, query, sample_filter)
@staticmethod
def _stats_result_to_model(result, period, period_start,
period_end, groupby):
duration = (timeutils.delta_seconds(result.tsmin, result.tsmax)
if result.tsmin is not None and result.tsmax is not None
else None)
return api_models.Statistics(
unit=result.unit,
count=int(result.count),
min=result.min,
max=result.max,
avg=result.avg,
sum=result.sum,
duration_start=result.tsmin,
duration_end=result.tsmax,
duration=duration,
period=period,
period_start=period_start,
period_end=period_end,
groupby=(dict((g, getattr(result, g)) for g in groupby)
if groupby else None)
)
    def get_meter_statistics(self, sample_filter, period=None, groupby=None):
        """Return an iterable of api_models.Statistics instances containing
        meter statistics described by the query parameters.

        The filter must have a meter value set.
        """
        if groupby:
            for group in groupby:
                if group not in ['user_id', 'project_id', 'resource_id']:
                    raise NotImplementedError(
                        _("Unable to group by these fields"))

        if not period:
            # No period requested: emit a single aggregate row per group.
            for res in self._make_stats_query(sample_filter, groupby):
                if res.count:
                    yield self._stats_result_to_model(res, 0,
                                                      res.tsmin, res.tsmax,
                                                      groupby)
            return

        if not sample_filter.start or not sample_filter.end:
            # Overall tsmin/tsmax are needed to bound the period iteration
            # when the filter has no explicit start/end time.
            res = self._make_stats_query(sample_filter, None).first()

        query = self._make_stats_query(sample_filter, groupby)
        # HACK(jd) This is an awful method to compute stats by period, but
        # since we're trying to be SQL agnostic we have to write portable
        # code, so here it is, admire! We're going to do one request to get
        # stats by period. We would like to use GROUP BY, but there's no
        # portable way to manipulate timestamp in SQL, so we can't.
        # NOTE: `res` is only referenced below when start or end is missing,
        # which is exactly when it was computed above (short-circuit `or`).
        for period_start, period_end in base.iter_period(
                sample_filter.start or res.tsmin,
                sample_filter.end or res.tsmax,
                period):
            q = query.filter(models.Meter.timestamp >= period_start)
            q = q.filter(models.Meter.timestamp < period_end)
            for r in q.all():
                if r.count:
                    yield self._stats_result_to_model(
                        result=r,
                        period=int(timeutils.delta_seconds(period_start,
                                                           period_end)),
                        period_start=period_start,
                        period_end=period_end,
                        groupby=groupby
                    )
@staticmethod
def _row_to_alarm_model(row):
return api_models.Alarm(alarm_id=row.id,
enabled=row.enabled,
type=row.type,
name=row.name,
description=row.description,
timestamp=row.timestamp,
user_id=row.user_id,
project_id=row.project_id,
state=row.state,
state_timestamp=row.state_timestamp,
ok_actions=row.ok_actions,
alarm_actions=row.alarm_actions,
insufficient_data_actions=
row.insufficient_data_actions,
rule=row.rule,
repeat_actions=row.repeat_actions)
def get_alarms(self, name=None, user=None,
project=None, enabled=None, alarm_id=None, pagination=None):
"""Yields a lists of alarms that match filters
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param enabled: Optional boolean to list disable alarm.
:param alarm_id: Optional alarm_id to return one alarm.
:param pagination: Optional pagination query.
"""
if pagination:
raise NotImplementedError(_('Pagination not implemented'))
session = sqlalchemy_session.get_session()
query = session.query(models.Alarm)
if name is not None:
query = query.filter(models.Alarm.name == name)
if enabled is not None:
query = query.filter(models.Alarm.enabled == enabled)
if user is not None:
query = query.filter(models.Alarm.user_id == user)
if project is not None:
query = query.filter(models.Alarm.project_id == project)
if alarm_id is not None:
query = query.filter(models.Alarm.id == alarm_id)
return (self._row_to_alarm_model(x) for x in query.all())
def create_alarm(self, alarm):
"""Create an alarm.
:param alarm: The alarm to create.
"""
session = sqlalchemy_session.get_session()
with session.begin():
Connection._create_or_update(session, models.User,
alarm.user_id)
Connection._create_or_update(session, models.Project,
alarm.project_id)
alarm_row = models.Alarm(id=alarm.alarm_id)
alarm_row.update(alarm.as_dict())
session.add(alarm_row)
return self._row_to_alarm_model(alarm_row)
def update_alarm(self, alarm):
"""Update an alarm.
:param alarm: the new Alarm to update
"""
session = sqlalchemy_session.get_session()
with session.begin():
Connection._create_or_update(session, models.User,
alarm.user_id)
Connection._create_or_update(session, models.Project,
alarm.project_id)
alarm_row = session.merge(models.Alarm(id=alarm.alarm_id))
alarm_row.update(alarm.as_dict())
return self._row_to_alarm_model(alarm_row)
@staticmethod
def delete_alarm(alarm_id):
"""Delete a alarm
:param alarm_id: ID of the alarm to delete
"""
session = sqlalchemy_session.get_session()
with session.begin():
session.query(models.Alarm).filter(
models.Alarm.id == alarm_id).delete()
@staticmethod
def _row_to_alarm_change_model(row):
return api_models.AlarmChange(event_id=row.event_id,
alarm_id=row.alarm_id,
type=row.type,
detail=row.detail,
user_id=row.user_id,
project_id=row.project_id,
on_behalf_of=row.on_behalf_of,
timestamp=row.timestamp)
    def get_alarm_changes(self, alarm_id, on_behalf_of,
                          user=None, project=None, type=None,
                          start_timestamp=None, start_timestamp_op=None,
                          end_timestamp=None, end_timestamp_op=None):
        """Yields list of AlarmChanges describing alarm history

        Changes are always sorted in reverse order of occurrence, given
        the importance of currency.

        Segregation for non-administrative users is done on the basis
        of the on_behalf_of parameter. This allows such users to have
        visibility on both the changes initiated by themselves directly
        (generally creation, rule changes, or deletion) and also on those
        changes initiated on their behalf by the alarming service (state
        transitions after alarm thresholds are crossed).

        :param alarm_id: ID of alarm to return changes for
        :param on_behalf_of: ID of tenant to scope changes query (None for
                             administrative user, indicating all projects)
        :param user: Optional ID of user to return changes for
        :param project: Optional ID of project to return changes for
        :param type: Optional change type
        :param start_timestamp: Optional modified timestamp start range
        :param start_timestamp_op: Optional timestamp start range operation
        :param end_timestamp: Optional modified timestamp end range
        :param end_timestamp_op: Optional timestamp end range operation
        """
        session = sqlalchemy_session.get_session()
        query = session.query(models.AlarmChange)
        query = query.filter(models.AlarmChange.alarm_id == alarm_id)

        if on_behalf_of is not None:
            query = query.filter(
                models.AlarmChange.on_behalf_of == on_behalf_of)
        if user is not None:
            query = query.filter(models.AlarmChange.user_id == user)
        if project is not None:
            query = query.filter(models.AlarmChange.project_id == project)
        if type is not None:
            query = query.filter(models.AlarmChange.type == type)
        if start_timestamp:
            # 'gt' is strict; any other operator value falls back to >=.
            if start_timestamp_op == 'gt':
                query = query.filter(
                    models.AlarmChange.timestamp > start_timestamp)
            else:
                query = query.filter(
                    models.AlarmChange.timestamp >= start_timestamp)
        if end_timestamp:
            # 'le' is inclusive; any other operator value falls back to <.
            if end_timestamp_op == 'le':
                query = query.filter(
                    models.AlarmChange.timestamp <= end_timestamp)
            else:
                query = query.filter(
                    models.AlarmChange.timestamp < end_timestamp)

        # Most recent changes first.
        query = query.order_by(desc(models.AlarmChange.timestamp))
        return (self._row_to_alarm_change_model(x) for x in query.all())
def record_alarm_change(self, alarm_change):
"""Record alarm change event.
"""
session = sqlalchemy_session.get_session()
with session.begin():
Connection._create_or_update(session, models.User,
alarm_change['user_id'])
Connection._create_or_update(session, models.Project,
alarm_change['project_id'])
Connection._create_or_update(session, models.Project,
alarm_change['on_behalf_of'])
alarm_change_row = models.AlarmChange(
event_id=alarm_change['event_id'])
alarm_change_row.update(alarm_change)
session.add(alarm_change_row)
@staticmethod
def _get_or_create_trait_type(trait_type, data_type, session=None):
"""Find if this trait already exists in the database, and
if it does not, create a new entry in the trait type table.
"""
if session is None:
session = sqlalchemy_session.get_session()
with session.begin(subtransactions=True):
tt = session.query(models.TraitType).filter(
models.TraitType.desc == trait_type,
models.TraitType.data_type == data_type).first()
if not tt:
tt = models.TraitType(trait_type, data_type)
session.add(tt)
return tt
@classmethod
def _make_trait(cls, trait_model, event, session=None):
"""Make a new Trait from a Trait model.
Doesn't flush or add to session.
"""
trait_type = cls._get_or_create_trait_type(trait_model.name,
trait_model.dtype,
session)
value_map = models.Trait._value_map
values = {'t_string': None, 't_float': None,
't_int': None, 't_datetime': None}
value = trait_model.value
values[value_map[trait_model.dtype]] = value
return models.Trait(trait_type, event, **values)
@staticmethod
def _get_or_create_event_type(event_type, session=None):
"""Here, we check to see if an event type with the supplied
name already exists. If not, we create it and return the record.
This may result in a flush.
"""
if session is None:
session = sqlalchemy_session.get_session()
with session.begin(subtransactions=True):
et = session.query(models.EventType).filter(
models.EventType.desc == event_type).first()
if not et:
et = models.EventType(event_type)
session.add(et)
return et
@classmethod
def _record_event(cls, session, event_model):
"""Store a single Event, including related Traits.
"""
with session.begin(subtransactions=True):
event_type = cls._get_or_create_event_type(event_model.event_type,
session=session)
event = models.Event(event_model.message_id, event_type,
event_model.generated)
session.add(event)
new_traits = []
if event_model.traits:
for trait in event_model.traits:
t = cls._make_trait(trait, event, session=session)
session.add(t)
new_traits.append(t)
# Note: we don't flush here, explicitly (unless a new trait or event
# does it). Otherwise, just wait until all the Events are staged.
return (event, new_traits)
def record_events(self, event_models):
"""Write the events to SQL database via sqlalchemy.
:param event_models: a list of model.Event objects.
Returns a list of events that could not be saved in a
(reason, event) tuple. Reasons are enumerated in
storage.model.Event
Flush when they're all added, unless new EventTypes or
TraitTypes are added along the way.
"""
session = sqlalchemy_session.get_session()
events = []
problem_events = []
for event_model in event_models:
event = None
try:
with session.begin():
event = self._record_event(session, event_model)
except dbexc.DBDuplicateEntry:
problem_events.append((api_models.Event.DUPLICATE,
event_model))
except Exception as e:
LOG.exception(_('Failed to record event: %s') % e)
problem_events.append((api_models.Event.UNKNOWN_PROBLEM,
event_model))
events.append(event)
return problem_events
    def get_events(self, event_filter):
        """Return an iterable of model.Event objects.

        :param event_filter: EventFilter instance
        """
        start = event_filter.start_time
        end = event_filter.end_time
        session = sqlalchemy_session.get_session()
        LOG.debug(_("Getting events that match filter: %s") % event_filter)
        with session.begin():
            event_query = session.query(models.Event)

            # Build up the join conditions
            event_join_conditions = [models.EventType.id ==
                                     models.Event.event_type_id]

            if event_filter.event_type:
                event_join_conditions\
                    .append(models.EventType.desc == event_filter.event_type)

            event_query = event_query.join(models.EventType,
                                           and_(*event_join_conditions))

            # Build up the where conditions
            event_filter_conditions = []
            if event_filter.message_id:
                event_filter_conditions\
                    .append(models.Event.message_id == event_filter.message_id)
            if start:
                event_filter_conditions.append(models.Event.generated >= start)
            if end:
                event_filter_conditions.append(models.Event.generated <= end)

            if event_filter_conditions:
                event_query = event_query\
                    .filter(and_(*event_filter_conditions))

            event_models_dict = {}
            if event_filter.traits_filter:
                # Narrow the event query with one subquery join per trait
                # filter: a matching event must satisfy every filter.
                for trait_filter in event_filter.traits_filter:
                    # Build a sub query that joins Trait to TraitType
                    # where the trait name matches
                    # NOTE: pop() mutates the caller-supplied filter dict.
                    trait_name = trait_filter.pop('key')
                    conditions = [models.Trait.trait_type_id ==
                                  models.TraitType.id,
                                  models.TraitType.desc == trait_name]

                    # Remaining keys select the typed value column to match.
                    for key, value in trait_filter.iteritems():
                        if key == 'string':
                            conditions.append(models.Trait.t_string == value)
                        elif key == 'integer':
                            conditions.append(models.Trait.t_int == value)
                        elif key == 'datetime':
                            conditions.append(models.Trait.t_datetime == value)
                        elif key == 'float':
                            conditions.append(models.Trait.t_float == value)

                    trait_query = session.query(models.Trait.event_id)\
                        .join(models.TraitType, and_(*conditions)).subquery()

                    event_query = event_query\
                        .join(trait_query,
                              models.Event.id == trait_query.c.event_id)
            else:
                # If there are no trait filters, grab the events from the db
                query = session.query(models.Event.id,
                                      models.Event.generated,
                                      models.Event.message_id,
                                      models.EventType.desc)\
                    .join(models.EventType,
                          and_(*event_join_conditions))
                if event_filter_conditions:
                    query = query.filter(and_(*event_filter_conditions))
                for (id, generated, message_id, desc) in query.all():
                    event_models_dict[id] = api_models.Event(message_id,
                                                             desc,
                                                             generated,
                                                             [])

            # Build event models for the events
            event_query = event_query.subquery()
            query = session.query(models.Trait)\
                .join(models.TraitType,
                      models.Trait.trait_type_id == models.TraitType.id)\
                .join(event_query, models.Trait.event_id == event_query.c.id)

            # Now convert the sqlalchemy objects back into Models ...
            for trait in query.all():
                event = event_models_dict.get(trait.event_id)
                if not event:
                    # Only reachable on the traits-filter path, where the
                    # events were not prefetched above.
                    event = api_models.Event(
                        trait.event.message_id,
                        trait.event.event_type.desc,
                        trait.event.generated, [])
                    event_models_dict[trait.event_id] = event
                trait_model = api_models.Trait(trait.trait_type.desc,
                                               trait.trait_type.data_type,
                                               trait.get_value())
                event.append_trait(trait_model)

        event_models = event_models_dict.values()
        return sorted(event_models, key=operator.attrgetter('generated'))
@staticmethod
def get_event_types():
"""Return all event types as an iterable of strings.
"""
session = sqlalchemy_session.get_session()
with session.begin():
query = session.query(models.EventType.desc)\
.order_by(models.EventType.desc)
for name in query.all():
# The query returns a tuple with one element.
yield name[0]
@staticmethod
def get_trait_types(event_type):
"""Return a dictionary containing the name and data type of
the trait type. Only trait types for the provided event_type are
returned.
:param event_type: the type of the Event
"""
session = sqlalchemy_session.get_session()
LOG.debug(_("Get traits for %s") % event_type)
with session.begin():
query = (session.query(models.TraitType.desc,
models.TraitType.data_type)
.join(models.Trait,
models.Trait.trait_type_id ==
models.TraitType.id)
.join(models.Event,
models.Event.id ==
models.Trait.event_id)
.join(models.EventType,
and_(models.EventType.id ==
models.Event.id,
models.EventType.desc ==
event_type))
.group_by(models.TraitType.desc,
models.TraitType.data_type)
.distinct())
for desc, type in query.all():
yield {'name': desc, 'data_type': type}
@staticmethod
def get_traits(event_type, trait_type=None):
"""Return all trait instances associated with an event_type. If
trait_type is specified, only return instances of that trait type.
:param event_type: the type of the Event to filter by
:param trait_type: the name of the Trait to filter by
"""
session = sqlalchemy_session.get_session()
with session.begin():
trait_type_filters = [models.TraitType.id ==
models.Trait.trait_type_id]
if trait_type:
trait_type_filters.append(models.TraitType.desc == trait_type)
query = (session.query(models.Trait)
.join(models.TraitType, and_(*trait_type_filters))
.join(models.Event,
models.Event.id == models.Trait.event_id)
.join(models.EventType,
and_(models.EventType.id ==
models.Event.event_type_id,
models.EventType.desc == event_type)))
for trait in query.all():
type = trait.trait_type
yield api_models.Trait(name=type.desc,
dtype=type.data_type,
value=trait.get_value())
| |
#!/usr/bin/env python
import sys
import numpy as np
from sklearn import tree
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
# Contains data about the match and each of the players in the game.
class Match():
    """One recorded game: match metadata plus the ten participating
    players split into two teams of five.
    """
    def __init__(self, matchId, matchDuration=None, queueType=None, mapId=None):
        # Match metadata
        self.matchId = matchId
        self.matchDuration = matchDuration  # length of match in seconds
        self.queueType = queueType  # type of queue (e.g. ranked solo, normals, etc.)
        self.mapId = mapId  # type of map
        # Each team list holds five Player objects, filled by the CSV loaders.
        self.team1 = []
        self.team1Won = None
        self.team2 = []
        self.team2Won = None

    def __str__(self):
        return 'Match ' + str(self.matchId)
# Contains data about a specific player
class Player():
    """Data about one player in a match: stats from the current game,
    champion history, and account history.
    """

    # Ordinal value for each known ranked tier; unknown strings map to
    # None via dict.get(), replacing the old 8-branch elif chain.
    _TIER_RANKS = {
        'UNRANKED': 0,
        'BRONZE': 1,
        'SILVER': 2,
        'GOLD': 3,
        'PLATINUM': 4,
        'DIAMOND': 5,
        'MASTER': 6,
        'CHALLENGER': 7,
    }

    def __init__(self, summonerId, championId):
        # Current game stats
        self.summonerId = summonerId
        self.championId = championId
        self.participantId = None  # player number (= any int value from 1 to 10)
        self.spell1Id = None  # first summoner spell id
        self.spell2Id = None  # second summoner spell id
        # One completed game
        self.won = None
        self.kills = None
        self.deaths = None
        self.assists = None
        self.totalDamageDealtToChampions = None
        self.wardsPlaced = None
        self.wardsKilled = None
        self.inhibitorKills = None
        self.towerKills = None
        self.matchHistoryUrl = None
        # Champ history
        self.kda = None
        self.winrate = None
        self.mlvl = None
        # Player history
        self.highestAchievedSeasonTier = None  # Highest rank achieved

    # Add stats for the player from a completed game
    def addTestingGameData(self, participantId, spell1Id, spell2Id, highestAchievedSeasonTier, won,
                           kills, deaths, assists, totalDamageDealtToChampions, wardsPlaced,
                           wardsKilled, inhibitorKills, towerKills, matchHistoryUrl):
        self.participantId = participantId
        self.spell1Id = spell1Id
        self.spell2Id = spell2Id
        # Dict lookup replaces the old if/elif ladder; .get() yields None
        # for unknown tiers, matching the old default behaviour.
        self.highestAchievedSeasonTier = self._TIER_RANKS.get(highestAchievedSeasonTier)
        self.won = won
        self.kills = kills
        self.deaths = deaths
        self.assists = assists
        self.totalDamageDealtToChampions = totalDamageDealtToChampions
        self.wardsPlaced = wardsPlaced
        self.wardsKilled = wardsKilled
        self.inhibitorKills = inhibitorKills
        self.towerKills = towerKills
        self.matchHistoryUrl = matchHistoryUrl

    # Add stats based on the player's history
    def addTrainingGameData(self, kda, winrate, mlvl):
        self.kda = kda
        self.winrate = winrate
        self.mlvl = mlvl

    def __str__(self):
        s = 'Summoner ' + str(self.summonerId)
        s += ' played champion ' + str(self.championId)
        return s
# Takes the data from matches.csv and converts it into a list of matches
def getCurrentGameDataFromCSV(matches):
print 'Reading data from matches.csv'
# mydata = np.recfromcsv('matches.csv', delimiter=',');
mydata = np.recfromcsv('matches.csv', delimiter=',', filling_values=np.nan, case_sensitive=True,
deletechars='', replace_space=' ')
sumid = 'summonerId'
cid = 'championId'
pid = 'participantId'
sid1 = 'spell1Id'
sid2 = 'spell2Id'
rank = 'highestAchievedSeasonTier'
won = 'winner'
kills = 'kills'
deaths = 'deaths'
assists = 'assists'
dmg = 'totalDamageDealtToChampions'
wp = 'wardsPlaced'
wk = 'wardsKilled'
ik = 'inhibitorKills'
tk = 'towerKills'
url = 'matchHistoryUri'
for oneGame in mydata:
m = Match(oneGame['matchId'], oneGame['matchDuration'], oneGame['queueType'], oneGame['mapId'])
# Add all player data to players list (training data)
for i in range(1, 11):
p = Player(oneGame[sumid + str(i)], oneGame[cid + str(i)])
p.addTestingGameData(oneGame[pid + str(i)], oneGame[sid1 + str(i)], oneGame[sid2 + str(i)],
oneGame[rank + str(i)], oneGame[won + str(i)], oneGame[kills + str(i)],
oneGame[deaths + str(i)], oneGame[assists + str(i)],
oneGame[dmg + str(i)], oneGame[wp + str(i)], oneGame[wk + str(i)],
oneGame[ik + str(i)], oneGame[tk + str(i)], oneGame[url + str(i)])
if (i <= 5):
m.team1.append(p)
else:
m.team2.append(p)
m.team1Won = m.team1[0].won
m.team2Won = m.team2[0].won
assert m.team1Won != m.team2Won, str('Both team 1 won and team 2 won for match ', m.matchId)
matches[m.matchId] = m
print 'Finished parsing data from matches.csv'
# Takes the data from ParticipantInfo.csv and converts it into a list of matches
def getDataFromParticipantInfoCSV(matches):
print 'Reading data from ParticipantInfo.csv'
mydata = np.recfromcsv('ParticipantInfo.csv', delimiter=',', filling_values=np.nan, case_sensitive=True,
deletechars='', replace_space=' ')
sumid = 'summonerId'
cid = 'championId'
kda = 'kda'
winrate = 'winrate'
mlvl = 'masteryLevel'
m = None # Current match
currParticipant = 10
# Add all player data for each match
# First 5 players are team 1, next 5 players are team 2
for onePlayer in mydata:
if (currParticipant == 10):
currParticipant = 0
if (m is not None):
matches.append(m)
m = Match(onePlayer['matchId'])
p = Player(onePlayer[sumid], onePlayer[cid])
p.addTrainingGameData(onePlayer[kda], onePlayer[winrate], onePlayer[mlvl])
if (currParticipant < 5):
m.team1.append(p)
else:
m.team2.append(p)
currParticipant += 1
print 'Finished parsing data from ParticipantInfo.csv'
# Add training data
def addDataToClf(data, result, players):
    # Appends each player's (kills, deaths, assists) tuple to *data* and
    # the matching win flag to *result*.
    # BUG FIX: Player exposes `won`, not `winner`; the old attribute
    # lookup raised AttributeError for every player.
    for p in players:
        data.append((p.kills, p.deaths, p.assists))
        result.append(p.won)
# Splits list l into n size lists
def splitList(l, n):
    # Splits list l into consecutive pieces of length n (the last piece
    # may be shorter). Uses range() instead of xrange(): identical on
    # Python 2 for this use and portable to Python 3, where xrange is gone.
    return [l[x:x + n] for x in range(0, len(l), n)]
# Add training data
def addDataToClf1(data, results, players, win):
    # Appends one training sample: the team's aggregate attribute vector
    # and whether that team won.
    features = calculateAttributes(players)
    data.append(features)
    results.append(win)
# Add training data - using both teams
def addDataToClf2(data, results, team1, team2, win):
    # Appends one training sample built from the element-wise difference
    # between the two teams' aggregate attribute vectors.
    ours = calculateAttributes(team1)
    theirs = calculateAttributes(team2)
    data.append([a - b for a, b in zip(ours, theirs)])
    results.append(win)
# Returns a list of data used in classifier
def calculateAttributes(players):
    # Returns [avg KDA, avg winrate, avg mastery level] for the team.
    # GENERALIZED: the old code always divided by a hard-coded 5.0; this
    # divides by len(players), which is identical for the 5-player teams
    # it is called with but also correct for any other team size.
    if not players:
        # Matches the old behaviour for an empty list (0.0/5.0 == 0.0).
        return [0.0, 0.0, 0.0]
    count = float(len(players))
    avgKDA = sum(p.kda for p in players) / count
    avgWinrate = sum(p.winrate for p in players) / count
    avgMasteryLvl = sum(p.mlvl for p in players) / count
    return [avgKDA, avgWinrate, avgMasteryLvl]
# Uses a decision tree based on KDA, Win rate, Mastery points
def DecisionTreeClassifer1(testingMatches, trainingMatches):
    """Train/evaluate a decision tree on match data with 10-fold CV.

    Method 1 uses each team's averaged attributes as one sample; the second
    pass appends team-vs-team attribute differences to the SAME data/results
    lists (so the second cross-validation run scores the combined data set).
    Results are printed, nothing is returned.
    """
    data = []  # Contains data we are making decisions on - method 1
    results = []  # True if team won and false if team lost
    for i in range(0, len(trainingMatches)):
        m = trainingMatches[i]
        # Looks up result from testingMatches
        addDataToClf1(data, results, m.team1, testingMatches[m.matchId].team1Won)
        addDataToClf1(data, results, m.team2, testingMatches[m.matchId].team2Won)
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(data, results)
    # Cross validation - use each set as training/testing
    scores = cross_validation.cross_val_score(clf, data, results, cv=10)
    print 'accuracy with 10-fold cv:'
    print 'method 1 (looking at single team):', scores.mean()
    # NOTE(review): the samples below are appended to the same lists as
    # method 1, so this second score is over the mixed feature sets.
    for i in range(0, len(trainingMatches)):
        m = trainingMatches[i]
        # Looks up result from testingMatches
        addDataToClf2(data, results, m.team1, m.team2, testingMatches[m.matchId].team1Won)
        addDataToClf2(data, results, m.team2, m.team1, testingMatches[m.matchId].team2Won)
    scores = cross_validation.cross_val_score(clf, data, results, cv=10)
    print 'method 1 (looking at both teams):', scores.mean()
    # Dead code kept for reference: manual train/test split experiment.
    '''
    trainingMatches = splitList(trainingMatches, 100) # split matches into 9 lists of size 1000
    # Only using first set as training and rest as testing
    trainingSet = trainingMatches[0]
    testingSet = trainingMatches[1:]
    for i in range(0, len(trainingSet)):
        m = trainingSet[i]
        # Looks up result from testingMatches
        addDataToClf1(data, results, m.team1, testingMatches[m.matchId].team1Won)
        addDataToClf1(data, results, m.team2, testingMatches[m.matchId].team2Won)
    print data[0]
    clf = tree.DecisionTreeClassifier()
    clf = clf.fit(data, results)
    predictedResults = []
    for test in testingSet:
        for m in test:
            teams = [m.team1, m.team2]
            for t in teams:
                prediction = clf.predict([calculateAttributes(t)])
                predictedResults.append(prediction)
    trueResults = []
    for test in testingSet:
        for m in test:
            trueResults.append(testingMatches[m.matchId].team1Won)
            trueResults.append(testingMatches[m.matchId].team2Won)
    print 'accuracy: ', accuracy_score(trueResults, predictedResults)
    '''
if __name__ == '__main__':
    # Load ground-truth results and training attributes, then train/evaluate.
    testingMatches = {}  # Dictionary of matches from matches.csv with results
    trainingMatches = []  # List of matches from ParticipantInfo.csv where result is unknown
    getCurrentGameDataFromCSV(testingMatches)
    getDataFromParticipantInfoCSV(trainingMatches)
    DecisionTreeClassifer1(testingMatches, trainingMatches)
| |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import sys
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _
from nova import utils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)

# Configuration options registered under the [hyperv] group.
hyperv_opts = [
    cfg.StrOpt('instances_path_share',
               default="",
               help='The name of a Windows share name mapped to the '
                    '"instances_path" dir and used by the resize feature '
                    'to copy files to the target host. If left blank, an '
                    'administrative share will be used, looking for the same '
                    '"instances_path" used locally'),
]

CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('instances_path', 'nova.compute.manager')

# Windows ERROR_INVALID_NAME (winerror 123): the directory/file name syntax
# is incorrect -- raised e.g. when a UNC path cannot be accessed.
ERROR_INVALID_NAME = 123
class PathUtils(object):
    """Path helpers for the Hyper-V Nova driver.

    Wraps local filesystem primitives (so they can be stubbed in unit tests)
    and adds instance-directory / VHD / configdrive path resolution plus SMB
    share mapping management used by the resize and migration features.
    """

    def __init__(self):
        # WMI namespace exposing Msft_SmbMapping for SMB share management.
        self._smb_conn = wmi.WMI(moniker=r"root\Microsoft\Windows\SMB")

    def open(self, path, mode):
        """Wrapper on __builtin__.open used to simplify unit testing."""
        import __builtin__
        return __builtin__.open(path, mode)

    def exists(self, path):
        return os.path.exists(path)

    def makedirs(self, path):
        os.makedirs(path)

    def remove(self, path):
        os.remove(path)

    def rename(self, src, dest):
        os.rename(src, dest)

    def copyfile(self, src, dest):
        self.copy(src, dest)

    def copy(self, src, dest):
        # With large files this is 2x-3x faster than shutil.copy(src, dest),
        # especially when copying to a UNC target.
        # shutil.copyfileobj(...) with a proper buffer is better than
        # shutil.copy(...) but still 20% slower than a shell copy.
        # It can be replaced with Win32 API calls to avoid the process
        # spawning overhead.
        output, ret = utils.execute('cmd.exe', '/C', 'copy', '/Y', src, dest)
        if ret:
            raise IOError(_('The file copy from %(src)s to %(dest)s failed')
                          % {'src': src, 'dest': dest})

    def rmtree(self, path):
        shutil.rmtree(path)

    def get_instances_dir(self, remote_server=None):
        """Return the local instances dir, or its UNC path on remote_server."""
        local_instance_path = os.path.normpath(CONF.instances_path)
        if remote_server:
            if CONF.hyperv.instances_path_share:
                path = CONF.hyperv.instances_path_share
            else:
                # Use an administrative share (e.g. C$) derived from the
                # local instances path.
                path = local_instance_path.replace(':', '$')
            return ('\\\\%(remote_server)s\\%(path)s' %
                    {'remote_server': remote_server, 'path': path})
        else:
            return local_instance_path

    def _check_create_dir(self, path):
        if not self.exists(path):
            LOG.debug('Creating directory: %s', path)
            self.makedirs(path)

    def _check_remove_dir(self, path):
        if self.exists(path):
            LOG.debug('Removing directory: %s', path)
            self.rmtree(path)

    def _get_instances_sub_dir(self, dir_name, remote_server=None,
                               create_dir=True, remove_dir=False):
        """Resolve (and optionally remove/create) a subdir of instances_path."""
        instances_path = self.get_instances_dir(remote_server)
        path = os.path.join(instances_path, dir_name)
        try:
            if remove_dir:
                self._check_remove_dir(path)
            if create_dir:
                self._check_create_dir(path)
            return path
        except WindowsError as ex:
            if ex.winerror == ERROR_INVALID_NAME:
                raise vmutils.HyperVException(_(
                    "Cannot access \"%(instances_path)s\", make sure the "
                    "path exists and that you have the proper permissions. "
                    "In particular Nova-Compute must not be executed with the "
                    "builtin SYSTEM account or other accounts unable to "
                    "authenticate on a remote host.") %
                    {'instances_path': instances_path})
            raise

    def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
                                     remove_dir=False):
        dir_name = '%s_revert' % instance_name
        return self._get_instances_sub_dir(dir_name, None, create_dir,
                                           remove_dir)

    def get_instance_dir(self, instance_name, remote_server=None,
                         create_dir=True, remove_dir=False):
        return self._get_instances_sub_dir(instance_name, remote_server,
                                           create_dir, remove_dir)

    def _lookup_vhd_path(self, instance_name, vhd_path_func):
        """Return the first existing vhd/vhdx path for the instance, or None."""
        vhd_path = None
        for format_ext in ['vhd', 'vhdx']:
            test_path = vhd_path_func(instance_name, format_ext)
            if self.exists(test_path):
                vhd_path = test_path
                break
        return vhd_path

    def lookup_root_vhd_path(self, instance_name):
        return self._lookup_vhd_path(instance_name, self.get_root_vhd_path)

    def lookup_configdrive_path(self, instance_name):
        """Return the first existing configdrive path for any known format."""
        configdrive_path = None
        for format_ext in constants.DISK_FORMAT_MAP:
            test_path = self.get_configdrive_path(instance_name, format_ext)
            if self.exists(test_path):
                configdrive_path = test_path
                break
        return configdrive_path

    def lookup_ephemeral_vhd_path(self, instance_name):
        return self._lookup_vhd_path(instance_name,
                                     self.get_ephemeral_vhd_path)

    def get_root_vhd_path(self, instance_name, format_ext):
        instance_path = self.get_instance_dir(instance_name)
        return os.path.join(instance_path, 'root.' + format_ext.lower())

    def get_configdrive_path(self, instance_name, format_ext,
                             remote_server=None):
        instance_path = self.get_instance_dir(instance_name, remote_server)
        return os.path.join(instance_path, 'configdrive.' + format_ext.lower())

    def get_ephemeral_vhd_path(self, instance_name, format_ext):
        instance_path = self.get_instance_dir(instance_name)
        return os.path.join(instance_path, 'ephemeral.' + format_ext.lower())

    def get_base_vhd_dir(self):
        return self._get_instances_sub_dir('_base')

    def get_export_dir(self, instance_name):
        # The export dir is always recreated from scratch.
        dir_name = os.path.join('export', instance_name)
        return self._get_instances_sub_dir(dir_name, create_dir=True,
                                           remove_dir=True)

    def get_vm_console_log_paths(self, vm_name, remote_server=None):
        """Return (current, rotated) console log paths for the VM."""
        instance_dir = self.get_instance_dir(vm_name,
                                             remote_server)
        console_log_path = os.path.join(instance_dir, 'console.log')
        return console_log_path, console_log_path + '.1'

    def check_smb_mapping(self, smbfs_share):
        """Return True if the share is mapped and reachable.

        A stale (mapped but unreachable) mapping is force-unmounted and
        False is returned so the caller can remount.
        """
        mappings = self._smb_conn.Msft_SmbMapping(RemotePath=smbfs_share)
        if not mappings:
            return False
        if os.path.exists(smbfs_share):
            LOG.debug('Share already mounted: %s', smbfs_share)
            return True
        else:
            LOG.debug('Share exists but is unavailable: %s ', smbfs_share)
            self.unmount_smb_share(smbfs_share, force=True)
            return False

    def mount_smb_share(self, smbfs_share, username=None, password=None):
        """Mount an SMB share, raising HyperVException on WMI failure."""
        try:
            LOG.debug('Mounting share: %s', smbfs_share)
            self._smb_conn.Msft_SmbMapping.Create(RemotePath=smbfs_share,
                                                  UserName=username,
                                                  Password=password)
        except wmi.x_wmi as exc:
            # BUG FIX: the message and its arguments were previously packed
            # into a tuple instead of being %-interpolated, so the raised
            # exception carried an unformatted (msg, dict) tuple.
            err_msg = _(
                'Unable to mount SMBFS share: %(smbfs_share)s '
                'WMI exception: %(wmi_exc)s') % {'smbfs_share': smbfs_share,
                                                 'wmi_exc': exc}
            raise vmutils.HyperVException(err_msg)

    def unmount_smb_share(self, smbfs_share, force=False):
        mappings = self._smb_conn.Msft_SmbMapping(RemotePath=smbfs_share)
        if not mappings:
            LOG.debug('Share %s is not mounted. Skipping unmount.',
                      smbfs_share)
            return
        for mapping in mappings:
            # Due to a bug in the WMI module, getting the output of
            # methods returning None will raise an AttributeError
            try:
                mapping.Remove(Force=force)
            except AttributeError:
                pass
            except wmi.x_wmi:
                # If this fails, a 'Generic Failure' exception is raised.
                # This happens even if we unforcefully unmount an in-use
                # share, for which reason we'll simply ignore it in this
                # case.
                if force:
                    # BUG FIX: interpolate the share name into the message
                    # instead of passing it as a second exception argument.
                    raise vmutils.HyperVException(
                        _("Could not unmount share: %s") % smbfs_share)

    def copy_configdrive(self, instance_name, dest_host):
        """Copy the instance's ISO configdrive to the destination host."""
        local_configdrive_path = self.get_configdrive_path(
            instance_name, constants.DVD_FORMAT)
        remote_configdrive_path = self.get_configdrive_path(
            instance_name, constants.DVD_FORMAT, remote_server=dest_host)
        self.copyfile(local_configdrive_path,
                      remote_configdrive_path)
| |
import contextlib
import sys
import os
import itertools
import hashlib
import queue
import random
import select
import time
import OpenSSL.crypto
import logging
from mitmproxy.test.tutils import treq
from mitmproxy.utils import strutils
from mitmproxy.net import tcp
from mitmproxy import certs
from mitmproxy.net import websockets
from mitmproxy.net import socks
from mitmproxy import exceptions
from mitmproxy.net.http import http1
from mitmproxy.types import basethread
from pathod import log
from pathod import language
from pathod.protocols import http2
# Silence per-frame debug output from the hpack (HTTP/2 header compression)
# library used by the http2 protocol implementation.
logging.getLogger("hpack").setLevel(logging.WARNING)
def xrepr(s):
    """Return repr(s) with the surrounding quote characters stripped."""
    quoted = repr(s)
    return quoted[1:len(quoted) - 1]
class PathocError(Exception):
    """Raised when pathoc fails to establish or negotiate a connection
    (HTTP CONNECT, SOCKS handshake, or TLS setup)."""
    pass
class SSLInfo:
    """Holds TLS connection details (peer certificate chain, negotiated
    cipher, ALPN protocol) and renders them as a readable report via str()."""

    def __init__(self, certchain, cipher, alp):
        self.certchain, self.cipher, self.alp = certchain, cipher, alp

    def __str__(self):
        parts = [
            "Application Layer Protocol: %s" % strutils.always_str(self.alp, "utf8"),
            "Cipher: %s, %s bit, %s" % self.cipher,
            "SSL certificate chain:"
        ]
        for n, i in enumerate(self.certchain):
            parts.append(" Certificate [%s]" % n)
            parts.append("\tSubject: ")
            for cn in i.get_subject().get_components():
                parts.append("\t\t%s=%s" % (
                    strutils.always_str(cn[0], "utf8"),
                    strutils.always_str(cn[1], "utf8"))
                )
            parts.append("\tIssuer: ")
            for cn in i.get_issuer().get_components():
                parts.append("\t\t%s=%s" % (
                    strutils.always_str(cn[0], "utf8"),
                    strutils.always_str(cn[1], "utf8"))
                )
            parts.extend(
                [
                    "\tVersion: %s" % i.get_version(),
                    "\tValidity: %s - %s" % (
                        strutils.always_str(i.get_notBefore(), "utf8"),
                        strutils.always_str(i.get_notAfter(), "utf8")
                    ),
                    "\tSerial: %s" % i.get_serial_number(),
                    "\tAlgorithm: %s" % strutils.always_str(i.get_signature_algorithm(), "utf8")
                ]
            )
            pk = i.get_pubkey()
            types = {
                OpenSSL.crypto.TYPE_RSA: "RSA",
                OpenSSL.crypto.TYPE_DSA: "DSA"
            }
            # BUG FIX: output typo "Uknown" -> "Unknown".
            t = types.get(pk.type(), "Unknown")
            parts.append("\tPubkey: %s bit %s" % (pk.bits(), t))
            s = certs.SSLCert(i)
            if s.altnames:
                parts.append("\tSANs: %s" % " ".join(strutils.always_str(n, "utf8") for n in s.altnames))
        return "\n".join(parts)
class WebsocketFrameReader(basethread.BaseThread):
    """Background thread reading websocket frames from *rfile* into
    frames_queue; a trailing None on the queue marks reader shutdown."""
    def __init__(
        self,
        rfile,
        logfp,
        showresp,
        hexdump,
        ws_read_limit,
        timeout
    ):
        basethread.BaseThread.__init__(self, "WebsocketFrameReader")
        self.timeout = timeout
        # Stop after this many frames; None means unlimited.
        self.ws_read_limit = ws_read_limit
        self.logfp = logfp
        self.showresp = showresp
        self.hexdump = hexdump
        self.rfile = rfile
        # Put anything on `terminate` to request the reader thread to stop.
        self.terminate = queue.Queue()
        self.frames_queue = queue.Queue()
        self.logger = log.ConnectionLogger(
            self.logfp,
            self.hexdump,
            False,
            rfile if showresp else None,
            None
        )

    @contextlib.contextmanager
    def terminator(self):
        # Guarantees a terminating None is queued however run() exits.
        yield
        self.frames_queue.put(None)

    def run(self):
        starttime = time.time()
        with self.terminator():
            while True:
                if self.ws_read_limit == 0:
                    return
                try:
                    # 50ms poll so the terminate queue is checked regularly.
                    r, _, _ = select.select([self.rfile], [], [], 0.05)
                except OSError:
                    return
                delta = time.time() - starttime
                if not r and self.timeout and delta > self.timeout:
                    return
                try:
                    self.terminate.get_nowait()
                    return
                except queue.Empty:
                    pass
                for rfile in r:
                    # NOTE: `log` here shadows the module-level pathod.log
                    # import within this block.
                    with self.logger.ctx() as log:
                        try:
                            frm = websockets.Frame.from_file(self.rfile)
                        except exceptions.TcpDisconnect:
                            return
                        self.frames_queue.put(frm)
                        log("<< %s" % repr(frm.header))
                        if self.ws_read_limit is not None:
                            self.ws_read_limit -= 1
                        # Reset the inactivity timer on each received frame.
                        starttime = time.time()
class Pathoc(tcp.TCPClient):
    """A pathological HTTP/1, HTTP/2 and websocket test client.

    Wraps a TCP connection with optional TLS/ALPN, proxy CONNECT and SOCKS5
    handshakes, and serves pathoc request specifications over it.
    """
    def __init__(
        self,
        address,

        # SSL
        ssl=None,
        sni=None,
        ssl_version=tcp.SSL_DEFAULT_METHOD,
        ssl_options=tcp.SSL_DEFAULT_OPTIONS,
        clientcert=None,
        ciphers=None,

        # HTTP/2
        use_http2=False,
        http2_skip_connection_preface=False,
        http2_framedump=False,

        # Websockets
        ws_read_limit=None,

        # Network
        timeout=None,

        # Output control
        showreq=False,
        showresp=False,
        explain=False,
        hexdump=False,
        ignorecodes=(),
        ignoretimeout=False,
        showsummary=False,
        fp=sys.stdout
    ):
        """
        spec: A request specification
        showreq: Print requests
        showresp: Print responses
        explain: Print request explanation
        showssl: Print info on SSL connection
        hexdump: When printing requests or responses, use hex dump output
        showsummary: Show a summary of requests
        ignorecodes: Sequence of return codes to ignore
        """
        tcp.TCPClient.__init__(self, address)

        self.ssl, self.sni = ssl, sni
        self.clientcert = clientcert
        self.ssl_version = ssl_version
        self.ssl_options = ssl_options
        self.ciphers = ciphers
        self.sslinfo = None

        self.use_http2 = use_http2
        self.http2_skip_connection_preface = http2_skip_connection_preface
        self.http2_framedump = http2_framedump

        self.ws_read_limit = ws_read_limit
        self.timeout = timeout

        self.showreq = showreq
        self.showresp = showresp
        self.explain = explain
        self.hexdump = hexdump
        self.ignorecodes = ignorecodes
        self.ignoretimeout = ignoretimeout
        self.showsummary = showsummary
        self.fp = fp

        # Set by websocket_start() once a connection upgrades to websockets.
        self.ws_framereader = None

        if self.use_http2:
            if not tcp.HAS_ALPN:  # pragma: no cover
                log.write_raw(
                    self.fp,
                    "HTTP/2 requires ALPN support. "
                    "Please use OpenSSL >= 1.0.2. "
                    "Pathoc might not be working as expected without ALPN.",
                    timestamp=False
                )
            self.protocol = http2.HTTP2StateProtocol(self, dump_frames=self.http2_framedump)
        else:
            self.protocol = http1

        self.settings = language.Settings(
            is_client=True,
            staticdir=os.getcwd(),
            unconstrained_file_access=True,
            request_host=self.address.host,
            protocol=self.protocol,
        )

    def http_connect(self, connect_to):
        """Issue an HTTP CONNECT for (host, port); raise PathocError on failure."""
        self.wfile.write(
            b'CONNECT %s:%d HTTP/1.1\r\n' % (connect_to[0].encode("idna"), connect_to[1]) +
            b'\r\n'
        )
        self.wfile.flush()
        try:
            resp = self.protocol.read_response(self.rfile, treq(method=b"CONNECT"))
            if resp.status_code != 200:
                raise exceptions.HttpException("Unexpected status code: %s" % resp.status_code)
        except exceptions.HttpException as e:
            raise PathocError(
                "Proxy CONNECT failed: %s" % repr(e)
            )

    def socks_connect(self, connect_to):
        """Perform a SOCKS5 (no-auth) handshake; raise PathocError on failure."""
        try:
            client_greet = socks.ClientGreeting(
                socks.VERSION.SOCKS5,
                [socks.METHOD.NO_AUTHENTICATION_REQUIRED]
            )
            client_greet.to_file(self.wfile)
            self.wfile.flush()

            server_greet = socks.ServerGreeting.from_file(self.rfile)
            server_greet.assert_socks5()
            if server_greet.method != socks.METHOD.NO_AUTHENTICATION_REQUIRED:
                raise socks.SocksError(
                    socks.METHOD.NO_ACCEPTABLE_METHODS,
                    "pathoc only supports SOCKS without authentication"
                )

            connect_request = socks.Message(
                socks.VERSION.SOCKS5,
                socks.CMD.CONNECT,
                socks.ATYP.DOMAINNAME,
                tcp.Address.wrap(connect_to)
            )
            connect_request.to_file(self.wfile)
            self.wfile.flush()

            connect_reply = socks.Message.from_file(self.rfile)
            connect_reply.assert_socks5()
            if connect_reply.msg != socks.REP.SUCCEEDED:
                raise socks.SocksError(
                    connect_reply.msg,
                    "SOCKS server error"
                )
        except (socks.SocksError, exceptions.TcpDisconnect) as e:
            raise PathocError(str(e))

    def connect(self, connect_to=None, showssl=False, fp=sys.stdout):
        """
        connect_to: A (host, port) tuple, which will be connected to with
        an HTTP CONNECT request.
        """
        if self.use_http2 and not self.ssl:
            raise NotImplementedError("HTTP2 without SSL is not supported.")

        # tcp.TCPClient.connect yields a closer that is handed to our caller.
        with tcp.TCPClient.connect(self) as closer:
            if connect_to:
                self.http_connect(connect_to)

            self.sslinfo = None
            if self.ssl:
                try:
                    alpn_protos = [b'http/1.1']
                    if self.use_http2:
                        alpn_protos.append(b'h2')

                    self.convert_to_ssl(
                        sni=self.sni,
                        cert=self.clientcert,
                        method=self.ssl_version,
                        options=self.ssl_options,
                        cipher_list=self.ciphers,
                        alpn_protos=alpn_protos
                    )
                except exceptions.TlsException as v:
                    raise PathocError(str(v))

                self.sslinfo = SSLInfo(
                    self.connection.get_peer_cert_chain(),
                    self.get_current_cipher(),
                    self.get_alpn_proto_negotiated()
                )
                if showssl:
                    print(str(self.sslinfo), file=fp)

                if self.use_http2:
                    self.protocol.check_alpn()
                    if not self.http2_skip_connection_preface:
                        self.protocol.perform_client_connection_preface()

            if self.timeout:
                self.settimeout(self.timeout)
            return closer.pop()

    def stop(self):
        """Ask a running websocket frame reader (if any) to terminate."""
        if self.ws_framereader:
            self.ws_framereader.terminate.put(None)

    def wait(self, timeout=0.01, finish=True):
        """
        A generator that yields frames until Pathoc terminates.

        timeout: If specified None may be yielded instead if timeout is
        reached. If timeout is None, wait forever. If timeout is 0, return
        immediately if nothing is on the queue.

        finish: If true, consume messages until the reader shuts down.
        Otherwise, return None on timeout.
        """
        if self.ws_framereader:
            while True:
                try:
                    frm = self.ws_framereader.frames_queue.get(
                        timeout=timeout,
                        block=True if timeout != 0 else False
                    )
                except queue.Empty:
                    if finish:
                        continue
                    else:
                        return
                # None is the reader's shutdown sentinel.
                if frm is None:
                    self.ws_framereader.join()
                    self.ws_framereader = None
                    return
                yield frm

    def websocket_send_frame(self, r):
        """
        Sends a single websocket frame.
        """
        logger = log.ConnectionLogger(
            self.fp,
            self.hexdump,
            False,
            None,
            self.wfile if self.showreq else None,
        )
        with logger.ctx() as lg:
            lg(">> %s" % r)
            language.serve(r, self.wfile, self.settings)
            self.wfile.flush()

    def websocket_start(self, r):
        """
        Performs an HTTP request, and attempts to drop into websocket
        connection.
        """
        resp = self.http(r)
        # 101 Switching Protocols: start reading frames in the background.
        if resp.status_code == 101:
            self.ws_framereader = WebsocketFrameReader(
                self.rfile,
                self.fp,
                self.showresp,
                self.hexdump,
                self.ws_read_limit,
                self.timeout
            )
            self.ws_framereader.start()
        return resp

    def http(self, r):
        """
        Performs a single request.

        r: A language.http.Request object, or a string representing one
        request.

        Returns Response if we have a non-ignored response.

        May raise a exceptions.NetlibException
        """
        logger = log.ConnectionLogger(
            self.fp,
            self.hexdump,
            False,
            self.rfile if self.showresp else None,
            self.wfile if self.showreq else None,
        )
        with logger.ctx() as lg:
            lg(">> %s" % r)
            resp, req = None, None
            try:
                req = language.serve(r, self.wfile, self.settings)
                self.wfile.flush()

                resp = self.protocol.read_response(self.rfile, treq(method=req["method"].encode()))
                resp.sslinfo = self.sslinfo
            except exceptions.HttpException as v:
                lg("Invalid server response: %s" % v)
                raise
            except exceptions.TcpTimeout:
                if self.ignoretimeout:
                    lg("Timeout (ignored)")
                    return None
                lg("Timeout")
                raise
            finally:
                # Always log the response summary if we got one, and suppress
                # the whole exchange's output for ignored status codes.
                if resp:
                    lg("<< %s %s: %s bytes" % (
                        resp.status_code, strutils.escape_control_characters(resp.reason) if resp.reason else "", len(resp.content)
                    ))
                    if resp.status_code in self.ignorecodes:
                        lg.suppress()
        return resp

    def request(self, r):
        """
        Performs a single request.

        r: A language.message.Message object, or a string representing
        one.

        Returns Response if we have a non-ignored response.

        May raise a exceptions.NetlibException
        """
        if isinstance(r, str):
            r = next(language.parse_pathoc(r, self.use_http2))

        if isinstance(r, language.http.Request):
            if r.ws:
                return self.websocket_start(r)
            else:
                return self.http(r)
        elif isinstance(r, language.websockets.WebsocketFrame):
            self.websocket_send_frame(r)
        elif isinstance(r, language.http2.Request):
            return self.http(r)
        # elif isinstance(r, language.http2.Frame):
            # TODO: do something
def main(args):  # pragma: no cover
    """CLI entry point: replay the parsed request playlist args.repeat times.

    Each repetition opens a fresh Pathoc connection; TCP errors retry the
    next repetition, proxy/SOCKS/TLS setup errors exit with status 1, and
    Ctrl-C stops cleanly.
    """
    memo = set()
    p = None
    if args.repeat == 1:
        requests = args.requests
    else:
        # If we are replaying more than once, we must convert the request generators to lists
        # or they will be exhausted after the first run.
        # This is bad for the edge-case where get:/:x10000000 (see 0da3e51) is combined with -n 2,
        # but does not matter otherwise.
        requests = [list(x) for x in args.requests]
    try:
        requests_done = 0
        while True:
            if requests_done == args.repeat:
                break
            if args.wait and requests_done > 0:
                time.sleep(args.wait)

            requests_done += 1
            if args.random:
                playlist = random.choice(requests)
            else:
                playlist = itertools.chain.from_iterable(requests)
            p = Pathoc(
                (args.host, args.port),
                ssl=args.ssl,
                sni=args.sni,
                ssl_version=args.ssl_version,
                ssl_options=args.ssl_options,
                clientcert=args.clientcert,
                ciphers=args.ciphers,
                use_http2=args.use_http2,
                http2_skip_connection_preface=args.http2_skip_connection_preface,
                http2_framedump=args.http2_framedump,
                showreq=args.showreq,
                showresp=args.showresp,
                explain=args.explain,
                hexdump=args.hexdump,
                ignorecodes=args.ignorecodes,
                timeout=args.timeout,
                ignoretimeout=args.ignoretimeout,
                showsummary=True
            )
            trycount = 0
            try:
                with p.connect(args.connect_to, args.showssl):
                    for spec in playlist:
                        if args.explain or args.memo:
                            spec = spec.freeze(p.settings)
                        if args.memo:
                            # Skip specs already replayed; bail out once we hit
                            # args.memolimit consecutive duplicates.
                            h = hashlib.sha256(spec.spec()).digest()
                            if h not in memo:
                                trycount = 0
                                memo.add(h)
                            else:
                                trycount += 1
                                if trycount > args.memolimit:
                                    print("Memo limit exceeded...", file=sys.stderr)
                                    return
                                else:
                                    continue
                        try:
                            ret = p.request(spec)
                            if ret and args.oneshot:
                                return
                            # We consume the queue when we can, so it doesn't build up.
                            for _ in p.wait(timeout=0, finish=False):
                                pass
                        except exceptions.NetlibException:
                            break
                    # Drain any remaining websocket frames before closing.
                    for _ in p.wait(timeout=0.01, finish=True):
                        pass
            except exceptions.TcpException as v:
                print(str(v), file=sys.stderr)
                continue
            except PathocError as v:
                print(str(v), file=sys.stderr)
                sys.exit(1)
    except KeyboardInterrupt:
        pass
    if p:
        p.stop()
| |
"""
Support for apk
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
.. versionadded:: 2017.7.0
"""
import copy
import logging
import salt.utils.data
import salt.utils.itertools
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)

# Define the module's virtual name: this module is loaded as `pkg` on
# matching (Alpine) minions.
__virtualname__ = "pkg"
def __virtual__():
    """
    Confirm this module is running on an Alpine Linux distribution
    """
    os_family = __grains__.get("os_family", False)
    if os_family != "Alpine":
        return (False, "Module apk only works on Alpine Linux based systems")
    return __virtualname__
# def autoremove(list_only=False, purge=False):
# return 'Not available'
# def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
# return 'Not available'
# def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
# return 'Not available'
# def upgrade_available(name):
# return 'Not available'
# def version_cmp(pkg1, pkg2, ignore_epoch=False):
# return 'Not available'
# def list_repos():
# return 'Not available'
# def get_repo(repo, **kwargs):
# return 'Not available'
# def del_repo(repo, **kwargs):
# return 'Not available'
# def del_repo_key(name=None, **kwargs):
# return 'Not available'
# def mod_repo(repo, saltenv='base', **kwargs):
# return 'Not available'
# def expand_repo_def(**kwargs):
# return 'Not available'
# def get_selections(pattern=None, state=None):
# return 'Not available'
# def set_selections(path=None, selection=None, clear=False, saltenv='base'):
# return 'Not available'
# def info_installed(*names):
# return 'Not available'
def version(*names, **kwargs):
    """
    Returns a string representing the package version or an empty string if not
    installed. If more than one package name is specified, a dict of
    name/version pairs is returned.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.version <package name>
        salt '*' pkg.version <package1> <package2> <package3> ...
    """
    # Delegate to the shared pkg_resource helper, which consults list_pkgs().
    resolver = __salt__["pkg_resource.version"]
    return resolver(*names, **kwargs)
def refresh_db(**kwargs):
    """
    Updates the package list
    - ``True``: Database updated successfully
    - ``False``: Problem updating database
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.refresh_db
    """
    cmd = ["apk", "update"]
    call = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
    if call["retcode"] == 0:
        errors = []
        ret = True
    else:
        # apk writes its failure details to stdout.
        errors = [call["stdout"]]
        ret = False

    if errors:
        raise CommandExecutionError(
            # BUG FIX: the message previously said "installing package(s)"
            # (copy/paste from install()); this function refreshes the db.
            "Problem encountered refreshing the package database",
            info={"errors": errors, "changes": ret},
        )

    return ret
def _list_pkgs_from_context(versions_as_list):
    """
    Use pkg list from __context__
    """
    cached = __context__["pkg.list_pkgs"]
    if versions_as_list:
        return cached
    # Deep-copy before stringifying so the cached dict stays list-valued.
    ret = copy.deepcopy(cached)
    __salt__["pkg_resource.stringify"](ret)
    return ret
def list_pkgs(versions_as_list=False, **kwargs):
    """
    List the packages currently installed in a dict::
        {'<package_name>': '<version>'}
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.list_pkgs
        salt '*' pkg.list_pkgs versions_as_list=True
    """
    versions_as_list = salt.utils.data.is_true(versions_as_list)
    # 'removed' and 'purge_desired' are not yet implemented / not applicable.
    if any(
        salt.utils.data.is_true(kwargs.get(flag))
        for flag in ("removed", "purge_desired")
    ):
        return {}

    if "pkg.list_pkgs" in __context__ and kwargs.get("use_context", True):
        return _list_pkgs_from_context(versions_as_list)

    ret = {}
    out = __salt__["cmd.run"](
        ["apk", "info", "-v"], output_loglevel="trace", python_shell=False
    )
    for line in salt.utils.itertools.split(out, "\n"):
        # Lines look like name-version-release; the last two dash-separated
        # fields are the version, everything before is the package name.
        fields = line.split("-")
        pkg_name = "-".join(fields[:-2])
        pkg_version = "-".join(fields[-2:])
        __salt__["pkg_resource.add_pkg"](ret, pkg_name, pkg_version)

    __salt__["pkg_resource.sort_pkglist"](ret)
    __context__["pkg.list_pkgs"] = copy.deepcopy(ret)
    if not versions_as_list:
        __salt__["pkg_resource.stringify"](ret)
    return ret
def latest_version(*names, **kwargs):
    """
    Return the latest version of the named package available for upgrade or
    installation. If more than one package name is specified, a dict of
    name/version pairs is returned.
    If the latest version of a given package is already installed, an empty
    string will be returned for that package.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.latest_version <package name>
        salt '*' pkg.latest_version <package name>
        salt '*' pkg.latest_version <package1> <package2> <package3> ...
    """
    refresh = salt.utils.data.is_true(kwargs.pop("refresh", True))
    if not names:
        return ""
    ret = {}
    # Initialize the return dict with empty strings
    for name in names:
        ret[name] = ""
    pkgs = list_pkgs()
    # Refresh before looking for the latest version available
    if refresh:
        refresh_db()
    # Upgrade check: `apk upgrade -s` simulates the upgrade and prints
    # "Upgrading <name> (<old> -> <new>)" style lines we parse positionally.
    cmd = ["apk", "upgrade", "-s"]
    out = __salt__["cmd.run_stdout"](cmd, output_loglevel="trace", python_shell=False)
    for line in salt.utils.itertools.split(out, "\n"):
        try:
            name = line.split(" ")[2]
            _oldversion = line.split(" ")[3].strip("(")
            newversion = line.split(" ")[5].strip(")")
            if name in names:
                ret[name] = newversion
        # Lines that don't match the expected shape are silently skipped.
        except (ValueError, IndexError):
            pass
    # If version is empty, package may not be installed; fall back to
    # searching the repositories for the candidate version.
    for pkg in ret:
        if not ret[pkg]:
            installed = pkgs.get(pkg)
            cmd = ["apk", "search", pkg]
            out = __salt__["cmd.run_stdout"](
                cmd, output_loglevel="trace", python_shell=False
            )
            for line in salt.utils.itertools.split(out, "\n"):
                try:
                    # name-version-release: last two dash fields are version.
                    pkg_version = "-".join(line.split("-")[-2:])
                    pkg_name = "-".join(line.split("-")[:-2])
                    if pkg == pkg_name:
                        # Empty string means already at the latest version.
                        if installed == pkg_version:
                            ret[pkg] = ""
                        else:
                            ret[pkg] = pkg_version
                except ValueError:
                    pass
    # Return a string if only one package name passed
    if len(names) == 1:
        return ret[names[0]]
    return ret
# TODO: Support specific version installation
def install(name=None, refresh=False, pkgs=None, sources=None, **kwargs):
    """
    Install the passed package, add refresh=True to update the apk database.
    name
        The name of the package to be installed. Note that this parameter is
        ignored if either "pkgs" or "sources" is passed. Additionally, please
        note that this option can only be used to install packages from a
        software repository. To install a package file manually, use the
        "sources" option.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.install <package name>
    refresh
        Whether or not to refresh the package database before installing.
    Multiple Package Installation Options:
    pkgs
        A list of packages to install from a software repository. Must be
        passed as a python list.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.install pkgs='["foo", "bar"]'
    sources
        A list of IPK packages to install. Must be passed as a list of dicts,
        with the keys being package names, and the values being the source URI
        or local path to the package. Dependencies are automatically resolved
        and marked as auto-installed.
        CLI Example:
        .. code-block:: bash
            salt '*' pkg.install sources='[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]'
    install_recommends
        Whether to install the packages marked as recommended. Default is True.
    Returns a dict containing the new package names and versions::
        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
    """
    refreshdb = salt.utils.data.is_true(refresh)
    pkg_to_install = []
    # Snapshot of installed packages before the install, for the changes diff.
    old = list_pkgs()

    if name and not (pkgs or sources):
        if "," in name:
            pkg_to_install = name.split(",")
        else:
            pkg_to_install = [name]

    if pkgs:
        # We don't support installing specific version for now
        # so transform the dict in list ignoring version provided
        pkgs = [next(iter(p)) for p in pkgs if isinstance(p, dict)]
        pkg_to_install.extend(pkgs)

    if not pkg_to_install:
        return {}

    if refreshdb:
        refresh_db()

    cmd = ["apk", "add"]

    # Switch in update mode if a package is already installed
    for _pkg in pkg_to_install:
        if old.get(_pkg):
            cmd.append("-u")
            break

    cmd.extend(pkg_to_install)

    out = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)

    if out["retcode"] != 0 and out["stderr"]:
        errors = [out["stderr"]]
    else:
        errors = []

    # Drop the cached package list so the post-install snapshot is fresh.
    __context__.pop("pkg.list_pkgs", None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if errors:
        raise CommandExecutionError(
            "Problem encountered installing package(s)",
            info={"errors": errors, "changes": ret},
        )

    return ret
def purge(name=None, pkgs=None, **kwargs):
    """
    Alias to remove

    Calls remove() with purge=True so apk also drops modified configuration.
    """
    return remove(pkgs=pkgs, name=name, purge=True)
def remove(
    name=None, pkgs=None, purge=False, **kwargs
):  # pylint: disable=unused-argument
    """
    Remove packages using ``apk del``.
    name
        The name of the package to be deleted.
    Multiple Package Options:
    pkgs
        A list of packages to delete. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.
    Returns a dict containing the changes.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package1>,<package2>,<package3>
        salt '*' pkg.remove pkgs='["foo", "bar"]'
    """
    # Snapshot before removal, for the changes diff.
    old = list_pkgs()

    targets = []
    if name:
        targets = name.split(",") if "," in name else [name]
    if pkgs:
        targets.extend(pkgs)
    if not targets:
        return {}

    cmd = ["apk", "del", "--purge"] if purge else ["apk", "del"]
    cmd.extend(targets)

    out = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
    errors = [out["stderr"]] if out["retcode"] != 0 and out["stderr"] else []

    # Drop the cached package list so the post-removal snapshot is fresh.
    __context__.pop("pkg.list_pkgs", None)
    new = list_pkgs()
    ret = salt.utils.data.compare_dicts(old, new)

    if errors:
        raise CommandExecutionError(
            "Problem encountered removing package(s)",
            info={"errors": errors, "changes": ret},
        )

    return ret
def upgrade(name=None, pkgs=None, refresh=True, **kwargs):
    """
    Upgrade every installed package via ``apk upgrade``, or only specific
    packages when ``name`` or ``pkgs`` is given. ``name`` is ignored when
    ``pkgs`` is supplied.

    refresh
        If True (default), refresh the package database first.

    Returns a dict of the form::

        {'changes': {'<package>': {'old': '<old-version>',
                                   'new': '<new-version>'}},
         'result': True,
         'comment': ''}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.upgrade
    """
    ret = {"changes": {}, "result": True, "comment": ""}

    if salt.utils.data.is_true(refresh):
        refresh_db()

    old = list_pkgs()

    # `pkgs` wins over `name`; a comma-separated `name` means several targets.
    if pkgs:
        targets = list(pkgs)
    elif name:
        targets = name.split(",")
    else:
        targets = []

    if targets:
        # Targeted upgrade: re-add the packages in update mode.
        cmd = ["apk", "add", "-u"] + targets
    else:
        cmd = ["apk", "upgrade"]

    call = __salt__["cmd.run_all"](
        cmd, output_loglevel="trace", python_shell=False, redirect_stderr=True
    )
    if call["retcode"] != 0:
        ret["result"] = False
        if call["stdout"]:
            ret["comment"] = call["stdout"]

    # Drop the cached package list so the post-upgrade state is re-read.
    __context__.pop("pkg.list_pkgs", None)
    ret["changes"] = salt.utils.data.compare_dicts(old, list_pkgs())
    return ret
def list_upgrades(refresh=True, **kwargs):
    """
    List all available package upgrades.

    refresh
        If True (default), refresh the package database before checking
        for upgrades.

    Returns a dict mapping each upgradable package name to the version it
    would be upgraded to.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_upgrades
    """
    ret = {}
    if salt.utils.data.is_true(refresh):
        refresh_db()
    cmd = ["apk", "upgrade", "-s"]  # -s: simulate only, report what would change
    call = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)

    if call["retcode"] != 0:
        comment = ""
        if "stderr" in call:
            comment += call["stderr"]
        if "stdout" in call:
            comment += call["stdout"]
        raise CommandExecutionError(comment)

    # Simulated upgrade lines look like:
    #   (1/3) Upgrading mypkg (1.0-r0 -> 1.1-r0)
    # Split each matching line once; field 2 is the name, field 5 the new
    # version. The old version (field 3) was previously parsed into an
    # unused local and is no longer extracted.
    for line in call["stdout"].splitlines():
        if "Upgrading" not in line:
            continue
        fields = line.split(" ")
        ret[fields[2]] = fields[5].strip(")")
    return ret
def file_list(*packages, **kwargs):
    """
    List the files that belong to a package. Not specifying any packages will
    return a list of _every_ file on the system's package database (not
    generally recommended).

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.file_list httpd
        salt '*' pkg.file_list httpd postfix
        salt '*' pkg.file_list
    """
    # Identical payload to file_dict; kept for API parity with other
    # pkg modules.
    return file_dict(*packages)
def file_dict(*packages, **kwargs):
    """
    List the files that belong to a package, grouped by package.

    At least one package name must be provided; unlike some other pkg
    modules, listing every file on the system is not supported here and a
    help string is returned instead.

    Returns a dict of the form
    ``{'errors': [...], 'packages': {'<package>': [<file>, ...]}}``.

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.file_dict httpd
        salt '*' pkg.file_dict httpd postfix
    """
    errors = []
    ret = {}
    cmd_files = ["apk", "info", "-L"]

    if not packages:
        return "Package name should be provided"

    for package in packages:
        files = []
        cmd = cmd_files[:]
        cmd.append(package)
        out = __salt__["cmd.run_all"](cmd, output_loglevel="trace", python_shell=False)
        # Collect stderr on failure instead of silently dropping it;
        # `errors` was previously returned but never populated.
        if out["retcode"] != 0 and out["stderr"]:
            errors.append(out["stderr"])
        for line in out["stdout"].splitlines():
            # Skip the header line ("<pkg>-<version> contains:").
            if line.endswith("contains:"):
                continue
            files.append(line)
        if files:
            ret[package] = files

    return {"errors": errors, "packages": ret}
def owner(*paths, **kwargs):
    """
    Return the name of the package that owns each given file. Multiple file
    paths can be passed.

    Always returns a dictionary of path/package-name pairs (unlike
    :mod:`pkg.version <salt.modules.apk.version>`, a single path does not
    collapse to a bare string).

    If a file is not owned by a package, or is not present on the minion,
    an explanatory message is returned for that path instead.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.owner /usr/bin/apachectl
        salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
    """
    if not paths:
        return "You must provide a path"
    ret = {}
    cmd_search = ["apk", "info", "-W"]
    for path in paths:
        cmd = cmd_search[:]
        cmd.append(path)
        output = __salt__["cmd.run_stdout"](
            cmd, output_loglevel="trace", python_shell=False
        )
        if output:
            if "ERROR:" in output:
                ret[path] = "Could not find owner package"
            else:
                # `apk info -W` prints "<path> is owned by <package>".
                # partition() avoids the IndexError that split(...)[1]
                # raised on unexpected output.
                _, sep, owner_pkg = output.partition("by ")
                if sep:
                    ret[path] = owner_pkg.strip()
                else:
                    ret[path] = "Could not find owner package"
        else:
            ret[path] = "Error running {}".format(cmd)
    return ret
| |
import asyncio
import traceback
from httptools import HttpParserUpgrade
from httptools import HttpRequestParser, parse_url
from httptools.parser.errors import HttpParserError
from typing import Dict, List, Any, Awaitable
from mach9.response import ALL_STATUS_CODES
from mach9.websocket import upgrade_to_websocket
from mach9.timer import get_current_time
class BodyChannel(asyncio.Queue):
    """Queue of request-body chunks that applies transport backpressure.

    Once the queue holds ``_max_qsize`` chunks the transport stops
    reading; the next ``receive()`` resumes it.
    """

    def __init__(self, transport):
        super().__init__()
        self._transport = transport
        self._max_qsize = 10
        self._is_reading = True

    async def send(self, body_chunk):
        """Enqueue a chunk, pausing the transport once the queue fills up."""
        self.put_nowait(body_chunk)
        if not self._is_reading:
            return
        if self.qsize() >= self._max_qsize:
            self._is_reading = False
            self._transport.pause_reading()

    def receive(self):
        """Return an awaitable yielding the next chunk, resuming reads."""
        if not self._is_reading:
            self._is_reading = True
            self._transport.resume_reading()
        return self.get()
class ReplyChannel:
    """Outbound channel: forwards handler response messages to the protocol."""

    def __init__(self, protocol):
        super().__init__()
        self._protocol = protocol

    async def send(self, message):
        """Hand *message* to the protocol, which writes it to the transport."""
        self._protocol.send(message)
class HttpProtocol(asyncio.Protocol):
    """HTTP/1.1 connection handler.

    Incoming bytes are fed to an ``httptools.HttpRequestParser``; its
    callbacks (``on_url``, ``on_header``, ``on_headers_complete``,
    ``on_body``, ``on_message_complete``) assemble an ASGI-style request
    message plus a pair of channels, then schedule ``request_handler`` on
    the event loop. Responses (complete or chunked) are written back via
    ``send()``.
    """

    # NOTE(review): '_keep_alive', '_request_handler_task' and
    # '_request_stream_task' are assigned in __init__ but not listed here.
    # This only works because asyncio.Protocol defines no __slots__, so
    # instances still have a __dict__ -- which also means __slots__ brings
    # no memory benefit in this class; confirm before relying on it.
    __slots__ = (
        # event loop, connection
        'loop', 'transport', 'connections', 'signal',
        # request params
        'parser', 'url', 'headers',
        # request config
        'request_handler', 'request_timeout', 'request_max_size',
        # enable or disable access log / error log purpose
        'has_log', 'log', 'netlog',
        # connection management
        '_is_upgrade',
        '_total_request_size', '_timeout_handler', '_last_request_time',
        'body_channel', 'message')

    def __init__(self, *, loop, request_handler: Awaitable,
                 log=None, signal=None, connections=set(), request_timeout=60,
                 request_max_size=None, has_log=True,
                 keep_alive=True, netlog=None):
        '''signal is shared across connections (server shutdown flag).

        NOTE(review): ``connections=set()`` is a mutable default shared by
        every instance constructed without an explicit argument; this looks
        intentional (a server-wide registry of open connections), but
        verify against the caller.
        '''
        self.loop = loop
        self.transport = None
        self.parser = None
        self.url = None
        self.headers = None
        self.body_channel = None
        self.message = None
        self.signal = signal
        self.has_log = has_log
        self.log = log
        self.netlog = netlog
        self.connections = connections
        self.request_handler = request_handler
        self.request_timeout = request_timeout
        self.request_max_size = request_max_size
        self._total_request_size = 0
        self._timeout_handler = None
        self._last_request_time = None
        self._request_handler_task = None
        self._request_stream_task = None
        self._is_upgrade = False
        # config.KEEP_ALIVE or not check_headers()['connection_close']
        self._keep_alive = keep_alive

    @property
    def keep_alive(self):
        # Reuse the connection only if: the response did not request close,
        # the server is not shutting down, and the parser judged the request
        # itself keep-alive capable.
        return (self._keep_alive
                and not self.signal.stopped
                and self.parser
                and self.parser.should_keep_alive())

    # -------------------------------------------- #
    # Connection
    # -------------------------------------------- #

    def connection_made(self, transport):
        # Register this connection and arm the inactivity timeout.
        self.connections.add(self)
        self._timeout_handler = self.loop.call_later(
            self.request_timeout, self.connection_timeout)
        self.transport = transport
        self._last_request_time = get_current_time()

    def connection_lost(self, exc):
        # Deregister and cancel the pending timeout callback.
        self.connections.discard(self)
        self._timeout_handler.cancel()

    def connection_timeout(self):
        # Check if the timeout really elapsed since the last request; if the
        # connection has been active in the meantime, re-arm the timer for
        # the remaining interval instead of failing the request.
        time_elapsed = get_current_time() - self._last_request_time
        if time_elapsed < self.request_timeout:
            time_left = self.request_timeout - time_elapsed
            self._timeout_handler = (
                self.loop.call_later(time_left, self.connection_timeout))
        else:
            # Genuine timeout: cancel in-flight work and answer 408.
            if self._request_stream_task:
                self._request_stream_task.cancel()
            if self._request_handler_task:
                self._request_handler_task.cancel()
            exception = (408, 'Request Timeout')
            self.write_error(exception)

    # -------------------------------------------- #
    # Parsing
    # -------------------------------------------- #

    def data_received(self, data: bytes):
        # Check for the request itself getting too large and exceeding
        # memory limits
        self._total_request_size += len(data)
        if self._total_request_size > self.request_max_size:
            exception = (413, 'Payload Too Large')
            # NOTE(review): there is no early return here, so parsing
            # continues below even though write_error() closes the
            # transport -- confirm this is intentional.
            self.write_error(exception)

        # Create parser if this is the first time we're receiving data
        if self.parser is None:
            self.headers = []
            self.parser = HttpRequestParser(self)

        # Parse request chunk or close connection
        try:
            self.parser.feed_data(data)
        except HttpParserUpgrade:
            # httptools raises to signal an HTTP Upgrade request.
            upgrade_to_websocket(self)
        except HttpParserError:
            exception = (400, 'Bad Request')
            self.write_error(exception)

    def on_url(self, url: bytes):
        # Parser callback: raw request target (path + query string).
        self.url = url

    def on_header(self, name: bytes, value: bytes):
        # Parser callback, once per header. Names are lower-cased; an
        # Upgrade header marks this connection for the websocket path.
        name = name.lower()
        if name == b'content-length' and int(value) > self.request_max_size:
            exception = (413, 'Payload Too Large')
            self.write_error(exception)
        if name == b'upgrade':
            self._is_upgrade = True
        self.headers.append([name, value])

    def on_headers_complete(self):
        # Upgrade requests are completed by upgrade_to_websocket() instead.
        if self._is_upgrade:
            return
        channels = {}
        self.message = self.get_message(
            self.transport, self.parser.get_http_version(),
            self.parser.get_method(), self.url, self.headers)
        channels['body'] = BodyChannel(self.transport)
        channels['reply'] = ReplyChannel(self)
        self.body_channel = channels['body']
        # Hand the request to the application as soon as headers are in;
        # the body streams through body_channel afterwards.
        self._request_handler_task = self.loop.create_task(
            self.request_handler(self.message, channels))

    def on_body(self, body: bytes):
        if self._is_upgrade:
            return
        # Stream one more body chunk to the handler (more_content=True).
        body_chunk = self.get_request_body_chunk(body, False, True)
        self._request_stream_task = self.loop.create_task(
            self.body_channel.send(body_chunk))

    def on_message_complete(self):
        if self._is_upgrade:
            return
        # Terminating empty chunk (more_content=False) ends the body stream.
        body_chunk = self.get_request_body_chunk(b'', False, False)
        self._request_stream_task = self.loop.create_task(
            self.body_channel.send(body_chunk))

    def get_request_body_chunk(self, content: bytes, closed: bool,
                               more_content: bool) -> Dict[str, Any]:
        '''Build a Request Body Chunk message.

        http://channels.readthedocs.io/en/stable/asgi/www.html#request-body-chunk
        '''
        return {
            'content': content,
            'closed': closed,
            'more_content': more_content
        }

    def get_message(self, transport, http_version: str, method: bytes,
                    url: bytes, headers: List[List[bytes]]) -> Dict[str, Any]:
        '''Build an ASGI-style Request message from the parsed request.

        http://channels.readthedocs.io/en/stable/asgi/www.html#request
        '''
        url_obj = parse_url(url)
        if url_obj.schema is None:
            # Relative request target: infer the scheme from the transport.
            if transport.get_extra_info('sslcontext'):
                scheme = 'https'
            else:
                scheme = 'http'
        else:
            scheme = url_obj.schema.decode()
        path = '' if url_obj.path is None else url_obj.path.decode('utf-8')
        query = b'' if url_obj.query is None else url_obj.query
        return {
            'channel': 'http.request',
            'reply_channel': None,
            'http_version': http_version,
            'method': method.decode(),
            'scheme': scheme,
            'path': path,
            'query_string': query,
            'root_path': '',
            'headers': headers,
            'body': b'',
            'body_channel': None,
            'client': transport.get_extra_info('peername'),
            'server': transport.get_extra_info('sockname')
        }

    def check_headers(self, headers: List[List[bytes]]) -> Dict[str, bool]:
        '''Scan response headers for "Connection: close" and Content-Length.

        NOTE(review): matching is exact-case (b'Connection',
        b'Content-Length'); differently-cased response headers are not
        detected -- confirm callers normalize their header names.
        '''
        connection_close = False
        content_length = False
        for key, value in headers:
            if key == b'Connection' and value == b'close':
                connection_close = True
            if key == b'Content-Length':
                content_length = True
        return {
            'connection_close': connection_close,
            'content_length': content_length
        }

    def after_write(self, more_content, keep_alive):
        # Bookkeeping after each write: on the final write, either close the
        # connection or reset per-request state for the next request on it.
        if not more_content and not keep_alive:
            self.transport.close()
        elif not more_content and keep_alive:
            self._last_request_time = get_current_time()
            self.cleanup()

    def is_response_chunk(self, message: Dict[str, Any]) -> bool:
        # A message carrying neither 'status' nor 'headers' is a body
        # continuation of an already-started chunked response.
        return 'status' not in message and 'headers' not in message

    def make_header_content(self, headers, result_headers,
                            content, more_content):
        '''Serialize response headers, adding Content-Length when missing.'''
        header_content = b''
        if headers is not None:
            _header_content = []
            # Complete (non-streamed) responses get an automatic
            # Content-Length if the application did not set one.
            if not more_content and not result_headers['content_length']:
                _header_content.extend([b'Content-Length: ',
                                        str(len(content)).encode(), b'\r\n'])
            for key, value in headers:
                # Connection is emitted separately in send().
                if key == b'Connection':
                    continue
                _header_content.extend([key, b': ', value, b'\r\n'])
            header_content = b''.join(_header_content)
        return header_content

    def send(self, message: Dict[str, Any]):
        '''Write a Response or Response Chunk message to the transport.'''
        transport = self.transport
        status = message.get('status')
        headers = message.get('headers')
        content = message.get('content')
        more_content = message.get('more_content', False)
        if headers is not None:
            result_headers = self.check_headers(headers)
            if result_headers['connection_close'] is True:
                self._keep_alive = False
        # NOTE(review): a message with 'status' but without 'headers' would
        # reach make_header_content() below with `result_headers` unbound
        # (UnboundLocalError) -- confirm senders always supply both.
        keep_alive = self.keep_alive
        if self.is_response_chunk(message):
            # Chunked-transfer continuation: "size\r\ndata\r\n", or the
            # terminating "0\r\n\r\n" when no more content follows.
            content_length = len(content)
            if more_content and content_length > 0:
                transport.write(b'%x\r\n%b\r\n' % (content_length, content))
                self.after_write(more_content, keep_alive)
            elif more_content is False:
                transport.write(b'0\r\n\r\n')
                self.after_write(more_content, keep_alive)
            return
        keep_alive_timeout = self.request_timeout
        timeout_header = b''
        if keep_alive and keep_alive_timeout is not None:
            timeout_header = b'Keep-Alive: %d\r\n' % keep_alive_timeout
        header_content = self.make_header_content(headers, result_headers,
                                                  content, more_content)
        response = (
            b'HTTP/1.1 %d %b\r\n'
            b'Connection: %b\r\n'
            b'%b'
            b'%b\r\n'
            b'%b') % (
                status,
                ALL_STATUS_CODES[status],
                b'keep-alive' if keep_alive else b'close',
                timeout_header,
                header_content,
                content
            )
        transport.write(response)
        self.after_write(more_content, keep_alive)

    def write_error(self, exception):
        # Best-effort error response; the connection is always closed
        # afterwards (see the finally clause).
        try:
            status, content = exception
            content = 'Error: {}'.format(content).encode()
            headers = []
            self.send({
                'status': status,
                'headers': headers,
                'content': content,
                'more_content': False
            })
        except RuntimeError:
            self.log.error(
                'Connection lost before error written.')
        except Exception as e:
            self.bail_out(
                'Writing error failed, connection closed {}'.format(repr(e)),
                from_error=True)
        finally:
            self.transport.close()

    def bail_out(self, message, from_error=False):
        # Last-resort error path: if we are already handling an error (or
        # the transport is going away) just log; otherwise try to reply 500.
        if from_error or self.transport.is_closing():
            self.log.error(
                ('Transport closed @ {} and exception '
                 'experienced during error handling').format(
                    self.transport.get_extra_info('peername')))
            self.log.debug(
                'Exception:\n{}'.format(traceback.format_exc()))
        else:
            exception = (500, message)
            self.write_error(exception)
            self.log.error(message)

    def cleanup(self):
        # Reset per-request state so a kept-alive connection can serve the
        # next request; connection-level state is preserved.
        self.parser = None
        self.url = None
        self.headers = None
        self._request_handler_task = None
        self._request_stream_task = None
        self._total_request_size = 0
        self.body_channel = None
        self.message = None

    def close_if_idle(self):
        '''Close the connection if a request is not being sent or received

        :return: boolean - True if closed, false if staying open
        '''
        # No parser means no request is currently in flight.
        if not self.parser:
            self.transport.close()
            return True
        return False
| |
import cupy
from cupy import _util
from cupyx.scipy.ndimage import _filters_core
def _get_sub_kernel(f):
    """
    Takes the "function" given to generic_filter and returns the "sub-kernel"
    that will be called, one of RawKernel or ReductionKernel.

    This supports:
      * cupy.RawKernel -- accepted as-is; no checks are possible
      * cupy.ReductionKernel -- must have exactly one input and one output
    """
    if isinstance(f, cupy.RawKernel):
        # We will assume that it has the correct API
        return f
    if isinstance(f, cupy.ReductionKernel):
        if f.nin != 1 or f.nout != 1:
            raise TypeError('ReductionKernel must have 1 input and output')
        return f
    if isinstance(f, cupy.ElementwiseKernel):
        # Give ElementwiseKernel users a more specific error message.
        raise TypeError('only ReductionKernel allowed (not ElementwiseKernel)')
    raise TypeError('bad function type')
@_util.memoize(for_each_device=True)
def _get_generic_filter_red(rk, in_dtype, out_dtype, filter_size, mode,
                            wshape, offsets, cval, int_type):
    """Generic filter implementation based on a reduction kernel.

    Wraps the user's ReductionKernel ``rk`` as the inner "function" of an
    n-d filter kernel: for each output element, the window values are
    gathered into a fixed-size array and handed to the reduction, whose
    single output becomes the filter output. Memoized per device.
    """
    # Get the temporary output c type
    in_param, out_param = rk.in_params[0], rk.out_params[0]
    out_ctype = out_param.ctype
    if out_param.dtype is None:  # resolve template
        out_ctype = cupy._core._scalar.get_typename(
            in_dtype if out_param.ctype == in_param.ctype else out_dtype)

    # Get code chunks
    # The setup declares the window buffer plus 1-element CArray wrappers so
    # the reduction kernel can be invoked with its usual CArray signature.
    setup = '''
    int iv = 0;
    X values[{size}];
    CArray<X, 1, true, true> sub_in(values, {{{size}}});
    {out_ctype} val_out;
    CArray<{out_ctype}, 1, true, true> sub_out(&val_out, {{1}});
    '''.format(size=filter_size, out_ctype=out_ctype)

    sub_call = '''reduction_kernel::{}(sub_in, sub_out);
    y = cast<Y>(val_out);'''.format(rk.name)

    sub_kernel = _reduction_kernel_code(rk, filter_size, out_dtype, in_dtype)

    # Get the final kernel
    return _filters_core._generate_nd_kernel(
        'generic_{}_{}'.format(filter_size, rk.name),
        setup, 'values[iv++] = {value};', sub_call,
        mode, wshape, int_type, offsets, cval, preamble=sub_kernel,
        options=getattr(rk, 'options', ()))
def _reduction_kernel_code(rk, filter_size, out_dtype, in_dtype):
    """Emit device code that runs ReductionKernel ``rk`` over one window.

    The generated __device__ function reduces a fixed-length 1-D input
    into a single output element and lives in a ``reduction_kernel``
    namespace.
    """
    # NOTE: differences from the code generated for real reduction kernels:
    #  * input is always 1D and always less than 2^31 elements
    #  * output is always 1D with a single element
    #  * never across threads (no _block_stride, _sdata, _sdata_raw, _REDUCE,
    #    _tid, _J, _i, _i_base, _j_offset, _J_offset, _j_stride, _J_stride)
    # Also, the code is moved into a namespace so that clashes are minimized
    # between the typedefs for the "template" variables.

    # figure out the types
    types = {}
    in_param, out_param = rk.in_params[0], rk.out_params[0]
    in_ctype = _get_type_info(in_param, in_dtype, types)
    out_ctype = _get_type_info(out_param, out_dtype, types)
    types = '\n'.join('typedef {} {};'.format(typ, name)
                      for name, typ in types.items())

    return '''namespace reduction_kernel {{
{type_preamble}
{preamble}
__device__
void {name}({in_const} CArray<{in_ctype}, 1, true, true>& _raw_{in_name},
            CArray<{out_ctype}, 1, true, true>& _raw_{out_name}) {{
    // these are just provided so if they are available for the RK
    CIndexer<1> _in_ind({{{size}}});
    CIndexer<0> _out_ind;

#define REDUCE(a, b) ({reduce_expr})
#define POST_MAP(a) ({post_map_expr})
    typedef {reduce_type} _type_reduce;
    _type_reduce _s = _type_reduce({identity});
    for (int _j = 0; _j < {size}; ++_j) {{
        _in_ind.set(_j);
        {in_const} {in_ctype}& {in_name} = _raw_{in_name}[_j];
        _type_reduce _a = static_cast<_type_reduce>({pre_map_expr});
        _s = REDUCE(_s, _a);
    }}
    _out_ind.set(0);
    {out_ctype} &{out_name} = _raw_{out_name}[0];
    POST_MAP(_s);
#undef REDUCE
#undef POST_MAP
}}
}}'''.format(
        name=rk.name, type_preamble=types, preamble=rk.preamble,
        in_const='const' if in_param.is_const else '',
        in_ctype=in_ctype, in_name=in_param.name,
        out_ctype=out_ctype, out_name=out_param.name,
        pre_map_expr=rk.map_expr,
        identity='' if rk.identity is None else rk.identity,
        size=filter_size,
        reduce_type=rk.reduce_type, reduce_expr=rk.reduce_expr,
        post_map_expr=rk.post_map_expr,
    )
def _get_type_info(param, dtype, types):
if param.dtype is not None:
return param.ctype
# Template type -> map to actual output type
ctype = cupy._core._scalar.get_typename(dtype)
types.setdefault(param.ctype, ctype)
return ctype
@_util.memoize(for_each_device=True)
def _get_generic_filter_raw(rk, filter_size, mode, wshape, offsets, cval,
                            int_type):
    """Generic filter implementation based on a raw kernel.

    The user's RawKernel is invoked per output element as
    ``rk(values, filter_size, &val_out)``: window values are converted to
    double on the way in and the result is cast back to the output type.
    Memoized per device.
    """
    setup = '''
    int iv = 0;
    double values[{}];
    double val_out;'''.format(filter_size)
    sub_call = '''raw_kernel::{}(values, {}, &val_out);
    y = cast<Y>(val_out);'''.format(rk.name, filter_size)
    return _filters_core._generate_nd_kernel(
        'generic_{}_{}'.format(filter_size, rk.name),
        setup, 'values[iv++] = cast<double>({value});', sub_call,
        mode, wshape, int_type, offsets, cval,
        preamble='namespace raw_kernel {{\n{}\n}}'.format(
            # Users can test RawKernel independently, but when passed to here
            # it must be used as a device function here. In fact, RawKernel
            # wouldn't compile if code only contains device functions, so this
            # is necessary.
            rk.code.replace('__global__', '__device__')),
        options=rk.options)
@_util.memoize(for_each_device=True)
def _get_generic_filter1d(rk, length, n_lines, filter_size, origin, mode, cval,
                          in_ctype, out_ctype, int_type):
    """
    The generic 1d filter is different than all other filters and thus is the
    only filter that doesn't use _generate_nd_kernel() and has a completely
    custom raw kernel.

    The returned RawKernel copies one line at a time from the input (with
    boundary filling according to ``mode``), calls the user's raw kernel on
    the padded line, and copies the resulting line to the output. Memoized
    per device.
    """
    # Each padded input line holds the data plus the filter-window overhang.
    in_length = length + filter_size - 1
    start = filter_size // 2 + origin
    end = start + length
    if mode == 'constant':
        # Constant-mode padding is identical for every line, so it is filled
        # once up front ("boundary_early") rather than inside the main loop.
        boundary, boundary_early = '', '''
        for (idx_t j = 0; j < {start}; ++j) {{ input_line[j] = {cval}; }}
        for (idx_t j = {end}; j<{in_length}; ++j) {{ input_line[j] = {cval}; }}
        '''.format(start=start, end=end, in_length=in_length, cval=cval)
    else:
        # `a` fills the left overhang, `b` the right; each maps an
        # out-of-bounds index j_ back into [0, length).
        if length == 1:
            a = b = 'j_ = 0;'
        elif mode == 'reflect':
            j = ('j_ = ({j}) % ({length} * 2);\n'
                 'j_ = min(j_, 2 * {length} - 1 - j_);')
            a = j.format(j='-1 - j_', length=length)
            b = j.format(j='j_', length=length)
        elif mode == 'mirror':
            j = ('j_ = 1 + (({j}) - 1) % (({length} - 1) * 2);\n'
                 'j_ = min(j_, 2 * {length} - 2 - j_);')
            a = j.format(j='-j_', length=length)
            b = j.format(j='j_', length=length)
        elif mode == 'nearest':
            a, b = 'j_ = 0;', 'j_ = {length}-1;'.format(length=length)
        elif mode == 'wrap':
            a = 'j_ = j_ % {length} + {length};'.format(length=length)
            b = 'j_ = j_ % {length};'.format(length=length)
        # NOTE(review): an unrecognized `mode` (with length > 1) leaves `a`
        # and `b` undefined and raises NameError below -- presumably `mode`
        # is validated by the caller; confirm.
        loop = '''for (idx_t j = {{}}; j < {{}}; ++j) {{{{
        idx_t j_ = j - {start};
        {{}}
        input_line[j] = input_line[j_ + {start}];
    }}}}'''.format(start=start)
        boundary_early = ''
        boundary = (loop.format(0, start, a) + '\n' +
                    loop.format(end, in_length, b))

    name = 'generic1d_{}_{}_{}'.format(length, filter_size, rk.name)
    code = '''#include "cupy/carray.cuh"
#include "cupy/complex.cuh"
#include <type_traits>  // let Jitify handle this

namespace raw_kernel {{\n{rk_code}\n}}

{CAST}

typedef unsigned char byte;
typedef {in_ctype} X;
typedef {out_ctype} Y;
typedef {int_type} idx_t;

__device__ idx_t offset(idx_t i, idx_t axis, idx_t ndim,
                        const idx_t* shape, const idx_t* strides) {{
    idx_t index = 0;
    for (idx_t a = ndim; --a > 0; ) {{
        if (a == axis) {{ continue; }}
        index += (i % shape[a]) * strides[a];
        i /= shape[a];
    }}
    return index + strides[0] * i;
}}

extern "C" __global__
void {name}(const byte* input, byte* output, const idx_t* x) {{
    const idx_t axis = x[0], ndim = x[1],
        *shape = x+2, *in_strides = x+2+ndim, *out_strides = x+2+2*ndim;

    const idx_t in_elem_stride = in_strides[axis];
    const idx_t out_elem_stride = out_strides[axis];

    double input_line[{in_length}];
    double output_line[{length}];

    {boundary_early}

    for (idx_t i = ((idx_t)blockIdx.x) * blockDim.x + threadIdx.x;
            i < {n_lines};
            i += ((idx_t)blockDim.x) * gridDim.x) {{
        // Copy line from input (with boundary filling)
        const byte* input_ = input + offset(i, axis, ndim, shape, in_strides);
        for (idx_t j = 0; j < {length}; ++j) {{
            input_line[j+{start}] = (double)*(X*)(input_+j*in_elem_stride);
        }}
        {boundary}

        raw_kernel::{rk_name}(input_line, {in_length}, output_line, {length});

        // Copy line to output
        byte* output_ = output + offset(i, axis, ndim, shape, out_strides);
        for (idx_t j = 0; j < {length}; ++j) {{
            *(Y*)(output_+j*out_elem_stride) = cast<Y>(output_line[j]);
        }}
    }}
}}'''.format(n_lines=n_lines, length=length, in_length=in_length, start=start,
             in_ctype=in_ctype, out_ctype=out_ctype, int_type=int_type,
             boundary_early=boundary_early, boundary=boundary,
             name=name, rk_name=rk.name,
             # Users can test RawKernel independently, but when passed to here
             # it must be used as a device function here. In fact, RawKernel
             # wouldn't compile if code only contains device functions, so this
             # is necessary.
             rk_code=rk.code.replace('__global__', '__device__'),
             CAST=_filters_core._CAST_FUNCTION)
    return cupy.RawKernel(code, name, ('--std=c++11',) + rk.options,
                          jitify=True)
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for creating or transforming model inputs."""
import base64
import collections
import json
import os
import re
from typing import (Dict, Iterable, List, Mapping, Optional, Sequence, Text,
Tuple, MutableSequence, Union)
import attr
import tensorflow.compat.v1 as tf
from etcmodel import feature_utils
from etcmodel import tensor_utils
from etcmodel.models import modeling
MODEL_CONFIG_FILENAME = 'etc_config.json'
@attr.s
class GlobalLocalTransformerSideInputs(object):
  """GlobalLocalTransformer side inputs ("att_mask" and "relative_att_ids").

  See `GlobalLocalTransformerLayers.call()` in `layers/transformer.py` for a
  description of these side inputs.
  """

  # One attention-mask / relative-attention-id tensor per attention
  # direction between the long ("l") and global ("g") inputs -- e.g.
  # `l2g_att_mask` presumably applies to long-to-global attention (see
  # transformer.py for the authoritative description). All fields are
  # Optional per the type comments below.
  l2l_att_mask = attr.ib()  # type: Optional[tf.Tensor]
  g2g_att_mask = attr.ib()  # type: Optional[tf.Tensor]
  l2g_att_mask = attr.ib()  # type: Optional[tf.Tensor]
  g2l_att_mask = attr.ib()  # type: Optional[tf.Tensor]
  l2l_relative_att_ids = attr.ib()  # type: Optional[tf.Tensor]
  g2g_relative_att_ids = attr.ib()  # type: Optional[tf.Tensor]
  l2g_relative_att_ids = attr.ib()  # type: Optional[tf.Tensor]
  g2l_relative_att_ids = attr.ib()  # type: Optional[tf.Tensor]

  def to_dict(self, exclude_none_values=False):
    """Returns attributes in a Python dictionary.

    Args:
      exclude_none_values: If True, attributes whose value is None are
        omitted from the returned dict.
    """
    if exclude_none_values:
      return {k: v for k, v in self.__dict__.items() if v is not None}
    else:
      return dict(self.__dict__)
def get_model_config(
    model_dir: Text,
    source_file: Optional[Text] = None,
    source_base64: Optional[Text] = None,
    write_from_source: Optional[bool] = True) -> modeling.EtcConfig:
  """Reads model config from `model_dir`, falling back to source file/base64.

  If `model_dir` already contains the JSON config file it is used directly.
  Otherwise exactly one of `source_file` or `source_base64` must be given,
  and (by default) the config read from it is also written to `model_dir`.

  Args:
    model_dir: Model directory containing the config file.
    source_file: Optional source file to read the config from if not present
      in `model_dir`.
    source_base64: Optional Base64 encoding of JSON content to read the
      config from if not present in `model_dir`. Mutually exclusive with
      `source_file`.
    write_from_source: If True (default), write the source config to
      `model_dir` if it isn't present already.

  Returns:
    An `EtcConfig` object.

  Raises:
    ValueError: If neither or both of `source_file`/`source_base64` are
      given when the config is absent from `model_dir`.
  """
  config_path = os.path.join(model_dir, MODEL_CONFIG_FILENAME)
  if tf.io.gfile.exists(config_path):
    return modeling.EtcConfig.from_json_file(config_path)

  have_file = source_file is not None
  have_base64 = source_base64 is not None
  if not have_file and not have_base64:
    raise ValueError(
        'Either `source_file` or `source_base64` must be specified for initial '
        'model configuration.')
  if have_file and have_base64:
    raise ValueError('Only one of `source_file` or `source_base64` can be '
                     'specified, not both.')

  if have_file:
    with tf.io.gfile.GFile(source_file, 'r') as reader:
      config_json_str = reader.read()
  else:
    config_json_str = base64.b64decode(
        source_base64.encode('utf-8')).decode('utf-8')

  # Preserve key order from the source JSON.
  config_dict = json.loads(
      config_json_str, object_pairs_hook=collections.OrderedDict)
  if write_from_source:
    with tf.io.gfile.GFile(config_path, 'w') as writer:
      writer.write(config_json_str)
  return modeling.EtcConfig.from_dict(config_dict)
def make_global_local_transformer_side_inputs(
    long_breakpoints: tf.Tensor,
    global_breakpoints: tf.Tensor,
    sentence_ids: tf.Tensor,
    local_radius: int,
    relative_pos_max_distance: int,
    use_hard_g2l_mask: bool = False,
    use_hard_l2g_mask: bool = False,
    name: Optional[Text] = None) -> GlobalLocalTransformerSideInputs:
  """Makes side input tensors based on the given breakpoints and sentence ids.

  Note that the concept of `breakpoints` is primarily relevant for
  pre-training, where we pack multiple shorter examples into 1 long example.
  The breakpoints are used to separate the original shorter examples, with
  a `1` occurring at the last token of each packed example.

  For instance, if we packed three examples, with long lengths 2, 3, and 4,
  and the maximum long length is 10, then `long_breakpoints` would look
  like: [0, 1, 0, 0, 1, 0, 0, 0, 1, 0].

  If we're not packing examples (e.g. for all our fine-tuning tasks), the
  `breakpoints` features will only have a single `1`. For instance, if our
  example has 8 long tokens, and the max long length is 10, then
  `long_breakpoints` would look like: [0, 0, 0, 0, 0, 0, 0, 1, 0, 0].

  Note that in this case a classic BERT `input_mask` can be obtained from
  `long_breakpoints` via `tf.cumsum(long_breakpoints, axis=-1, reverse=True)`.

  The `sentence_ids` tensor defines the connection between long and global
  tokens. There's one global token per sentence, and the value in
  `sentence_ids` represents which global token (sentence) a given long token
  belongs to. For instance, if we had a single example with three sentences
  of lengths 3, 1, and 2, and the max length is 10, then `sentence_ids`
  would look like: [0, 0, 0, 1, 2, 2, 0, 0, 0, 0].

  Note that the padding tokens use value 0 above, but any value is fine since
  padding tokens will be masked.

  Args:
    long_breakpoints: <int32>[batch_size, long_seq_len] Tensor of ending
      breakpoints separating different packed examples.
    global_breakpoints: <int32>[batch_size, global_seq_len] Tensor of ending
      breakpoints separating different packed examples.
    sentence_ids: <int32>[batch_size, long_seq_len] Tensor of ids indicating
      which sentence each token belongs to. For this dataset, "sentence"
      refers to real natural language sentence, not a BERT "sentence" from
      the "next sentence prediction" task.
    local_radius: How many tokens to the left/right for input tokens to
      locally self-attend to. For example, a value of 1 would allow each
      token to only attend to 1 token to the left and 1 token to the right
      of it.
    relative_pos_max_distance: Maximum distance to use for relative position
      representations. All larger distances will be clipped to this value.
      Use 0 to skip relative position representations entirely.
    use_hard_g2l_mask: If True, global tokens only attend to tokens of the
      corresponding sentences in the long input. If False, global tokens
      attend to all sentences within the corresponding global example.
    use_hard_l2g_mask: If True, long tokens only attend to tokens of the
      corresponding global tokens. If False, long tokens attend to all the
      global tokens within the corresponding global example.
    name: A name for the operation (optional).

  Returns:
    A `GlobalLocalTransformerSideInputs` with all relevant tensors set.
  """
  with tf.name_scope(name or 'make_global_local_transformer_side_inputs'):
    long_breakpoints = tf.convert_to_tensor(long_breakpoints)
    global_breakpoints = tf.convert_to_tensor(global_breakpoints)
    # A reversed cumulative sum over the 0/1 "ending breakpoint" markers
    # assigns a distinct id to each packed example (counting down from the
    # number of examples), with trailing padding receiving id 0.
    long_example_ids = tf.cumsum(long_breakpoints, axis=-1, reverse=True)
    global_example_ids = tf.cumsum(global_breakpoints, axis=-1, reverse=True)
    # All mask/relative-id construction is delegated to the example-id
    # based variant.
    return make_global_local_transformer_side_inputs_from_example_ids(
        long_example_ids=long_example_ids,
        global_example_ids=global_example_ids,
        sentence_ids=sentence_ids,
        local_radius=local_radius,
        relative_pos_max_distance=relative_pos_max_distance,
        use_hard_g2l_mask=use_hard_g2l_mask,
        use_hard_l2g_mask=use_hard_l2g_mask,
        name=name)
def make_global_local_transformer_side_inputs_from_example_ids(
    long_example_ids: tf.Tensor,
    global_example_ids: tf.Tensor,
    sentence_ids: tf.Tensor,
    local_radius: int,
    relative_pos_max_distance: int,
    use_hard_g2l_mask: bool = False,
    use_hard_l2g_mask: bool = False,
    name: Optional[Text] = None) -> GlobalLocalTransformerSideInputs:
  """Makes side input tensors based on the given example and sentence ids.

  When packing examples (e.g. for pre-training), each example must have a
  unique id for `long_example_ids`/`global_example_ids`, and padding must
  also have a unique id distinct from all the example ids.

  When not packing examples, there will simply be two unique ids: one for
  example tokens, and another for padding. Note that in this case, the classic
  BERT `input_mask` is a valid special case of `long_example_ids`.

  The other arguments have the same interpretation as in
  `make_global_local_transformer_side_inputs`.

  Args:
    long_example_ids: <int32>[batch_size, long_seq_len] Tensor of example ids of
      different packed examples.
    global_example_ids: <int32>[batch_size, global_seq_len] Tensor of example
      ids of different packed examples.
    sentence_ids: <int32>[batch_size, long_seq_len] Tensor of ids indicating
      which sentence each token belongs to. For this dataset, "sentence" refers
      to real natural language sentence, not a BERT "sentence" from the "next
      sentence prediction" task.
    local_radius: How many tokens to the left/right for input tokens to locally
      self-attend to. For example, a value of 1 would allow each token to only
      attend to 1 token to the left and 1 token to the right of it.
    relative_pos_max_distance: Maximum distance to use for relative position
      representations. All larger distances will be clipped to this value. Use 0
      to skip relative position representations entirely.
    use_hard_g2l_mask: If True, global tokens only attend to tokens of the
      corresponding sentences in the long input. If False, global tokens attend
      to all sentences within the corresponding global example.
    use_hard_l2g_mask: If True, long tokens only attend to tokens of the
      corresponding global tokens. If False, long tokens attend to all the
      global tokens within the corresponding global example.
    name: A name for the operation (optional).

  Returns:
    A `GlobalLocalTransformerSideInputs` with all relevant tensors set.
  """
  with tf.name_scope(name or 'make_global_local_transformer_side_inputs'):
    long_example_ids = tf.convert_to_tensor(long_example_ids)
    global_example_ids = tf.convert_to_tensor(global_example_ids)
    sentence_ids = tf.convert_to_tensor(sentence_ids)

    long_seq_len = tensor_utils.get_shape_list(long_example_ids)[1]
    global_seq_len = tensor_utils.get_shape_list(global_example_ids)[1]

    # Long-to-long: local-window attention restricted to tokens sharing the
    # same example id.
    l2l_att_mask = feature_utils.make_local_segmented_att_mask(
        long_example_ids, local_radius)
    # Global-to-global: attention within each packed example.
    g2g_att_mask = feature_utils.make_segmented_att_mask(global_example_ids)
    # Long-to-global: <int32>[batch_size, long_seq_len, global_seq_len], 1
    # wherever the long token and global token share the same example id.
    l2g_att_mask = tf.cast(
        tf.equal(long_example_ids[:, :, tf.newaxis],
                 global_example_ids[:, tf.newaxis, :]), tf.int32)
    # Global-to-long starts as the transpose of long-to-global.
    g2l_att_mask = tf.transpose(l2g_att_mask, perm=[0, 2, 1])

    if use_hard_g2l_mask:
      # Have each global token attend to just one sentence instead of having
      # it attend to all the sentences within a global example. Global token
      # position j corresponds to long tokens whose sentence id equals j.
      global_range = tf.range(global_seq_len, dtype=sentence_ids.dtype)
      hard_g2l_att_mask = tf.cast(
          tf.equal(global_range[tf.newaxis, :, tf.newaxis],
                   sentence_ids[:, tf.newaxis, :]), tf.int32)
      g2l_att_mask *= hard_g2l_att_mask

    if use_hard_l2g_mask:
      # Have each long token attend to just the corresponding global token
      # instead of having it attend to all the global tokens within a
      # global example.
      global_range = tf.range(global_seq_len, dtype=sentence_ids.dtype)
      hard_l2g_att_mask = tf.cast(
          tf.equal(sentence_ids[:, :, tf.newaxis],
                   global_range[tf.newaxis, tf.newaxis, :]), tf.int32)
      l2g_att_mask *= hard_l2g_att_mask

    batch_size = tf.shape(long_example_ids)[0]

    # Relative position ids are optional; they stay None when
    # `relative_pos_max_distance` is 0.
    l2l_relative_att_ids = None
    g2g_relative_att_ids = None
    l2g_relative_att_ids = None
    g2l_relative_att_ids = None

    if relative_pos_max_distance > 0:
      relative_pos_generator = feature_utils.RelativePositionGenerator(
          relative_pos_max_distance)
      l2l_relative_att_ids = relative_pos_generator.make_local_relative_att_ids(
          seq_len=long_seq_len,
          local_radius=local_radius,
          batch_size=batch_size)
      g2g_relative_att_ids = relative_pos_generator.make_relative_att_ids(
          seq_len=global_seq_len, batch_size=batch_size)
      # l2g/g2l use binary ids: 1 where the long token's sentence id matches
      # the global token position, 0 elsewhere.
      global_range = tf.range(global_seq_len, dtype=sentence_ids.dtype)
      l2g_relative_att_ids = tf.cast(
          tf.equal(sentence_ids[:, :, tf.newaxis],
                   global_range[tf.newaxis, tf.newaxis, :]), tf.int32)
      g2l_relative_att_ids = tf.transpose(l2g_relative_att_ids, perm=[0, 2, 1])

      # For fused attention, l2l and l2g share the same relative vocabulary, as
      # do g2g and g2l, so we add an offset for l2g and g2l so their original
      # 0/1 ids don't collide with l2l and g2g relative position ids.
      l2g_relative_att_ids += relative_pos_generator.relative_vocab_size
      g2l_relative_att_ids += relative_pos_generator.relative_vocab_size

    return GlobalLocalTransformerSideInputs(
        l2l_att_mask=l2l_att_mask,
        g2g_att_mask=g2g_att_mask,
        l2g_att_mask=l2g_att_mask,
        g2l_att_mask=g2l_att_mask,
        l2l_relative_att_ids=l2l_relative_att_ids,
        g2g_relative_att_ids=g2g_relative_att_ids,
        l2g_relative_att_ids=l2g_relative_att_ids,
        g2l_relative_att_ids=g2l_relative_att_ids)
def make_fixed_block_side_inputs(
    input_mask: tf.Tensor,
    num_tokens_per_block: int,
    local_radius: int,
    relative_pos_max_distance: int,
    use_hard_g2l_mask: bool = False,
    use_hard_l2g_mask: bool = False,
    global_token_id: int = 1,
    name: Optional[Text] = None
) -> Tuple[GlobalLocalTransformerSideInputs, tf.Tensor]:
  """Utility for creating side inputs in a "fixed blocks" pattern.

  The "fixed blocks" experiments for NQ and OpenKP are implemented via example
  generation rather than using this function, but we include this function
  to illustrate how side inputs can be generated given just a BERT-style
  `input_mask` feature. The corresponding global tokens are generated
  as part of this function too, so no global features are required as input.

  Args:
    input_mask: <int32>[batch_size, long_seq_len] Tensor of 1 and 0 values, with
      1 for actual tokens and 0 for padding. This is the same format as
      original BERT. `long_seq_len` must be statically known.
    num_tokens_per_block: Positive integer number of long tokens to assign to
      each global token. For pre-training on the original BERT data (which was
      also used for ETC pre-training), the dataset implied a value of about 27,
      but values like 16 or 32 would also be reasonable.
    local_radius: How many tokens to the left/right for input tokens to locally
      self-attend to. For example, a value of 1 would allow each token to only
      attend to 1 token to the left and 1 token to the right of it.
    relative_pos_max_distance: Maximum distance to use for relative position
      representations. All larger distances will be clipped to this value. Use
      0 to skip relative position representations entirely.
    use_hard_g2l_mask: If True, global tokens only attend to tokens of their
      corresponding block in the long input. If False, global tokens attend to
      all non-padding long tokens. False is the default setup.
    use_hard_l2g_mask: If True, long tokens only attend to the global token
      corresponding to their block. If False, long tokens attend to all the
      non-padding global tokens. False is the default setup.
    global_token_id: Integer id to use for global tokens. The default is `1`,
      which was the value used during ETC pre-training.
    name: A name for the operation (optional).

  Returns:
    A tuple with the following 2 elements:
      side_inputs: A `GlobalLocalTransformerSideInputs` object containing all
        side input tensors.
      global_token_ids: <int32>[batch_size, global_seq_len] Tensor of global
        tokens ids suitable to pass into `EtcModel`. All global tokens will
        use the same `global_token_id`, except for padding tokens.
  """
  if num_tokens_per_block <= 0:
    raise ValueError('`num_tokens_per_block` must be positive.')

  with tf.name_scope(name or 'make_fixed_block_side_inputs'):
    input_mask = tf.convert_to_tensor(input_mask)

    batch_size = tensor_utils.get_shape_list(input_mask)[0]
    long_seq_len = input_mask.shape.as_list()[1]
    if long_seq_len is None:
      raise ValueError('`long_seq_len` must be statically known.')

    # Number of blocks, rounded up so a partial final block still gets its
    # own global token.
    global_seq_len = (long_seq_len + num_tokens_per_block -
                      1) // num_tokens_per_block

    # [batch_size, global_seq_len, num_tokens_per_block]
    blocked_input_mask = tensor_utils.split_into_blocks(
        input_mask, block_len=num_tokens_per_block, axis=-1)
    assert blocked_input_mask.shape.as_list()[1] == global_seq_len

    # [batch_size, global_seq_len]
    # A global token is real (1) iff its block contains at least one
    # non-padding long token.
    global_input_mask = tf.minimum(
        tf.reduce_max(blocked_input_mask, axis=-1), 1)

    # [long_seq_len]
    # Block index of each long token: [0, ..., 0, 1, ..., 1, 2, ...].
    sentence_ids = tf.repeat(
        tf.range(global_seq_len, dtype=tf.int32),
        num_tokens_per_block)[:long_seq_len]

    # [batch_size, long_seq_len]
    sentence_ids = tf.broadcast_to(sentence_ids, [batch_size, long_seq_len])

    side_inputs = make_global_local_transformer_side_inputs_from_example_ids(
        long_example_ids=input_mask,
        global_example_ids=global_input_mask,
        sentence_ids=sentence_ids,
        local_radius=local_radius,
        relative_pos_max_distance=relative_pos_max_distance,
        use_hard_g2l_mask=use_hard_g2l_mask,
        use_hard_l2g_mask=use_hard_l2g_mask)
    # All real global tokens share `global_token_id`; padding stays 0.
    global_token_ids = global_token_id * global_input_mask
    return side_inputs, global_token_ids
def add_side_input_features(
    model_config: modeling.EtcConfig,
    features: Mapping[Text, tf.Tensor]) -> Dict[Text, tf.Tensor]:
  """Replaces raw input features with derived ETC side inputs.

  This function is meant to be called as part of a Dataset pipeline.

  Args:
    model_config: An `EtcConfig` supplying attention hyperparameters.
    features: A dictionary of Tensor features, crucially including
      `long_breakpoints`, `global_breakpoints`, `sentence_ids`.

  Returns:
    A new `features` dictionary with global-local transformer side inputs.
  """
  side_inputs = make_global_local_transformer_side_inputs(
      long_breakpoints=features['long_breakpoints'],
      global_breakpoints=features['global_breakpoints'],
      sentence_ids=features['sentence_ids'],
      local_radius=model_config.local_radius,
      relative_pos_max_distance=model_config.relative_pos_max_distance,
      use_hard_g2l_mask=model_config.use_hard_g2l_mask,
      use_hard_l2g_mask=model_config.use_hard_l2g_mask)
  # Copy so the caller's mapping is left untouched, then overlay side inputs.
  result = dict(features)
  result.update(side_inputs.to_dict(exclude_none_values=True))
  return result
def get_assignment_map_from_checkpoint(
    variables: Sequence[tf.Variable],
    ckpt_path: Text,
    variable_scope: Text = '',
    ckpt_variable_scope: Text = '') -> Tuple[Dict[Text, tf.Variable],
                                             List[Text]]:
  """Gets the mapping from checkpoint variable names to `variable` names.

  Computes the *intersection* of `variables` (under `variable_scope`) and
  checkpoint variables (under `ckpt_variable_scope`) and gets the name
  mapping from the latter to the former.

  Args:
    variables: The list of Tensorflow variables one aims to initialize.
    ckpt_path: Path to the checkpoint to load `variables` from.
    variable_scope: The scope of `variables` to initialize. `Variables` outside
      this scope will be ignored. If "", use all `variables`; otherwise it
      should end with '/'.
    ckpt_variable_scope: The scope of checkpoint variables to initialize from.
      Checkpoint variables outside this scope will be ignored. If "", use all
      `variables`; otherwise it should end with '/'.

  Returns:
    assignment_map: Mapping from checkpoint variable names to the matching
      `tf.Variable` objects (see the Example below). Keys and values are
      matching variables under the `ckpt_variable_scope` and `variable_scope`
      (sub-)trees.
    initialized_variable_names: Names of `variables` that get matched to
      checkpoint variables.

  Raises:
    ValueError if
      (a) input scope name is not empty and doesn't end with "/"; or
      (b) names of `variables` doesn't end with ':0' (unlikely to happen).

  Example
    Input:
      variables: ["a/aa/aaa:0", "a/c/cc/ccc:0", "d/dd:0"]
      ckpt_variables: ["b/aa/aaa", "b/f"]
      variable_scope: "a/"
      ckpt_variable_scope: "b/"
    Output:
      assignment_map: {"b/aa/aaa": <tf.Variable "a/aa/aaa:0">}
      initialized_variable_names: ["a/aa/aaa:0"]
  """
  if variable_scope and not variable_scope.endswith('/'):
    raise ValueError('{} should end with "/".'.format(variable_scope))

  if ckpt_variable_scope and not ckpt_variable_scope.endswith('/'):
    raise ValueError('{} should end with "/".'.format(ckpt_variable_scope))

  # Collect scope-stripped names of the model variables for intersection
  # against the checkpoint variables below.
  variable_names_stripped = set()
  for var in variables:
    var_name = var.name

    # Ignores `variables` outside scope.
    # Note that all strings start with "".
    if not var_name.startswith(variable_scope):
      continue

    # Names of variables from Tensorflow API all have the suffix of ":0"
    # while those from checkpoint don't. Here we strip the suffix out.
    m = re.match('^(.*):\\d+$', var_name)
    if m is not None:
      var_name = m.group(1)
    else:
      raise ValueError(
          'Variable name does not end with ":0": {}'.format(var_name))

    # Strips the `variable_scope` prefix out.
    var_name_stripped = var_name[len(variable_scope):]
    if var_name_stripped:
      variable_names_stripped.add(var_name_stripped)

  # Full ":0"-suffixed name -> variable object, for building the map values.
  var_name_to_variable = {var.name: var for var in variables}
  assignment_map = collections.OrderedDict()
  initialized_variable_names = []

  for ckpt_var_name, _ in tf.train.list_variables(ckpt_path):
    # Ignores checkpoint variables outside scope.
    # Note that all strings start with "".
    if not ckpt_var_name.startswith(ckpt_variable_scope):
      continue

    ckpt_var_name_stripped = ckpt_var_name[len(ckpt_variable_scope):]
    if ckpt_var_name_stripped not in variable_names_stripped:
      continue

    # Re-attach the model-side scope and ":0" suffix to look up the variable.
    var_name = variable_scope + ckpt_var_name_stripped + ':0'
    assignment_map[ckpt_var_name] = var_name_to_variable[var_name]
    initialized_variable_names.append(var_name)

  return (assignment_map, initialized_variable_names)
def create_int_feature(values: Iterable[int]) -> tf.train.Feature:
  """Creates TensorFlow int features.

  Args:
    values: A sequence of integers.

  Returns:
    An entry of int tf.train.Feature.
  """
  int64_list = tf.train.Int64List(value=list(values))
  return tf.train.Feature(int64_list=int64_list)
def create_float_feature(values: Iterable[float]) -> tf.train.Feature:
  """Creates TensorFlow float features.

  Args:
    values: A sequence of floats.

  Returns:
    An entry of float tf.train.Feature.
  """
  float_list = tf.train.FloatList(value=list(values))
  return tf.train.Feature(float_list=float_list)
def create_bytes_feature(values: Iterable[Text]) -> tf.train.Feature:
  """Creates TensorFlow string features.

  Args:
    values: A sequence of unicode strings.

  Returns:
    An entry of byte tf.train.Feature.
  """
  encoded = [value.encode('utf-8') for value in values]
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=encoded))
def get_feature(feature_name: Text,
                example: tf.train.Example) -> tf.train.Feature:
  """Gets Tensorflow feature by name.

  Args:
    feature_name: The name of the feature.
    example: A Tensorflow example.

  Returns:
    The Tensorflow feature with the given feature name in the example.

  Raises:
    ValueError: If the given feature name is not in the Tensorflow example.
  """
  # Membership is checked explicitly: indexing a proto map with a missing key
  # would silently insert a default entry rather than fail.
  if feature_name not in example.features.feature:
    raise ValueError('Feature name {} is not in the example {}'.format(
        feature_name, example))
  return example.features.feature[feature_name]
def get_repeated_values(
    feature_name: Text,
    example: tf.train.Example) -> MutableSequence[Union[bytes, float, int]]:
  """Gets the underlying repeated values of a feature by feature name.

  The return type depends on which oneof `kind` is populated for the feature.
  Whichever one is populated is returned.

  Args:
    feature_name: The name of the feature.
    example: A Tensorflow example.

  Returns:
    The underlying repeated values for the given feature name in the example.
    Modifying these repeated values will modify the example.

  Raises:
    ValueError: If the given feature name is not in the Tensorflow example or
      none of the oneof `kind` fields is populated.
  """
  feature = get_feature(feature_name, example)
  populated_kind = feature.WhichOneof('kind')
  if populated_kind is None:
    raise ValueError(
        'No field populated in oneof `kind` for feature name {} in example '
        '{}'.format(feature_name, example))
  return getattr(feature, populated_kind).value
| |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various ops for TransformerVaeFlowPrior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import transformer_glow_layers_ops as gops
from tensor2tensor.models.transformer import transformer_decoder_layer
from tensor2tensor.models.transformer import transformer_encoder
from tensor2tensor.models.transformer import transformer_prepare_encoder
from tensor2tensor.utils import learning_rate as lr
from tensor2tensor.utils import mlperf_log
import tensorflow.compat.v1 as tf
def _mixed_precision_is_enabled(hparams):
  """Should be the same as in common_attention, avoiding import."""
  # Mixed precision == float16 activations with float32 master weights.
  return (hparams.activation_dtype == tf.float16
          and hparams.weight_dtype == tf.float32)
def encoder(name, hparams, inputs, target_space):
  """Compute encoder outputs and attention bias.

  Args:
    name: Variable-scope name; reused via AUTO_REUSE across calls.
    hparams: Model hyperparameters.
    inputs: Source-side inputs fed to `transformer_prepare_encoder`.
    target_space: Target space id for `transformer_prepare_encoder`.

  Returns:
    A pair `(encoder_output, encoder_decoder_attention_bias)`.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    (encoder_input,
     encoder_self_attention_bias,
     encoder_decoder_attention_bias) = (
         transformer_prepare_encoder(inputs, target_space, hparams))
    # Input dropout before the encoder stack.
    encoder_input = tf.nn.dropout(encoder_input,
                                  rate=hparams.layer_prepostprocess_dropout)
    encoder_output = transformer_encoder(encoder_input,
                                         encoder_self_attention_bias,
                                         hparams)
    return encoder_output, encoder_decoder_attention_bias
def transformer_decoder_layers(name,
                               n_layers,
                               decoder_input,
                               **kwargs):
  """A transformation block composed of transformer decoder layers.

  Args:
    name: Outer variable-scope name; reused via AUTO_REUSE.
    n_layers: Number of decoder layers to stack.
    decoder_input: Input tensor fed to the first decoder layer.
    **kwargs: Forwarded to `transformer_decoder_layer`; must include
      `hparams`.

  Returns:
    Output of the last decoder layer after layer preprocessing.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    hparams = kwargs["hparams"]
    outputs = decoder_input
    # Inner "decoder" scope matches the stock transformer variable layout.
    with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
      for layer_idx in range(n_layers):
        outputs = transformer_decoder_layer(
            decoder_input=outputs,
            layer_idx=layer_idx,
            **kwargs)
      # Final normalization of the stack output.
      outputs = common_layers.layer_preprocess(outputs, hparams)
    return outputs
def posterior(
    name, hparams, targets, targets_mask, decoder_self_attention_bias,
    **kwargs):
  """Compute mu and sigma for diagonal normal posterior q(z|x,y).

  Args:
    name: Variable-scope name; reused via AUTO_REUSE.
    hparams: Model hyperparameters.
    targets: Target-side representations (the `y` the posterior conditions on).
    targets_mask: Mask of non-padding target positions.
    decoder_self_attention_bias: Self-attention bias for the decoder stack.
    **kwargs: Forwarded to `transformer_decoder_layers` (e.g. encoder output
      and encoder-decoder attention bias).

  Returns:
    Tensor with `2 * hparams.latent_size` channels per position, holding the
    posterior parameters (mu and sigma).
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    # Drop whole target positions (2D dropout) to regularize the posterior.
    decoder_input = drop_2d(targets, hparams.mode, hparams.posterior_2d_dropout)
    decoder_input = common_attention.add_timing_signal_1d(decoder_input)
    decoder_input = tf.nn.dropout(decoder_input,
                                  rate=hparams.layer_prepostprocess_dropout)
    decoder_output = transformer_decoder_layers(
        "block",
        n_layers=hparams.n_posterior_layers,
        decoder_input=decoder_input,
        hparams=hparams,
        decoder_self_attention_bias=decoder_self_attention_bias,
        **kwargs)
    # Weight-normalized projection to the 2 * latent_size parameter channels;
    # init_scale=0.0 starts the projection at zero output.
    decoder_output = gops.dense_weightnorm(
        "h2o_out", decoder_output, hparams.latent_size * 2, targets_mask,
        init_scale=0.0, init=False)
    return decoder_output
def cond_prior(
    name, hparams, decoder_input, targets_mask, output_size,
    decoder_self_attention_bias, init_scale=0.0, **kwargs):
  """Compute hidden states for parameters for conditional prior.

  Args:
    name: Variable-scope name; reused via AUTO_REUSE.
    hparams: Model hyperparameters.
    decoder_input: Input representations the prior conditions on.
    targets_mask: Mask of non-padding target positions.
    output_size: Channel count of the output projection.
    decoder_self_attention_bias: Self-attention bias for the decoder stack.
    init_scale: Initial scale for the weight-normalized output projection.
    **kwargs: Forwarded to `transformer_decoder_layers`.

  Returns:
    Tensor with `output_size` channels per position.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    decoder_input = common_attention.add_timing_signal_1d(decoder_input)
    decoder_input = tf.nn.dropout(decoder_input,
                                  rate=hparams.layer_prepostprocess_dropout)
    # NOTE(review): depth is taken from `n_posterior_layers`, not a dedicated
    # prior hparam — the prior intentionally mirrors the posterior's depth.
    decoder_output = transformer_decoder_layers(
        "block",
        n_layers=hparams.n_posterior_layers,
        decoder_input=decoder_input,
        hparams=hparams,
        decoder_self_attention_bias=decoder_self_attention_bias,
        **kwargs)
    decoder_output = gops.dense_weightnorm(
        "h2o_out", decoder_output, output_size, targets_mask,
        init_scale=init_scale, init=False)
    return decoder_output
def decoder(name, latents, hparams, decoder_self_attention_bias, **kwargs):
  """Compute final hidden states for p(y|z,x).

  Args:
    name: Variable-scope name; reused via AUTO_REUSE.
    latents: Latent representations z fed to the decoder.
    hparams: Model hyperparameters.
    decoder_self_attention_bias: Self-attention bias for the decoder stack.
    **kwargs: Forwarded to `transformer_decoder_layers`.

  Returns:
    <float>[batch_size, targets_length, 1, hidden_size] decoder states
    (expanded to 4-D since t2t expects 4d tensors).
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    decoder_input = drop_2d(latents, hparams.mode, hparams.decoder_2d_dropout)
    if hparams.pos_attn:
      # Positional attention instead of additive timing signals.
      decoder_input = gops.positional_attention(
          "pos_attn", decoder_input, decoder_self_attention_bias, hparams)
    else:
      decoder_input = common_attention.add_timing_signal_1d(decoder_input)
    if common_layers.shape_list(latents)[-1] != hparams.hidden_size:
      # NOTE(review): this projects the raw `latents`, discarding the 2D
      # dropout / positional signal computed above when the latent width
      # differs from hidden_size — confirm this is intended.
      decoder_input = gops.dense("lat2hid", latents, hparams.hidden_size)
    decoder_output = transformer_decoder_layers(
        "block",
        n_layers=hparams.n_decoder_layers,
        decoder_input=decoder_input,
        hparams=hparams,
        decoder_self_attention_bias=decoder_self_attention_bias,
        **kwargs)
    batch_size, targets_length = common_layers.shape_list(decoder_output)[:2]
    decoder_output = tf.reshape(
        decoder_output, [batch_size, targets_length, 1, hparams.hidden_size])
    # Expand since t2t expects 4d tensors.
    return decoder_output
def drop_2d(targets, mode, dropout_p):
  """Dropout in 2D: zeroes whole positions (all channels) during training."""
  # Identity outside training or when dropout is disabled.
  if dropout_p <= 0 or mode != tf.estimator.ModeKeys.TRAIN:
    return targets
  batch, length, depth = common_layers.shape_list(targets)
  # One uniform draw per (batch, position), shared across channels.
  keep_scores = tf.random_uniform(
      shape=(batch, length), minval=0.0, maxval=1.0)
  keep_scores = tf.tile(keep_scores[..., tf.newaxis], [1, 1, depth])
  # Inverted-dropout rescaling keeps the expectation unchanged.
  rescale = 1 / (1 - dropout_p)
  return tf.where(
      keep_scores > dropout_p, targets * rescale, tf.zeros_like(targets))
def sequence_mask(length, hparams):
  """Build a sequence mask for `length` in the hparams activation dtype."""
  return tf.sequence_mask(length, dtype=get_dtype(hparams))
def get_padding(mask, hparams):
  """Return 1.0 where `mask` is zero (padding), 0.0 elsewhere."""
  return tf.cast(tf.equal(mask, 0.0), dtype=get_dtype(hparams))
def get_dtype(hparams):
  """Map the `activation_dtype` hparam string to a tf dtype (None if unknown)."""
  dtype_by_name = {
      "float32": tf.float32,
      "float64": tf.float64,
      "bfloat16": tf.bfloat16,
  }
  return dtype_by_name.get(hparams.activation_dtype)
def lenpred_mlp(name, logits, hidden_size, bound):
  """Two ELU hidden layers, then a projection to `2 * bound + 1` classes."""
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    hidden = logits
    for _ in range(2):
      hidden = tf.nn.elu(tf.layers.dense(hidden, hidden_size))
    # One class per length difference in [-bound, bound].
    return tf.layers.dense(hidden, bound * 2 + 1)
def predict_target_lengths(
    encoder_output, inputs_mask, hparams, length_diff=None):
  """Predict target lengths.

  Args:
    encoder_output: Encoder hidden states.
    inputs_mask: Mask of non-padding source positions.
    hparams: Model hyperparameters (`lendiff_bound`,
      `predict_target_length`, `hidden_size`).
    length_diff: Optional true (target - source) length difference used to
      build a training loss.

  Returns:
    A pair `(targets_length, loss)`; `loss` is None unless both
    `predict_target_length` is set and `length_diff` is given.
  """
  bound = hparams.lendiff_bound
  inputs_length = tf.cast(tf.reduce_sum(inputs_mask, 1), tf.int32)
  # Fallback: assume target length equals source length.
  targets_length = inputs_length
  loss = None
  if hparams.predict_target_length:
    encoder_output = gops.reduce_mean_over_l(encoder_output, inputs_mask)
    # stop_gradient: the length head must not backprop into the encoder.
    logits = tf.stop_gradient(encoder_output)
    logits = lenpred_mlp("lenpred", logits, hparams.hidden_size, bound)
    if length_diff is not None:
      # Clamp the true diff to [-bound, bound] and shift by +bound so labels
      # are class ids in [0, 2 * bound].
      labels = tf.maximum(tf.minimum(length_diff, bound), -bound)
      labels = tf.cast(labels + bound, tf.int32)
      labels = tf.stop_gradient(labels)
      loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits)
      loss = tf.reduce_mean(loss)
    # Undo the +bound shift to recover a signed length difference.
    diff_pred = tf.argmax(logits, 1)
    diff_pred = tf.cast(diff_pred - bound, tf.int32)
    targets_length = inputs_length + diff_pred
    targets_length = tf.maximum(targets_length, 1)
  # Round lengths up to a multiple of 4.
  divi = 4
  targets_length = tf.ceil(targets_length / divi) * divi
  targets_length = tf.cast(targets_length, tf.int32)
  return targets_length, loss
def lenpred_stats(targets_length_pred, targets_length):
  """Exact and within-5 accuracy of the length predictions."""
  abs_diff = tf.abs(
      targets_length_pred - tf.cast(targets_length, tf.int32))
  exact_acc = tf.reduce_mean(tf.cast(tf.equal(abs_diff, 0), tf.float32))
  within5_acc = tf.reduce_mean(
      tf.cast(tf.less_equal(abs_diff, 5), tf.float32))
  return exact_acc, within5_acc
def save_log_loss(
    hparams, targets_mask, numerator, denominator, log_q_z, log_abs_det,
    log_p_z_base, z_q, lenpred_loss, targets_length_pred, targets_length):
  """Populate loss dictionary and summary.

  Args:
    hparams: Model hyperparameters (`kl_reg` and annealing settings).
    targets_mask: Mask of non-padding target positions.
    numerator: Cross-entropy loss numerator (summed token losses).
    denominator: Cross-entropy loss denominator (token count).
    log_q_z: Posterior log density of the sampled latent.
    log_abs_det: Log |det Jacobian| of the flow.
    log_p_z_base: Base-distribution log density of the flow output.
    z_q: Sampled latent from the posterior.
    lenpred_loss: Optional length-prediction loss (may be None).
    targets_length_pred: Predicted target lengths.
    targets_length: True target lengths.

  Returns:
    A pair `(loss_dict, monitor)` of training losses and scalar diagnostics.
  """
  anneal, kl_mask = get_anneal_mask(hparams)
  lenpred_acc, lenpred_acc5 = (
      lenpred_stats(targets_length_pred, targets_length))
  batch_length = tf.reduce_sum(targets_mask)
  # Average latent norm, for monitoring only.
  z_q_norm = gops.reduce_mean_over_bl(
      tf.norm(z_q, axis=2, keepdims=True), targets_mask)[0]
  # Normalize the density terms per non-padding token.
  log_q_z = gops.reduce_mean_over_bl_sum_over_c(log_q_z, targets_mask)
  log_p_z_base = tf.reduce_sum(log_p_z_base, axis=0) / batch_length
  log_abs_det = tf.reduce_sum(log_abs_det, axis=0) / batch_length
  # Density of z_q under a standard normal, used for the KL regularizer.
  log_p_z_reg = gops.standard_normal_density(z_q, targets_mask, reduce_sum=True)

  log_p_x = -1 * numerator / denominator
  # Flow prior via change of variables: log p(z) = log p_base + log|det J|.
  log_p_z = log_p_z_base + log_abs_det
  kl = log_q_z - log_p_z
  kl_reg = log_p_z - log_p_z_reg
  elbo = log_p_x - kl

  monitor = {
      "elbo": elbo,
      "kl": kl,
      "kl_reg": kl_reg,
      "log_p_x": log_p_x,
      "log_q_z": log_q_z,
      "log_p_z": log_p_z,
      "log_p_z_base": log_p_z_base,
      "log_abs_det": log_abs_det,
      "anneal": anneal,
      "z_q_norm": z_q_norm,
      "lenpred_acc": lenpred_acc,
      "lenpred_acc5": lenpred_acc5,
  }
  # Annealed and gated KL terms enter the training loss; the monitor dict
  # above keeps the un-annealed values.
  kl = kl * anneal
  kl_reg = hparams.kl_reg * kl_reg * anneal
  loss_dict = {
      "training": -1 * log_p_x,
      "kl": kl * kl_mask,
      "kl_reg": kl_reg * kl_mask,
  }
  if lenpred_loss is not None:
    monitor["lenpred_loss"] = lenpred_loss
    loss_dict["lenpred_loss"] = lenpred_loss
  return loss_dict, monitor
def get_anneal_mask(hparams):
  """Get anneal and kl mask."""
  startup_steps = hparams.kl_startup_steps
  global_step = tf.train.get_global_step()
  # Annealing starts counting only after the startup period.
  effective_step = tf.maximum(global_step - startup_steps, 0)
  anneal_value = common_layers.inverse_lin_decay(
      hparams.kl_anneal_steps,
      min_value=hparams.anneal_min_value,
      step=effective_step)
  # KL terms are fully masked out until the startup period has passed.
  kl_mask = tf.cast(
      tf.less(startup_steps, tf.to_int32(global_step)), tf.float32)
  return anneal_value, kl_mask
def embedding_to_non_padding(emb, dtype=tf.float32):
  """Calculates the padding mask based on which embeddings are not zero."""
  is_nonzero = tf.not_equal(tf.reduce_sum(tf.abs(emb), axis=-1), 0.0)
  return tf.cast(is_nonzero, dtype=dtype)
def save_summary(monitor, name):
  """Emit one scalar summary per monitored quantity under name scope `name`."""
  with tf.name_scope(name):
    for key, value in monitor.items():
      tf.summary.scalar(key, value)
def _global_step(hparams):
  """Adjust global step if a multi-step optimizer is used."""
  step = tf.cast(tf.train.get_or_create_global_step(), tf.float32)
  multiplier = hparams.optimizer_multistep_accumulate_steps
  if multiplier:
    # Each optimizer update spans `multiplier` accumulated steps.
    tf.logging.info("Dividing global step by %d for multi-step optimizer."
                    % multiplier)
    step = step / tf.cast(multiplier, tf.float32)
  return step
def learning_rate_schedule(hparams):
  """Learning rate schedule based on hparams.

  Args:
    hparams: Model hyperparameters; `learning_rate_schedule` is a
      '*'-separated product of factor names understood by
      `lr.learning_rate_factor`.

  Returns:
    Scalar learning-rate tensor.
  """
  mlperf_log.transformer_print(key=mlperf_log.OPT_LR, deferred=True)
  mlperf_log.transformer_print(
      key=mlperf_log.OPT_LR_WARMUP_STEPS,
      value=hparams.learning_rate_warmup_steps)
  step_num = _global_step(hparams)
  # Simulate pretraining the encoder, decoder and posterior with the same
  # learning rate schedule, and then restoring the parameters.
  # using `warm_start_from` is not compatible with actnorm DDI on TPUs.
  # After the startup period the schedule restarts from step 0.
  step_num = tf.where(
      step_num < hparams.kl_startup_steps,
      step_num,
      step_num - hparams.kl_startup_steps)
  schedule_string = hparams.learning_rate_schedule
  names = schedule_string.split("*")
  names = [name.strip() for name in names if name.strip()]
  # The schedule is the product of the named factors.
  ret = tf.constant(1.0)
  for name in names:
    ret *= lr.learning_rate_factor(name, step_num, hparams)
  return ret
def prepare_for_iw(x, k):
  """Prepare feature for importance sampling."""
  # Tile a leading axis of k copies, then fold it into the batch dimension:
  # [b, ...] -> [k * b, ...].
  shape = common_layers.shape_list(x)
  tiled = tf.tile(x[tf.newaxis, ...], [k] + [1] * x.shape.rank)
  return tf.reshape(tiled, [k * shape[0]] + shape[1:])
def unprepare_for_iw(x, k):
  """Unprepare feature for importance sampling."""
  # Inverse of `prepare_for_iw`: [k * b, ...] -> [k, b, ...].
  shape = common_layers.shape_list(x)
  return tf.reshape(x, [k, shape[0] // k] + shape[1:])
def generic_loss(top_out, targets, model_hparams, vocab_size, weights_fn):
  """Compute loss numerator and denominator for one shard of output."""
  del vocab_size  # unused arg
  logits = common_attention.maybe_upcast(top_out, hparams=model_hparams)
  # Cutoff defaults to 0.0 when the hparam is absent.
  loss_cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.0)
  return common_layers.padded_cross_entropy(
      logits,
      targets,
      model_hparams.label_smoothing,
      cutoff=loss_cutoff,
      weights_fn=weights_fn,
      reduce_sum=False)
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fuzzer_stats."""
import datetime
import json
import os
import re
import unittest
import mock
import six
from clusterfuzz._internal.bot.tasks import fuzz_task
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.metrics import fuzzer_stats
from clusterfuzz._internal.tests.test_libs import helpers
from clusterfuzz._internal.tests.test_libs import test_utils
def sanitize_sql(s):
  """Sanitize the sql by removing all new lines and surrounding whitespace."""
  # Collapse each newline (plus surrounding whitespace) into a single space.
  result = re.sub('[ \\s\n\r]*\n[ \\s\n\r]*', ' ', s, flags=re.MULTILINE)
  # Trim blanks just inside parentheses.
  for pattern, replacement in (('\\([ \t]+', '('), ('[ \t]+\\)', ')')):
    result = re.sub(pattern, replacement, result)
  return result.strip()
@test_utils.with_cloud_emulators('datastore')
class FuzzerStatsTest(unittest.TestCase):
"""Fuzzer stats tests."""
def setUp(self):
  """Set up a minimal fuzzer hierarchy and stub out GCS writes."""
  helpers.patch_environ(self)
  # Parent engine fuzzer with one child target bound to a job, so tests can
  # exercise parent/child fuzzer name handling (e.g. 'parent_child').
  data_types.Fuzzer(name='parent').put()
  data_types.FuzzTarget(engine='parent', binary='child').put()
  data_types.FuzzTargetJob(
      engine='parent',
      fuzz_target_name='parent_child',
      job='test_job',
      last_run=datetime.datetime.utcnow()).put()
  # Mock storage writes; available to tests as self.mock.write_data.
  helpers.patch(self, [
      'clusterfuzz._internal.google_cloud_utils.storage.write_data',
  ])
def test_upload_testcase_run(self):
  """Tests uploading of TestcaseRun."""
  # Two runs with identical fuzzer/job/revision/timestamp but different
  # stats; both should land in one newline-delimited JSON upload.
  testcase_run_0 = fuzzer_stats.TestcaseRun('fuzzer', 'job', 123,
                                            1472846341.017923)
  testcase_run_1 = fuzzer_stats.TestcaseRun('fuzzer', 'job', 123,
                                            1472846341.017923)
  testcase_run_0['stat'] = 1000
  testcase_run_1['stat'] = 2000
  fuzzer_stats.upload_stats(
      [testcase_run_0, testcase_run_1], filename='upload.json')
  # Single write to the date-partitioned GCS path (timestamp -> 20160902).
  self.mock.write_data.assert_called_once_with(
      b'{"fuzzer": "fuzzer", "job": "job", "build_revision": 123, '
      b'"timestamp": 1472846341.017923, "kind": "TestcaseRun", '
      b'"stat": 1000}\n'
      b'{"fuzzer": "fuzzer", "job": "job", "build_revision": 123, '
      b'"timestamp": 1472846341.017923, "kind": "TestcaseRun", '
      b'"stat": 2000}',
      'gs://test-bigquery-bucket/fuzzer/TestcaseRun/date/20160902/upload.json'
  )
def tests_upload_testcase_run_with_source(self):
  """Test uploading testcase run with source."""
  # STATS_SOURCE should surface as a "source" field in the uploaded JSON.
  # (Restored by patch_environ in setUp.)
  os.environ['STATS_SOURCE'] = 'custom_source'
  testcase_run = fuzzer_stats.TestcaseRun('fuzzer', 'job', 123,
                                          1472846341.017923)
  fuzzer_stats.upload_stats([testcase_run], filename='upload.json')
  self.mock.write_data.assert_called_once_with(
      b'{"fuzzer": "fuzzer", "job": "job", "build_revision": 123, '
      b'"timestamp": 1472846341.017923, "kind": "TestcaseRun", '
      b'"source": "custom_source"}',
      'gs://test-bigquery-bucket/fuzzer/TestcaseRun/date/20160902/upload.json'
  )
def test_upload_testcase_run_child(self):
  """Tests uploading of Testcaserun for a child fuzzer."""
  # 'parent_child' stats should be written under the parent fuzzer's GCS
  # directory ('.../parent/...'), not the child's.
  testcase_run_0 = fuzzer_stats.TestcaseRun('parent_child', 'job', 123,
                                            1472846341.017923)
  testcase_run_0['stat'] = 1000
  fuzzer_stats.upload_stats([testcase_run_0], filename='upload.json')
  self.mock.write_data.assert_called_once_with(
      b'{"fuzzer": "parent_child", "job": "job", "build_revision": 123, '
      b'"timestamp": 1472846341.017923, "kind": "TestcaseRun", '
      b'"stat": 1000}',
      'gs://test-bigquery-bucket/parent/TestcaseRun/date/20160902/upload.json'
  )
def test_upload_testcase_run_2_days(self):
  """Tests uploading TestcaseRuns that span multiple days."""
  # Two runs on 2016-09-02 and two on 2016-09-03; upload_stats must write
  # one GCS object per day, each containing that day's records joined by
  # newlines.
  timestamps = (1472846341.017923, 1472846345.017923, 1472932741.017923,
                1472932745.017923)
  runs = []
  for index, timestamp in enumerate(timestamps):
    run = fuzzer_stats.TestcaseRun('fuzzer', 'job', 123, timestamp)
    run['stat'] = (index + 1) * 1000
    runs.append(run)

  fuzzer_stats.upload_stats(runs, filename='upload.json')

  expected_calls = [
      mock.call(
          b'{"fuzzer": "fuzzer", "job": "job", "build_revision": 123, '
          b'"timestamp": 1472846341.017923, "kind": "TestcaseRun", '
          b'"stat": 1000}\n'
          b'{"fuzzer": "fuzzer", "job": "job", "build_revision": 123, '
          b'"timestamp": 1472846345.017923, "kind": "TestcaseRun", '
          b'"stat": 2000}',
          'gs://test-bigquery-bucket/fuzzer/TestcaseRun/date/20160902/'
          'upload.json'),
      mock.call(
          b'{"fuzzer": "fuzzer", "job": "job", "build_revision": 123, '
          b'"timestamp": 1472932741.017923, "kind": "TestcaseRun", '
          b'"stat": 3000}\n'
          b'{"fuzzer": "fuzzer", "job": "job", "build_revision": 123, '
          b'"timestamp": 1472932745.017923, "kind": "TestcaseRun", '
          b'"stat": 4000}',
          'gs://test-bigquery-bucket/fuzzer/TestcaseRun/date/20160903/'
          'upload.json'),
  ]
  six.assertCountEqual(self, self.mock.write_data.call_args_list,
                       expected_calls)
def test_upload_job_run(self):
  """Tests uploading of JobRun."""
  crashes = [{
      'is_new': False,
      'count': 2,
      'crash_type': 't1',
      'crash_state': 's1',
      'security_flag': True
  }]
  job_run = fuzzer_stats.JobRun('fuzzer', 'job', 123, 1472846341.017923,
                                9001, 0, 1, crashes)
  fuzzer_stats.upload_stats([job_run], filename='upload.json')

  self.assertEqual(1, self.mock.write_data.call_count)
  write_args = self.mock.write_data.call_args[0]
  # The uploaded payload is the JSON-serialized JobRun record.
  expected_record = {
      'kind': 'JobRun',
      'known_crashes': 1,
      'timestamp': 1472846341.017923,
      'job': 'job',
      'fuzzer': 'fuzzer',
      'new_crashes': 0,
      'build_revision': 123,
      'testcases_executed': 9001,
      'crashes': crashes
  }
  self.assertEqual(expected_record, json.loads(write_args[0]))
  self.assertEqual(
      'gs://test-bigquery-bucket/fuzzer/JobRun/date/20160902/upload.json',
      write_args[1])
@mock.patch('os.path.exists')
def test_testcase_run_read_from_disk(self, mock_path_exists):
  """Tests TestcaseRun deserialization."""
  mock_path_exists.return_value = True
  serialized = ('{"stat": 1000, "timestamp": 1472846341.017923, '
                '"kind": "TestcaseRun", "job": "job", "fuzzer": "fuzzer", '
                '"build_revision": 123}')
  mocked_open = mock.mock_open(read_data=serialized)
  with mock.patch('clusterfuzz._internal.metrics.fuzzer_stats.open',
                  mocked_open):
    testcase_run = fuzzer_stats.TestcaseRun.read_from_disk('fake_path')

  self.assertIsNotNone(testcase_run)
  # Header fields are exposed as attributes...
  self.assertEqual(testcase_run.kind, 'TestcaseRun')
  self.assertEqual(testcase_run.fuzzer, 'fuzzer')
  self.assertEqual(testcase_run.job, 'job')
  self.assertEqual(testcase_run.build_revision, 123)
  self.assertEqual(testcase_run.timestamp, 1472846341.017923)
  # ...while custom stats stay accessible via item lookup.
  self.assertEqual(testcase_run['stat'], 1000)
def test_testcase_run_write_to_disk(self):
  """Tests TestcaseRun serialization."""
  run = fuzzer_stats.TestcaseRun('fuzzer', 'job', 123, 1472846341.017923)
  mocked_open = mock.mock_open()
  with mock.patch('clusterfuzz._internal.metrics.fuzzer_stats.open',
                  mocked_open):
    fuzzer_stats.TestcaseRun.write_to_disk(run, 'fake_path')
  # Exactly one write, containing the JSON form of the run.
  mocked_open().write.assert_called_once_with(
      '{"fuzzer": "fuzzer", "job": "job", "build_revision": 123, '
      '"timestamp": 1472846341.017923, "kind": "TestcaseRun"}')
def test_job_run_from_json(self):
  """Tests JobRun deserialization."""
  serialized = json.dumps({
      'kind': 'JobRun',
      'known_crashes': 1,
      'timestamp': 1472846341.017923,
      'job': 'job',
      'fuzzer': 'fuzzer',
      'new_crashes': 0,
      'build_revision': 123,
      'testcases_executed': 9001,
      'crashes': [{
          'test': 'crash'
      }]
  })
  # BaseRun.from_json dispatches on "kind" and should yield a JobRun.
  job_run = fuzzer_stats.BaseRun.from_json(serialized)
  self.assertIsNotNone(job_run)
  self.assertEqual(job_run.kind, 'JobRun')
  self.assertEqual(job_run.fuzzer, 'fuzzer')
  self.assertEqual(job_run.job, 'job')
  self.assertEqual(job_run.build_revision, 123)
  self.assertEqual(job_run.timestamp, 1472846341.017923)
  # Non-header values are reachable via item lookup.
  self.assertEqual(job_run['new_crashes'], 0)
  self.assertEqual(job_run['known_crashes'], 1)
  self.assertEqual(job_run['testcases_executed'], 9001)
  self.assertEqual(job_run['crashes'], [{'test': 'crash'}])
def test_fuzz_task_upload_job_run_stats(self):
  """Tests that fuzz_task.upload_job_run_stats uploads stats."""
  # Two crash groups: the first has 2 crashes and is already known
  # (is_new False), the second has 1 crash and is new.
  groups = [
      mock.Mock(
          crashes=[mock.Mock(), mock.Mock()],
          main_crash=mock.Mock(
              crash_type='t1', crash_state='s1', security_flag=True)),
      mock.Mock(
          crashes=[mock.Mock()],
          main_crash=mock.Mock(
              crash_type='t2', crash_state='s2', security_flag=False)),
  ]
  groups[0].is_new.return_value = False
  groups[1].is_new.return_value = True
  fuzz_task.upload_job_run_stats('fuzzer', 'job', 123, 1472846341.017923, 1,
                                 2, 1337, groups)
  # One upload whose JSON payload summarizes both groups; per-group counts
  # come from len(group.crashes), newness from group.is_new().
  self.assertEqual(1, self.mock.write_data.call_count)
  self.assertEqual({
      'kind': 'JobRun',
      'known_crashes': 2,
      'timestamp': 1472846341.017923,
      'job': 'job',
      'fuzzer': 'fuzzer',
      'new_crashes': 1,
      'build_revision': 123,
      'testcases_executed': 1337,
      'crashes': [
          {
              'is_new': False,
              'count': 2,
              'crash_type': 't1',
              'crash_state': 's1',
              'security_flag': True
          },
          {
              'is_new': True,
              'count': 1,
              'crash_type': 't2',
              'crash_state': 's2',
              'security_flag': False
          },
      ]
  }, json.loads(self.mock.write_data.call_args[0][0]))
@test_utils.with_cloud_emulators('datastore')
class BigQueryStatsTests(unittest.TestCase):
  """BigQuery stats tests.

  Query-building tests compare sanitize_sql(query.build()) against
  sanitized expected SQL, so whitespace/indentation inside the expected
  SQL literals is presumably normalized by sanitize_sql — TODO confirm.
  """

  def setUp(self):
    # Minimal datastore fixtures: a parent engine fuzzer with one child
    # fuzz target mapped to a job.
    data_types.Fuzzer(name='parent').put()
    data_types.FuzzTarget(engine='parent', binary='child').put()
    data_types.FuzzTargetJob(
        engine='parent',
        fuzz_target_name='parent_child',
        job='test_job',
        last_run=datetime.datetime.utcnow()).put()

  def test_parse_stats_column_fields(self):
    """Tests stats column parsing."""
    fields = fuzzer_stats.parse_stats_column_fields(
        'sum(t.abc), avg(j.abc) as bcd, custom(j.def) as def, '
        '_EDGE_COV, _FUNC_COV as 123,\n'
        '_COV_REPORT as blahblah, _CORPUS_SIZE as corpus_size, '
        '_CORPUS_BACKUP as corpus_backup')
    self.assertEqual(len(fields), 8)
    self.assertIsInstance(fields[0], fuzzer_stats.QueryField)
    self.assertEqual(fields[0].aggregate_function, 'sum')
    self.assertFalse(fields[0].is_custom())
    self.assertEqual(fields[0].name, 'abc')
    self.assertEqual(fields[0].table_alias, 't')
    # select_alias is defaulted to name.
    self.assertEqual(fields[0].select_alias, 'abc')
    self.assertIsInstance(fields[1], fuzzer_stats.QueryField)
    self.assertEqual(fields[1].aggregate_function, 'avg')
    self.assertFalse(fields[1].is_custom())
    self.assertEqual(fields[1].name, 'abc')
    self.assertEqual(fields[1].table_alias, 'j')
    self.assertEqual(fields[1].select_alias, 'bcd')
    self.assertIsInstance(fields[2], fuzzer_stats.QueryField)
    self.assertEqual(fields[2].aggregate_function, 'custom')
    self.assertTrue(fields[2].is_custom())
    self.assertEqual(fields[2].name, 'def')
    self.assertEqual(fields[2].table_alias, 'j')
    self.assertEqual(fields[2].select_alias, 'def')
    # Builtin (underscore-prefixed) names parse as BuiltinFieldSpecifier.
    self.assertIsInstance(fields[3], fuzzer_stats.BuiltinFieldSpecifier)
    self.assertEqual(fields[3].name, '_EDGE_COV')
    self.assertEqual(fields[3].field_class(), fuzzer_stats.CoverageField)
    self.assertIsNone(fields[3].alias)
    self.assertIsInstance(fields[4], fuzzer_stats.BuiltinFieldSpecifier)
    self.assertEqual(fields[4].name, '_FUNC_COV')
    self.assertEqual(fields[4].field_class(), fuzzer_stats.CoverageField)
    self.assertEqual(fields[4].alias, '123')
    self.assertIsInstance(fields[5], fuzzer_stats.BuiltinFieldSpecifier)
    self.assertEqual(fields[5].name, '_COV_REPORT')
    self.assertEqual(fields[5].field_class(), fuzzer_stats.CoverageReportField)
    self.assertEqual(fields[5].alias, 'blahblah')
    self.assertIsInstance(fields[6], fuzzer_stats.BuiltinFieldSpecifier)
    self.assertEqual(fields[6].name, '_CORPUS_SIZE')
    self.assertEqual(fields[6].field_class(), fuzzer_stats.CorpusSizeField)
    self.assertEqual(fields[6].alias, 'corpus_size')
    self.assertIsInstance(fields[7], fuzzer_stats.BuiltinFieldSpecifier)
    self.assertEqual(fields[7].name, '_CORPUS_BACKUP')
    self.assertEqual(fields[7].field_class(), fuzzer_stats.CorpusBackupField)
    self.assertEqual(fields[7].alias, 'corpus_backup')

    # Test that invalid fields are ignored.
    fields = fuzzer_stats.parse_stats_column_fields(
        'sum(abc) , min(t.bcd) as bcd , '
        'sum(t.def) as "1, _EDGE_COV as ""1"')
    self.assertEqual(len(fields), 1)
    self.assertIsInstance(fields[0], fuzzer_stats.QueryField)
    self.assertEqual(fields[0].aggregate_function, 'min')
    self.assertEqual(fields[0].name, 'bcd')
    self.assertEqual(fields[0].table_alias, 't')
    self.assertEqual(fields[0].select_alias, 'bcd')

  def test_query_job_day(self):
    """Tests querying for JobRuns grouped by day."""
    fields = fuzzer_stats.parse_stats_column_fields(
        fuzzer_stats.JobQuery.DEFAULT_FIELDS)
    query = fuzzer_stats.JobQuery('fuzzer_name', ['job_type', 'job_type2'],
                                  fields,
                                  fuzzer_stats.QueryGroupBy.GROUP_BY_DAY,
                                  datetime.date(2016, 10, 1),
                                  datetime.date(2016, 10, 7))
    self.assertEqual(
        sanitize_sql(query.build()),
        sanitize_sql("""
        WITH
        JobRunWithConcatedCrashes AS (
          SELECT
            TIMESTAMP_TRUNC(
              TIMESTAMP_SECONDS(CAST(timestamp AS INT64)), DAY, "UTC"
            ) as date,
            sum(testcases_executed) as testcases_executed,
            ARRAY_CONCAT_AGG(crashes) AS crashes
          FROM
            `test-clusterfuzz`.fuzzer_name_stats.JobRun
          WHERE
            (
              _PARTITIONTIME BETWEEN
              TIMESTAMP_SECONDS(1475280000) AND TIMESTAMP_SECONDS(1475798400)
            ) AND (
              job = \'job_type\' OR job = \'job_type2\'
            )
          GROUP BY date
        ),
        JobRunWithUniqueCrashes AS (
          SELECT
            * EXCEPT(crashes),
            ARRAY(
              SELECT AS STRUCT
                crash.crash_type,
                crash.crash_state,
                crash.security_flag,
                SUM(count) AS count,
                MAX(crash.is_new) AS is_new
              FROM
                UNNEST(crashes) AS crash
              GROUP BY
                crash.crash_type,
                crash.crash_state,
                crash.security_flag
            ) AS crashes
          FROM
            JobRunWithConcatedCrashes
        ),
        JobRunWithSummary AS (
          SELECT
            * EXCEPT(crashes),
            (
              SELECT AS STRUCT
                IFNULL(SUM(crash.count), 0) AS total,
                COUNTIF(crash.is_new) AS unique_new,
                COUNT(crash) AS unique
              FROM
                UNNEST(crashes) AS crash
            ) AS crash_count
          FROM
            JobRunWithUniqueCrashes
        )
        SELECT
          * EXCEPT(crash_count),
          crash_count.total AS total_crashes,
          crash_count.unique_new AS new_crashes,
          (crash_count.unique - crash_count.unique_new) AS known_crashes
        FROM
          JobRunWithSummary
        """))

  def test_query_job_revision(self):
    """Tests querying for JobRuns grouped by revision."""
    fields = fuzzer_stats.parse_stats_column_fields(
        fuzzer_stats.JobQuery.DEFAULT_FIELDS)
    query = fuzzer_stats.JobQuery('fuzzer_name', ['job_type', 'job_type2'],
                                  fields,
                                  fuzzer_stats.QueryGroupBy.GROUP_BY_REVISION,
                                  datetime.date(2016, 10, 1),
                                  datetime.date(2016, 10, 7))
    self.assertEqual(
        sanitize_sql(query.build()),
        sanitize_sql("""
        WITH
        JobRunWithConcatedCrashes AS (
          SELECT
            build_revision,
            sum(testcases_executed) as testcases_executed,
            ARRAY_CONCAT_AGG(crashes) AS crashes
          FROM
            `test-clusterfuzz`.fuzzer_name_stats.JobRun
          WHERE
            (
              _PARTITIONTIME BETWEEN
              TIMESTAMP_SECONDS(1475280000) AND TIMESTAMP_SECONDS(1475798400)
            ) AND (
              job = \'job_type\' OR job = \'job_type2\'
            )
          GROUP BY build_revision
        ),
        JobRunWithUniqueCrashes AS (
          SELECT
            * EXCEPT(crashes),
            ARRAY(
              SELECT AS STRUCT
                crash.crash_type,
                crash.crash_state,
                crash.security_flag,
                SUM(count) AS count,
                MAX(crash.is_new) AS is_new
              FROM
                UNNEST(crashes) AS crash
              GROUP BY
                crash.crash_type,
                crash.crash_state,
                crash.security_flag
            ) AS crashes
          FROM
            JobRunWithConcatedCrashes
        ),
        JobRunWithSummary AS (
          SELECT
            * EXCEPT(crashes),
            (
              SELECT AS STRUCT
                IFNULL(SUM(crash.count), 0) AS total,
                COUNTIF(crash.is_new) AS unique_new,
                COUNT(crash) AS unique
              FROM
                UNNEST(crashes) AS crash
            ) AS crash_count
          FROM
            JobRunWithUniqueCrashes
        )
        SELECT
          * EXCEPT(crash_count),
          crash_count.total AS total_crashes,
          crash_count.unique_new AS new_crashes,
          (crash_count.unique - crash_count.unique_new) AS known_crashes
        FROM
          JobRunWithSummary
        """))

  def test_query_job_fuzzer(self):
    """Tests querying for JobRuns grouped by fuzzer."""
    fields = fuzzer_stats.parse_stats_column_fields(
        fuzzer_stats.JobQuery.DEFAULT_FIELDS)
    query = fuzzer_stats.JobQuery('fuzzer_name', ['job_type', 'job_type2'],
                                  fields,
                                  fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                                  datetime.date(2016, 10, 1),
                                  datetime.date(2016, 10, 7))
    self.assertEqual(
        sanitize_sql(query.build()),
        sanitize_sql("""
        WITH
        JobRunWithConcatedCrashes AS (
          SELECT
            fuzzer,
            sum(testcases_executed) as testcases_executed,
            ARRAY_CONCAT_AGG(crashes) AS crashes
          FROM
            `test-clusterfuzz`.fuzzer_name_stats.JobRun
          WHERE
            (
              _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000)
              AND TIMESTAMP_SECONDS(1475798400)
            ) AND (
              job = \'job_type\' OR job = \'job_type2\'
            )
          GROUP BY fuzzer
        ),
        JobRunWithUniqueCrashes AS (
          SELECT
            * EXCEPT(crashes),
            ARRAY(
              SELECT AS STRUCT
                crash.crash_type,
                crash.crash_state,
                crash.security_flag,
                SUM(count) AS count,
                MAX(crash.is_new) AS is_new
              FROM
                UNNEST(crashes) AS crash
              GROUP BY
                crash.crash_type,
                crash.crash_state,
                crash.security_flag
            ) AS crashes
          FROM
            JobRunWithConcatedCrashes
        ),
        JobRunWithSummary AS (
          SELECT
            * EXCEPT(crashes),
            (
              SELECT AS STRUCT
                IFNULL(SUM(crash.count), 0) AS total,
                COUNTIF(crash.is_new) AS unique_new,
                COUNT(crash) AS unique
              FROM
                UNNEST(crashes) AS crash
            ) AS crash_count
          FROM
            JobRunWithUniqueCrashes
        )
        SELECT
          * EXCEPT(crash_count),
          crash_count.total AS total_crashes,
          crash_count.unique_new AS new_crashes,
          (crash_count.unique - crash_count.unique_new) AS known_crashes
        FROM
          JobRunWithSummary
        """))

  def test_table_query_join(self):
    """Tests basic table query involving a join."""
    # Columns reference both the JobRun (j) and TestcaseRun (t) tables, so
    # the built query joins the two subqueries on the group-by key.
    stats_columns = """
    sum(j.testcases_executed) as testcases_executed,
    custom(j.total_crashes) as total_crashes,
    custom(j.new_crashes) as new_crashes,
    custom(j.known_crashes) as known_crashes,
    avg(t.average_exec_per_sec) as average_exec_per_sec
    """
    query = fuzzer_stats.TableQuery('fuzzer_name', ['job_type', 'job_type2'],
                                    stats_columns,
                                    fuzzer_stats.QueryGroupBy.GROUP_BY_DAY,
                                    datetime.date(2016, 10, 1),
                                    datetime.date(2016, 10, 7))
    self.assertEqual(
        sanitize_sql(query.build()),
        sanitize_sql("""
        SELECT j.date, * EXCEPT(date) FROM (
        WITH
        JobRunWithConcatedCrashes AS (
          SELECT
            TIMESTAMP_TRUNC(
              TIMESTAMP_SECONDS(CAST(timestamp AS INT64)), DAY, "UTC"
            ) as date,
            sum(testcases_executed) as testcases_executed,
            ARRAY_CONCAT_AGG(crashes) AS crashes
          FROM
            `test-clusterfuzz`.fuzzer_name_stats.JobRun
          WHERE
            (
              _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000)
              AND TIMESTAMP_SECONDS(1475798400)
            ) AND (
              job = \'job_type\' OR job = \'job_type2\'
            )
          GROUP BY date
        ),
        JobRunWithUniqueCrashes AS (
          SELECT
            * EXCEPT(crashes),
            ARRAY(
              SELECT AS STRUCT
                crash.crash_type,
                crash.crash_state,
                crash.security_flag,
                SUM(count) AS count,
                MAX(crash.is_new) AS is_new
              FROM
                UNNEST(crashes) AS crash
              GROUP BY
                crash.crash_type,
                crash.crash_state,
                crash.security_flag
            ) AS crashes
          FROM
            JobRunWithConcatedCrashes
        ),
        JobRunWithSummary AS (
          SELECT
            * EXCEPT(crashes),
            (
              SELECT AS STRUCT
                IFNULL(SUM(crash.count), 0) AS total,
                COUNTIF(crash.is_new) AS unique_new,
                COUNT(crash) AS unique
              FROM
                UNNEST(crashes) AS crash
            ) AS crash_count
          FROM
            JobRunWithUniqueCrashes
        )
        SELECT
          * EXCEPT(crash_count),
          crash_count.total AS total_crashes,
          crash_count.unique_new AS new_crashes,
          (crash_count.unique - crash_count.unique_new) AS known_crashes
        FROM
          JobRunWithSummary
        ) as j INNER JOIN (
        SELECT
          TIMESTAMP_TRUNC(
            TIMESTAMP_SECONDS(CAST(timestamp AS INT64)), DAY, "UTC"
          ) as date,
          avg(average_exec_per_sec) as average_exec_per_sec
        FROM `test-clusterfuzz`.fuzzer_name_stats.TestcaseRun
        WHERE
          (
            _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000) AND
            TIMESTAMP_SECONDS(1475798400)
          ) AND (
            job = 'job_type' OR job = 'job_type2'
          )
        GROUP BY date
        ) as t ON j.date = t.date
        """))

  def test_table_query_single(self):
    """Tests basic table query involving single subquery."""
    # Only JobRun (j) columns are requested, so no join is emitted.
    stats_columns = """
    sum(j.testcases_executed) as testcases_executed,
    custom(j.total_crashes) as total_crashes,
    custom(j.new_crashes) as new_crashes,
    custom(j.known_crashes) as known_crashes
    """
    query = fuzzer_stats.TableQuery('fuzzer_name', ['job_type'], stats_columns,
                                    fuzzer_stats.QueryGroupBy.GROUP_BY_DAY,
                                    datetime.date(2016, 10, 1),
                                    datetime.date(2016, 10, 7))
    self.assertEqual(
        sanitize_sql(query.build()),
        sanitize_sql("""
        SELECT j.date, * EXCEPT(date) FROM (
        WITH
        JobRunWithConcatedCrashes AS (
          SELECT
            TIMESTAMP_TRUNC(
              TIMESTAMP_SECONDS(CAST(timestamp AS INT64)), DAY, "UTC"
            ) as date,
            sum(testcases_executed) as testcases_executed,
            ARRAY_CONCAT_AGG(crashes) AS crashes
          FROM
            `test-clusterfuzz`.fuzzer_name_stats.JobRun
          WHERE
            (
              _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000) AND
              TIMESTAMP_SECONDS(1475798400)
            ) AND (
              job = \'job_type\'
            )
          GROUP BY date
        ),
        JobRunWithUniqueCrashes AS (
          SELECT
            * EXCEPT(crashes),
            ARRAY(
              SELECT AS STRUCT
                crash.crash_type,
                crash.crash_state,
                crash.security_flag,
                SUM(count) AS count,
                MAX(crash.is_new) AS is_new
              FROM
                UNNEST(crashes) AS crash
              GROUP BY
                crash.crash_type,
                crash.crash_state,
                crash.security_flag
            ) AS crashes
          FROM
            JobRunWithConcatedCrashes
        ),
        JobRunWithSummary AS (
          SELECT
            * EXCEPT(crashes),
            (
              SELECT AS STRUCT
                IFNULL(SUM(crash.count), 0) AS total,
                COUNTIF(crash.is_new) AS unique_new,
                COUNT(crash) AS unique
              FROM
                UNNEST(crashes) AS crash
            ) AS crash_count
          FROM
            JobRunWithUniqueCrashes
        )
        SELECT
          * EXCEPT(crash_count),
          crash_count.total AS total_crashes,
          crash_count.unique_new AS new_crashes,
          (crash_count.unique - crash_count.unique_new) AS known_crashes
        FROM
          JobRunWithSummary
        ) as j
        """))

  def test_table_query_group_fuzzer(self):
    """Tests table query grouping by fuzzer."""
    stats_columns = """
    sum(j.testcases_executed) as testcases_executed,
    custom(j.total_crashes) as total_crashes,
    custom(j.new_crashes) as new_crashes,
    custom(j.known_crashes) as known_crashes,
    avg(t.average_exec_per_sec) as average_exec_per_sec
    """
    query = fuzzer_stats.TableQuery('parent_child', ['test_job', 'test_job2'],
                                    stats_columns,
                                    fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                                    datetime.date(2016, 10, 1),
                                    datetime.date(2016, 10, 7))
    self.assertEqual(
        sanitize_sql(query.build()),
        sanitize_sql("""
        SELECT j.fuzzer, * EXCEPT(fuzzer) FROM (
        WITH
        JobRunWithConcatedCrashes AS (
          SELECT
            fuzzer,
            sum(testcases_executed) as testcases_executed,
            ARRAY_CONCAT_AGG(crashes) AS crashes
          FROM
            `test-clusterfuzz`.parent_stats.JobRun
          WHERE
            (
              _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000) AND
              TIMESTAMP_SECONDS(1475798400)
            ) AND (
              job = \'test_job\' OR job = \'test_job2\'
            )
            AND fuzzer = \'parent_child\'
          GROUP BY fuzzer
        ),
        JobRunWithUniqueCrashes AS (
          SELECT
            * EXCEPT(crashes),
            ARRAY(
              SELECT AS STRUCT
                crash.crash_type,
                crash.crash_state,
                crash.security_flag,
                SUM(count) AS count,
                MAX(crash.is_new) AS is_new
              FROM
                UNNEST(crashes) AS crash
              GROUP BY
                crash.crash_type,
                crash.crash_state,
                crash.security_flag
            ) AS crashes
          FROM
            JobRunWithConcatedCrashes
        ),
        JobRunWithSummary AS (
          SELECT
            * EXCEPT(crashes),
            (
              SELECT AS STRUCT
                IFNULL(SUM(crash.count), 0) AS total,
                COUNTIF(crash.is_new) AS unique_new,
                COUNT(crash) AS unique
              FROM
                UNNEST(crashes) AS crash
            ) AS crash_count
          FROM
            JobRunWithUniqueCrashes
        )
        SELECT
          * EXCEPT(crash_count),
          crash_count.total AS total_crashes,
          crash_count.unique_new AS new_crashes,
          (crash_count.unique - crash_count.unique_new) AS known_crashes
        FROM
          JobRunWithSummary
        ) as j INNER JOIN (
        SELECT
          fuzzer,
          avg(average_exec_per_sec) as average_exec_per_sec
        FROM `test-clusterfuzz`.parent_stats.TestcaseRun
        WHERE
          (
            _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000) AND
            TIMESTAMP_SECONDS(1475798400)
          ) AND (
            job = \'test_job\' OR job = \'test_job2\'
          ) AND fuzzer = \'parent_child\'
        GROUP BY fuzzer
        ) as t ON j.fuzzer = t.fuzzer
        """))

    # Don't specify a job.
    query = fuzzer_stats.TableQuery('parent_child', None, stats_columns,
                                    fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                                    datetime.date(2016, 10, 1),
                                    datetime.date(2016, 10, 7))
    self.assertEqual(
        sanitize_sql(query.build()),
        sanitize_sql("""
        SELECT j.fuzzer, * EXCEPT(fuzzer) FROM (
        WITH
        JobRunWithConcatedCrashes AS (
          SELECT
            fuzzer,
            sum(testcases_executed) as testcases_executed,
            ARRAY_CONCAT_AGG(crashes) AS crashes
          FROM
            `test-clusterfuzz`.parent_stats.JobRun
          WHERE
            (
              _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000) AND
              TIMESTAMP_SECONDS(1475798400)
            )
            AND fuzzer = \'parent_child\'
          GROUP BY fuzzer
        ),
        JobRunWithUniqueCrashes AS (
          SELECT
            * EXCEPT(crashes),
            ARRAY(
              SELECT AS STRUCT
                crash.crash_type,
                crash.crash_state,
                crash.security_flag,
                SUM(count) AS count,
                MAX(crash.is_new) AS is_new
              FROM
                UNNEST(crashes) AS crash
              GROUP BY
                crash.crash_type,
                crash.crash_state,
                crash.security_flag
            ) AS crashes
          FROM
            JobRunWithConcatedCrashes
        ),
        JobRunWithSummary AS (
          SELECT
            * EXCEPT(crashes),
            (
              SELECT AS STRUCT
                IFNULL(SUM(crash.count), 0) AS total,
                COUNTIF(crash.is_new) AS unique_new,
                COUNT(crash) AS unique
              FROM
                UNNEST(crashes) AS crash
            ) AS crash_count
          FROM
            JobRunWithUniqueCrashes
        )
        SELECT
          * EXCEPT(crash_count),
          crash_count.total AS total_crashes,
          crash_count.unique_new AS new_crashes,
          (crash_count.unique - crash_count.unique_new) AS known_crashes
        FROM
          JobRunWithSummary
        ) as j INNER JOIN (
        SELECT
          fuzzer,
          avg(average_exec_per_sec) as average_exec_per_sec
        FROM `test-clusterfuzz`.parent_stats.TestcaseRun
        WHERE
          (
            _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000) AND
            TIMESTAMP_SECONDS(1475798400)
          )
          AND fuzzer = \'parent_child\'
        GROUP BY fuzzer
        ) as t ON j.fuzzer = t.fuzzer
        """))

  def test_table_query_group_job(self):
    """Tests grouping by job."""
    stats_columns = """
    sum(j.testcases_executed) as testcases_executed,
    custom(j.total_crashes) as total_crashes,
    custom(j.new_crashes) as new_crashes,
    custom(j.known_crashes) as known_crashes,
    avg(t.average_exec_per_sec) as average_exec_per_sec
    """
    query = fuzzer_stats.TableQuery('parent_child', None, stats_columns,
                                    fuzzer_stats.QueryGroupBy.GROUP_BY_JOB,
                                    datetime.date(2016, 10, 1),
                                    datetime.date(2016, 10, 7))
    self.assertEqual(
        sanitize_sql(query.build()),
        sanitize_sql("""
        SELECT j.job, * EXCEPT(job) FROM (
        WITH
        JobRunWithConcatedCrashes AS (
          SELECT
            job,
            sum(testcases_executed) as testcases_executed,
            ARRAY_CONCAT_AGG(crashes) AS crashes
          FROM
            `test-clusterfuzz`.parent_stats.JobRun
          WHERE
            (
              _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000) AND
              TIMESTAMP_SECONDS(1475798400)
            )
            AND fuzzer = \'parent_child\'
          GROUP BY job
        ),
        JobRunWithUniqueCrashes AS (
          SELECT
            * EXCEPT(crashes),
            ARRAY(
              SELECT AS STRUCT
                crash.crash_type,
                crash.crash_state,
                crash.security_flag,
                SUM(count) AS count,
                MAX(crash.is_new) AS is_new
              FROM
                UNNEST(crashes) AS crash
              GROUP BY
                crash.crash_type,
                crash.crash_state,
                crash.security_flag
            ) AS crashes
          FROM
            JobRunWithConcatedCrashes
        ),
        JobRunWithSummary AS (
          SELECT
            * EXCEPT(crashes),
            (
              SELECT AS STRUCT
                IFNULL(SUM(crash.count), 0) AS total,
                COUNTIF(crash.is_new) AS unique_new,
                COUNT(crash) AS unique
              FROM
                UNNEST(crashes) AS crash
            ) AS crash_count
          FROM
            JobRunWithUniqueCrashes
        )
        SELECT
          * EXCEPT(crash_count),
          crash_count.total AS total_crashes,
          crash_count.unique_new AS new_crashes,
          (crash_count.unique - crash_count.unique_new) AS known_crashes
        FROM
          JobRunWithSummary
        ) as j INNER JOIN (
        SELECT
          job,
          avg(average_exec_per_sec) as average_exec_per_sec
        FROM `test-clusterfuzz`.parent_stats.TestcaseRun
        WHERE
          (
            _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000) AND
            TIMESTAMP_SECONDS(1475798400)
          ) AND
          fuzzer = \'parent_child\'
        GROUP BY job
        ) as t ON j.job = t.job
        """))

  def test_table_query_group_time(self):
    """Tests table query grouping by time."""
    # Only the TestcaseRun (t) subquery supports GROUP_BY_TIME; the JobRun
    # columns are dropped from the built query.
    stats_columns = """
    sum(j.testcases_executed) as testcases_executed,
    custom(j.total_crashes) as total_crashes,
    custom(j.new_crashes) as new_crashes,
    custom(j.known_crashes) as known_crashes,
    avg(t.average_exec_per_sec) as average_exec_per_sec
    """
    query = fuzzer_stats.TableQuery('parent_child', ['test_job', 'test_job2'],
                                    stats_columns,
                                    fuzzer_stats.QueryGroupBy.GROUP_BY_TIME,
                                    datetime.date(2016, 10, 1),
                                    datetime.date(2016, 10, 7))
    self.assertEqual(
        sanitize_sql(query.build()),
        sanitize_sql("""
        SELECT
          t.time, * EXCEPT(time)
        FROM
        (
        SELECT
          TIMESTAMP_SECONDS(
            CAST(timestamp AS INT64)
          ) as time,
          avg(average_exec_per_sec) as average_exec_per_sec
        FROM `test-clusterfuzz`.parent_stats.TestcaseRun
        WHERE
          (
            _PARTITIONTIME BETWEEN TIMESTAMP_SECONDS(1475280000) AND
            TIMESTAMP_SECONDS(1475798400)
          ) AND
          (
            job = \'test_job\' OR job = \'test_job2\'
          ) AND
          fuzzer = \'parent_child\'
        GROUP BY time
        ) as t
        """))

  def test_query_invalid_names(self):
    """Tests passing invalid fuzzer/job names."""
    stats_columns = ('sum(j.testcases_executed) as testcases_executed, '
                     'sum(j.new_crashes) as new_crashes, '
                     'sum(j.known_crashes) as known_crashes, '
                     'avg(t.average_exec_per_sec) as average_exec_per_sec ')
    # Fuzzer names containing quote/special characters must be rejected.
    with self.assertRaises(fuzzer_stats.FuzzerStatsException):
      fuzzer_stats.TableQuery('fuzzer_n\'ame$', ['job_type'], stats_columns,
                              fuzzer_stats.QueryGroupBy.GROUP_BY_DAY,
                              datetime.date(2016, 10, 1),
                              datetime.date(2016, 10, 7))
@test_utils.with_cloud_emulators('datastore')
class BuiltinFieldTests(unittest.TestCase):
"""Builtin field tests."""
def setUp(self):
  self.today = datetime.datetime.utcnow().date()
  self.yesterday = self.today - datetime.timedelta(days=1)

  # fuzzer1: coverage data for yesterday only.
  cov_info = data_types.CoverageInformation(
      fuzzer='fuzzer1', date=self.yesterday)
  cov_info.edges_covered = 11
  cov_info.edges_total = 30
  cov_info.functions_covered = 10
  cov_info.functions_total = 15
  cov_info.html_report_url = 'https://report_for_fuzzer1/{}'.format(
      data_types.coverage_information_date_to_string(self.yesterday))
  cov_info.corpus_size_units = 20
  cov_info.corpus_size_bytes = 200
  cov_info.quarantine_size_units = 5
  cov_info.quarantine_size_bytes = 50
  cov_info.corpus_location = 'gs://corpus'
  cov_info.corpus_backup_location = 'gs://corpus-backup/file.zip'
  cov_info.quarantine_location = 'gs://quarantine'
  cov_info.put()

  # fuzzer2: coverage data for both today and yesterday, to exercise
  # per-day lookups.
  cov_info = data_types.CoverageInformation(fuzzer='fuzzer2', date=self.today)
  cov_info.edges_covered = 16
  cov_info.edges_total = 33
  cov_info.functions_covered = 58
  cov_info.functions_total = 90
  cov_info.html_report_url = 'https://report_for_fuzzer2/{}'.format(
      data_types.coverage_information_date_to_string(self.today))
  cov_info.corpus_size_units = 40
  cov_info.corpus_size_bytes = 99
  cov_info.quarantine_size_units = 6
  cov_info.quarantine_size_bytes = 14
  cov_info.corpus_location = 'gs://corpus'
  cov_info.corpus_backup_location = 'gs://corpus-backup/file.zip'
  cov_info.quarantine_location = 'gs://quarantine'
  cov_info.put()

  cov_info = data_types.CoverageInformation(
      fuzzer='fuzzer2', date=self.yesterday)
  cov_info.edges_covered = 15
  cov_info.edges_total = 40
  cov_info.functions_covered = 11
  cov_info.functions_total = 16
  cov_info.html_report_url = 'https://report_for_fuzzer2/{}'.format(
      data_types.coverage_information_date_to_string(self.yesterday))
  cov_info.corpus_size_units = 15
  cov_info.corpus_size_bytes = 230
  cov_info.quarantine_size_units = 8
  cov_info.quarantine_size_bytes = 60
  cov_info.corpus_location = 'gs://corpus'
  cov_info.corpus_backup_location = 'gs://corpus-backup/file.zip'
  cov_info.quarantine_location = 'gs://quarantine'
  cov_info.put()

  # fuzzer3: a row with None/zero coverage numbers, to exercise handling
  # of invalid/missing coverage info.
  cov_info = data_types.CoverageInformation(fuzzer='fuzzer3', date=self.today)
  cov_info.edges_covered = None
  cov_info.edges_total = None
  cov_info.functions_covered = None
  cov_info.functions_total = None
  cov_info.html_report_url = None
  cov_info.corpus_size_units = 0
  cov_info.corpus_size_bytes = 0
  cov_info.quarantine_size_units = 0
  cov_info.quarantine_size_bytes = 0
  cov_info.corpus_location = 'gs://corpus'
  cov_info.corpus_backup_location = 'gs://corpus-backup/file.zip'
  cov_info.quarantine_location = 'gs://quarantine'
  cov_info.put()

  # Jobs with distinct fuzz-log buckets, used by the logs-field tests.
  data_types.Job(
      name='job1', environment_string='FUZZ_LOGS_BUCKET = bucket1').put()
  data_types.Job(
      name='job2', environment_string='FUZZ_LOGS_BUCKET = bucket2').put()
def test_constructors(self):
  """Test builtin field constructors."""
  # Each builtin specifier name must construct its matching field class.
  expected_field_classes = (
      ('_EDGE_COV', fuzzer_stats.CoverageField),
      ('_FUNC_COV', fuzzer_stats.CoverageField),
      ('_CORPUS_SIZE', fuzzer_stats.CorpusSizeField),
      ('_CORPUS_BACKUP', fuzzer_stats.CorpusBackupField),
      ('_QUARANTINE_SIZE', fuzzer_stats.CorpusSizeField),
      ('_COV_REPORT', fuzzer_stats.CoverageReportField),
  )
  for specifier_name, field_class in expected_field_classes:
    field = fuzzer_stats.BuiltinFieldSpecifier(specifier_name).create()
    self.assertIsInstance(field, field_class)
def test_coverage_fields(self):
  """Test coverage fields."""
  # Group-by-fuzzer lookups against the fixture data from setUp.
  context = fuzzer_stats.CoverageFieldContext()
  edge_field = fuzzer_stats.BuiltinFieldSpecifier('_EDGE_COV').create(context)
  func_field = fuzzer_stats.BuiltinFieldSpecifier('_FUNC_COV').create(context)

  result = edge_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                          'fuzzer1')
  self.assertEqual(result.value, '36.67% (11/30)')
  self.assertAlmostEqual(result.sort_key, 36.666666666666664)
  self.assertIsNone(result.link)

  result = func_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                          'fuzzer2')
  self.assertEqual(result.value, '64.44% (58/90)')
  self.assertAlmostEqual(result.sort_key, 64.44444444444444)
  self.assertIsNone(result.link)

  # Group-by-day lookups for a specific fuzzer.
  context = fuzzer_stats.CoverageFieldContext(fuzzer='fuzzer2')
  edge_field = fuzzer_stats.BuiltinFieldSpecifier('_EDGE_COV').create(context)
  for day, expected_value, expected_sort_key in (
      (self.today, '48.48% (16/33)', 48.484848484848484),
      (self.yesterday, '37.50% (15/40)', 37.5)):
    result = edge_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_DAY, day)
    self.assertEqual(result.value, expected_value)
    self.assertAlmostEqual(result.sort_key, expected_sort_key)
    self.assertIsNone(result.link)
def test_corpus_size_fields(self):
  """Test corpus size fields."""
  ctx = fuzzer_stats.CoverageFieldContext()
  corpus_field = fuzzer_stats.BuiltinFieldSpecifier('_CORPUS_SIZE').create(
      ctx)
  corpus_backup_field = fuzzer_stats.BuiltinFieldSpecifier(
      '_CORPUS_BACKUP').create(ctx)
  quarantine_field = fuzzer_stats.BuiltinFieldSpecifier(
      '_QUARANTINE_SIZE').create(ctx)

  # Group-by-fuzzer: values come from each fuzzer's latest coverage row.
  data = corpus_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                          'fuzzer1')
  self.assertEqual(data.value, '20 (200 B)')
  self.assertEqual(data.sort_key, 20)
  self.assertEqual(data.link, 'gs://corpus')
  # NOTE(review): the backup link is the bucket directory
  # ('gs://corpus-backup'), not the 'file.zip' stored in the fixture —
  # presumably the field strips the file component; confirm against
  # CorpusBackupField.
  data = corpus_backup_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                                 'fuzzer1')
  self.assertEqual(data.value, 'Download')
  self.assertEqual(data.sort_key, None)
  self.assertEqual(data.link, 'gs://corpus-backup')
  data = corpus_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                          'fuzzer2')
  self.assertEqual(data.value, '40 (99 B)')
  self.assertEqual(data.sort_key, 40)
  self.assertEqual(data.link, 'gs://corpus')
  data = corpus_backup_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                                 'fuzzer2')
  self.assertEqual(data.value, 'Download')
  self.assertEqual(data.sort_key, None)
  self.assertEqual(data.link, 'gs://corpus-backup')
  data = quarantine_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                              'fuzzer1')
  self.assertEqual(data.value, '5 (50 B)')
  self.assertEqual(data.sort_key, 5)
  self.assertEqual(data.link, 'gs://quarantine')
  data = quarantine_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                              'fuzzer2')
  self.assertEqual(data.value, '6 (14 B)')
  self.assertEqual(data.sort_key, 6)
  self.assertEqual(data.link, 'gs://quarantine')

  # Group-by-day: per-date rows for fuzzer2.
  ctx = fuzzer_stats.CoverageFieldContext('fuzzer2')
  corpus_field = fuzzer_stats.BuiltinFieldSpecifier('_CORPUS_SIZE').create(
      ctx)
  corpus_backup_field = fuzzer_stats.BuiltinFieldSpecifier(
      '_CORPUS_BACKUP').create(ctx)
  data = corpus_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_DAY, self.today)
  self.assertEqual(data.value, '40 (99 B)')
  self.assertEqual(data.sort_key, 40)
  self.assertEqual(data.link, 'gs://corpus')
  data = corpus_backup_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_DAY,
                                 self.today)
  self.assertEqual(data.value, 'Download')
  self.assertEqual(data.sort_key, None)
  self.assertEqual(data.link, 'gs://corpus-backup')
  data = corpus_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_DAY,
                          self.yesterday)
  self.assertEqual(data.value, '15 (230 B)')
  self.assertEqual(data.sort_key, 15)
  self.assertEqual(data.link, 'gs://corpus')
  data = corpus_backup_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_DAY,
                                 self.yesterday)
  self.assertEqual(data.value, 'Download')
  self.assertEqual(data.sort_key, None)
  self.assertEqual(data.link, 'gs://corpus-backup')
def test_coverage_report_field(self):
    """Test coverage report field."""
    # NOTE(review): this first context/field pair is immediately overwritten
    # below; presumably it only checks that creation without a fuzzer does
    # not raise — confirm, or drop the dead assignments.
    ctx = fuzzer_stats.CoverageFieldContext()
    coverage_report_field = fuzzer_stats.BuiltinFieldSpecifier(
        '_COV_REPORT').create(ctx)
    ctx = fuzzer_stats.CoverageFieldContext('fuzzer2')
    coverage_report_field = fuzzer_stats.BuiltinFieldSpecifier(
        '_COV_REPORT').create(ctx)
    # The report link embeds the date string of the requested day.
    data = coverage_report_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_DAY,
                                     self.today)
    self.assertEqual(data.value, 'Coverage')
    self.assertEqual(
        data.link, 'https://report_for_fuzzer2/{}'.format(
            data_types.coverage_information_date_to_string(self.today)))
    data = coverage_report_field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_DAY,
                                     self.yesterday)
    self.assertEqual(data.value, 'Coverage')
    self.assertEqual(
        data.link, 'https://report_for_fuzzer2/{}'.format(
            data_types.coverage_information_date_to_string(self.yesterday)))
def test_coverage_field_invalid_info(self):
    """Test that coverage field works as expected with invalid coverage info."""
    context = fuzzer_stats.CoverageFieldContext(fuzzer='fuzzer3')
    field = fuzzer_stats.BuiltinFieldSpecifier('_EDGE_COV').create(context)
    result = field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_DAY, self.today)
    self.assertIsNone(result)
def test_logs_field_by_fuzzer(self):
    """Test logs field (group by fuzzer)."""
    context = fuzzer_stats.FuzzerRunLogsContext('fuzzer1', ['job1'])
    field = fuzzer_stats.BuiltinFieldSpecifier('_FUZZER_RUN_LOGS').create(context)
    result = field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER, 'fuzzer_child1')
    self.assertEqual(result.value, 'Logs')
    self.assertEqual(result.link, 'gs://bucket1/fuzzer_child1/job1')
def test_logs_field_by_day(self):
    """Test logs field (group by day)."""
    context = fuzzer_stats.FuzzerRunLogsContext('fuzzer1', ['job1'])
    field = fuzzer_stats.BuiltinFieldSpecifier('_FUZZER_RUN_LOGS').create(context)
    day = datetime.date(2016, 11, 18)
    result = field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_DAY, day)
    self.assertEqual(result.value, 'Logs')
    self.assertEqual(result.link, 'gs://bucket1/fuzzer1/job1/2016-11-18')
def test_logs_field_by_job(self):
    """Test logs field (group by job)."""
    context = fuzzer_stats.FuzzerRunLogsContext('fuzzer1', ['blah'])
    field = fuzzer_stats.BuiltinFieldSpecifier('_FUZZER_RUN_LOGS').create(context)
    result = field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_JOB, 'job2')
    self.assertEqual(result.value, 'Logs')
    self.assertEqual(result.link, 'gs://bucket2/fuzzer1/job2')
def test_performance_field_by_fuzzer(self):
    """Test performance field (group by fuzzer)."""
    context = fuzzer_stats.FuzzerRunLogsContext('fuzzer1', ['job1'])
    field = fuzzer_stats.BuiltinFieldSpecifier('_PERFORMANCE_REPORT').create(
        context)
    result = field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER,
                       'fuzzer_child1')
    self.assertEqual(result.value, 'Performance')
    self.assertEqual(result.link,
                     '/performance-report/fuzzer_child1/job1/latest')
def test_performance_field_by_day(self):
    """Test performance field (group by day)."""
    context = fuzzer_stats.FuzzerRunLogsContext('fuzzer1', ['job1'])
    field = fuzzer_stats.BuiltinFieldSpecifier('_PERFORMANCE_REPORT').create(
        context)
    day = datetime.date(2016, 11, 18)
    result = field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_DAY, day)
    self.assertEqual(result.value, 'Performance')
    self.assertEqual(result.link,
                     '/performance-report/fuzzer1/job1/2016-11-18')
def test_performance_field_by_job(self):
    """Test performance field (group by job)."""
    context = fuzzer_stats.FuzzerRunLogsContext('fuzzer1', ['blah'])
    field = fuzzer_stats.BuiltinFieldSpecifier('_PERFORMANCE_REPORT').create(
        context)
    result = field.get(fuzzer_stats.QueryGroupBy.GROUP_BY_JOB, 'job2')
    self.assertEqual(result.value, 'Performance')
    self.assertEqual(result.link, '/performance-report/fuzzer1/job2/latest')
| |
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import time, json, warnings
from collections import deque
from .utils.generic_utils import Progbar
class CallbackList(object):
    """Container that fans training events out to a list of callbacks.

    Also times ``on_batch_begin``/``on_batch_end`` and warns when the
    callbacks themselves take nearly as long as the batch update they
    instrument.
    """

    def __init__(self, callbacks=None, queue_length=10):
        # Bug fix: `callbacks=[]` was a mutable default argument. The list is
        # still copied, so mutating the caller's list later has no effect.
        self.callbacks = list(callbacks) if callbacks else []
        self.queue_length = queue_length
        # Initialize timing state here too, so on_batch_end() is safe even
        # when on_batch_begin()/on_epoch_begin() were never called.
        self._delta_t_batch = 0.
        self._t_enter_batch = time.time()
        self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
        self._delta_ts_batch_end = deque([], maxlen=self.queue_length)

    def append(self, callback):
        self.callbacks.append(callback)

    def _set_params(self, params):
        for callback in self.callbacks:
            callback._set_params(params)

    def _set_model(self, model):
        for callback in self.callbacks:
            callback._set_model(model)

    def on_epoch_begin(self, epoch, logs=None):
        logs = logs if logs is not None else {}
        for callback in self.callbacks:
            callback.on_epoch_begin(epoch, logs)
        # Reset per-epoch batch-timing state.
        self._delta_t_batch = 0.
        self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
        self._delta_ts_batch_end = deque([], maxlen=self.queue_length)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs if logs is not None else {}
        for callback in self.callbacks:
            callback.on_epoch_end(epoch, logs)

    def on_batch_begin(self, batch, logs=None):
        logs = logs if logs is not None else {}
        t_before_callbacks = time.time()
        for callback in self.callbacks:
            callback.on_batch_begin(batch, logs)
        self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
        delta_t_median = np.median(self._delta_ts_batch_begin)
        # Warn when the callbacks take >=95% of the batch time and >0.1s.
        if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1:
            warnings.warn('Method on_batch_begin() is slow compared '
                          'to the batch update (%f). Check your callbacks.' % delta_t_median)
        self._t_enter_batch = time.time()

    def on_batch_end(self, batch, logs=None):
        logs = logs if logs is not None else {}
        self._delta_t_batch = time.time() - self._t_enter_batch
        t_before_callbacks = time.time()
        for callback in self.callbacks:
            callback.on_batch_end(batch, logs)
        self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
        delta_t_median = np.median(self._delta_ts_batch_end)
        if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1:
            warnings.warn('Method on_batch_end() is slow compared '
                          'to the batch update (%f). Check your callbacks.' % delta_t_median)

    def on_train_begin(self, logs=None):
        logs = logs if logs is not None else {}
        for callback in self.callbacks:
            callback.on_train_begin(logs)

    def on_train_end(self, logs=None):
        logs = logs if logs is not None else {}
        for callback in self.callbacks:
            callback.on_train_end(logs)
class Callback(object):
    """Base class for training callbacks.

    Subclasses override any of the ``on_*`` hooks they need; ``params`` and
    ``model`` are attached by the training loop through _set_params() and
    _set_model() before training starts.
    """

    def __init__(self):
        pass

    def _set_params(self, params):
        self.params = params

    def _set_model(self, model):
        self.model = model

    def on_epoch_begin(self, epoch, logs={}):
        pass

    def on_epoch_end(self, epoch, logs={}):
        pass

    def on_batch_begin(self, batch, logs={}):
        pass

    def on_batch_end(self, batch, logs={}):
        pass

    def on_train_begin(self, logs={}):
        pass

    def on_train_end(self, logs={}):
        pass
class BaseLogger(Callback):
    """Default console logger: accumulates per-batch metric totals and,
    when verbose, drives a Progbar over ``nb_sample`` samples.
    """

    def on_train_begin(self, logs={}):
        # `self.params` is injected by the model through _set_params().
        self.verbose = self.params['verbose']

    def on_epoch_begin(self, epoch, logs={}):
        if self.verbose:
            print('Epoch %d' % epoch)
            self.progbar = Progbar(target=self.params['nb_sample'],
                                   verbose=self.verbose)
        self.seen = 0
        self.totals = {}

    def on_batch_begin(self, batch, logs={}):
        if self.seen < self.params['nb_sample']:
            self.log_values = []

    def on_batch_end(self, batch, logs={}):
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        # Weight each metric by batch size so the epoch value is a true mean.
        for k, v in logs.items():
            if k in self.totals:
                self.totals[k] += v * batch_size
            else:
                self.totals[k] = v * batch_size
        for k in self.params['metrics']:
            if k in logs:
                self.log_values.append((k, logs[k]))
        # skip progbar update for the last batch; will be handled by on_epoch_end
        if self.verbose and self.seen < self.params['nb_sample']:
            self.progbar.update(self.seen, self.log_values)

    def on_epoch_end(self, epoch, logs={}):
        for k in self.params['metrics']:
            if k in self.totals:
                # Average of the accumulated batch totals over samples seen.
                self.log_values.append((k, self.totals[k] / self.seen))
            if k in logs:
                self.log_values.append((k, logs[k]))
        if self.verbose:
            self.progbar.update(self.seen, self.log_values)
class History(Callback):
    """Accumulates batch metrics and records their per-epoch averages,
    together with anything passed in ``logs``, under ``self.history``.
    """

    def on_train_begin(self, logs={}):
        self.epoch = []
        self.history = {}

    def on_epoch_begin(self, epoch, logs={}):
        self.seen = 0
        self.totals = {}

    def on_batch_end(self, batch, logs={}):
        size = logs.get('size', 0)
        self.seen += size
        for key, value in logs.items():
            self.totals[key] = self.totals.get(key, 0) + value * size

    def on_epoch_end(self, epoch, logs={}):
        self.epoch.append(epoch)
        for key, total in self.totals.items():
            self.history.setdefault(key, []).append(total / self.seen)
        for key, value in logs.items():
            self.history.setdefault(key, []).append(value)
class ModelCheckpoint(Callback):
    """Saves model weights to `filepath` after every epoch.

    With ``save_best_only=True`` weights are only written when the monitored
    quantity improves; the ``current < best`` comparison assumes the monitor
    is a loss-like value where lower is better.
    """

    def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False):
        # Bug fix: was `super(Callback, self).__init__()`, which skipped
        # this class's own MRO entry; super() must name the current class.
        super(ModelCheckpoint, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.save_best_only = save_best_only
        self.best = np.Inf

    def on_epoch_end(self, epoch, logs={}):
        if self.save_best_only:
            current = logs.get(self.monitor)
            if current is None:
                warnings.warn("Can save best model only with %s available, skipping." % (self.monitor), RuntimeWarning)
            else:
                if current < self.best:
                    if self.verbose > 0:
                        print("Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s"
                              % (epoch, self.monitor, self.best, current, self.filepath))
                    self.best = current
                    self.model.save_weights(self.filepath, overwrite=True)
                else:
                    if self.verbose > 0:
                        print("Epoch %05d: %s did not improve" % (epoch, self.monitor))
        else:
            if self.verbose > 0:
                print("Epoch %05d: saving model to %s" % (epoch, self.filepath))
            self.model.save_weights(self.filepath, overwrite=True)
class EarlyStopping(Callback):
    """Stops training when the monitored quantity has not improved for
    `patience` consecutive epochs (lower is assumed to be better).
    """

    def __init__(self, monitor='val_loss', patience=0, verbose=0):
        # Bug fix: was `super(Callback, self).__init__()`; super() must name
        # the current class.
        super(EarlyStopping, self).__init__()
        self.monitor = monitor
        self.patience = patience
        self.verbose = verbose
        self.best = np.Inf
        self.wait = 0

    def on_epoch_end(self, epoch, logs={}):
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % (self.monitor), RuntimeWarning)
            # Bug fix: previously fell through and evaluated
            # `None < self.best`, which raises TypeError on Python 3.
            return
        if current < self.best:
            self.best = current
            self.wait = 0
        else:
            if self.wait >= self.patience:
                if self.verbose > 0:
                    print("Epoch %05d: early stopping" % (epoch))
                self.model.stop_training = True
            self.wait += 1
class RemoteMonitor(Callback):
    """POSTs per-epoch averaged metrics as JSON to
    ``root + '/publish/epoch/end/'`` (best effort: failures only print a
    warning).
    """

    def __init__(self, root='http://localhost:9000'):
        super(RemoteMonitor, self).__init__()
        self.root = root

    def on_epoch_begin(self, epoch, logs={}):
        self.seen = 0
        self.totals = {}

    def on_batch_end(self, batch, logs={}):
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        # Weight metrics by batch size so the epoch value is a true mean.
        for k, v in logs.items():
            if k in self.totals:
                self.totals[k] += v * batch_size
            else:
                self.totals[k] = v * batch_size

    def on_epoch_end(self, epoch, logs={}):
        # Imported lazily so the module does not require `requests` unless
        # this callback is actually used.
        import requests
        send = {'epoch': epoch}
        for k, v in self.totals.items():
            send[k] = v / self.seen
        for k, v in logs.items():
            send[k] = v
        try:
            requests.post(self.root + '/publish/epoch/end/', {'data': json.dumps(send)})
        except Exception:
            # Bug fix: a bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit; monitoring stays best-effort for ordinary errors.
            print('Warning: could not reach RemoteMonitor root server at ' + str(self.root))
class LearningRateScheduler(Callback):
    """Sets the optimizer learning rate at the start of every epoch.

    `schedule` is a function mapping an epoch index to a learning rate.
    """

    def __init__(self, schedule):
        super(LearningRateScheduler, self).__init__()
        self.schedule = schedule

    def on_epoch_begin(self, epoch, logs={}):
        new_lr = self.schedule(epoch)
        self.model.optimizer.lr.set_value(new_lr)
| |
from __future__ import print_function
import unittest
from robot.output.listeners import Listeners, LibraryListeners
from robot.output import LOGGER
from robot.utils.asserts import *
from robot.utils import JYTHON
from robot.running.outputcapture import OutputCapturer
# Silence robot's console logging so listener output can be captured cleanly.
LOGGER.unregister_console_logger()
class Mock(object):
    """Base stub: every undefined attribute access yields an empty string."""

    def __getattr__(self, attr):
        return ''
class SuiteMock(Mock):
    """Stand-in for a suite result object."""
    stat_message = 'stat message'
    full_message = 'full message'

    def __init__(self):
        self.name = 'suitemock'
        self.doc = 'somedoc'
        self.status = 'PASS'
        self.tests = self.suites = []
class TestMock(Mock):
    """Stand-in for a failed test result object."""

    def __init__(self):
        self.status = 'FAIL'
        self.message = 'Expected failure'
        self.name = 'testmock'
        self.doc = 'cod'
        self.tags = ['foo', 'bar']
class KwMock(Mock):
    """Stand-in for a keyword result object."""

    def __init__(self):
        self.type = 'kw'
        self.name = 'kwmock'
        self.args = ['a1', 'a2']
        self.status = 'PASS'
class ListenOutputs(object):
    """Listener mixin that echoes result file paths to stdout."""

    def output_file(self, path):
        self._out_file('Output', path)

    def report_file(self, path):
        self._out_file('Report', path)

    def log_file(self, path):
        self._out_file('Log', path)

    def debug_file(self, path):
        self._out_file('Debug', path)

    def xunit_file(self, path):
        self._out_file('XUnit', path)

    def _out_file(self, name, path):
        print('{}: {}'.format(name, path))
class ListenAll(ListenOutputs):
    """Listener that prints a line for every start/end event it receives."""
    ROBOT_LISTENER_API_VERSION = '2'

    def start_suite(self, name, attrs):
        print("SUITE START: %s '%s'" % (name, attrs['doc']))

    def start_test(self, name, attrs):
        tags = ', '.join(attrs['tags'])
        print("TEST START: %s '%s' %s" % (name, attrs['doc'], tags))

    def start_keyword(self, name, attrs):
        print("KW START: %s %s" % (name, [str(a) for a in attrs['args']]))

    def end_keyword(self, name, attrs):
        print("KW END: %s" % attrs['status'])

    def end_test(self, name, attrs):
        if attrs['status'] == 'PASS':
            print('TEST END: PASS')
        else:
            print("TEST END: %s %s" % (attrs['status'], attrs['message']))

    def end_suite(self, name, attrs):
        print('SUITE END: %s %s' % (attrs['status'], attrs['statistics']))

    def close(self):
        print('Closing...')
class TestListeners(unittest.TestCase):
    # Overridden by the Jython subclass below to exercise a Java listener.
    listener_name = 'test_listeners.ListenAll'
    stat_message = 'stat message'

    def setUp(self):
        self.listeners = Listeners([self.listener_name])
        # Captures stdout/stderr so _assert_output() can inspect what the
        # listener printed.
        self.capturer = OutputCapturer()

    def test_start_suite(self):
        self.listeners.start_suite(SuiteMock())
        self._assert_output("SUITE START: suitemock 'somedoc'")

    def test_start_test(self):
        self.listeners.start_test(TestMock())
        self._assert_output("TEST START: testmock 'cod' foo, bar")

    def test_start_keyword(self):
        self.listeners.start_keyword(KwMock())
        self._assert_output("KW START: kwmock ['a1', 'a2']")

    def test_end_keyword(self):
        self.listeners.end_keyword(KwMock())
        self._assert_output("KW END: PASS")

    def test_end_test(self):
        self.listeners.end_test(TestMock())
        self._assert_output('TEST END: FAIL Expected failure')

    def test_end_suite(self):
        self.listeners.end_suite(SuiteMock())
        self._assert_output('SUITE END: PASS ' + self.stat_message)

    def test_output_file(self):
        self.listeners.output_file('output', 'path/to/output')
        self._assert_output('Output: path/to/output')

    def test_log_file(self):
        self.listeners.output_file('log', 'path/to/log')
        self._assert_output('Log: path/to/log')

    def test_report_file(self):
        self.listeners.output_file('report', 'path/to/report')
        self._assert_output('Report: path/to/report')

    def test_debug_file(self):
        self.listeners.output_file('debug', 'path/to/debug')
        self._assert_output('Debug: path/to/debug')

    def test_xunit_file(self):
        self.listeners.output_file('XUnit', 'path/to/xunit')
        self._assert_output('XUnit: path/to/xunit')

    def test_close(self):
        self.listeners.close()
        self._assert_output('Closing...')

    def _assert_output(self, expected):
        # Release the capturer and verify the listener wrote exactly the
        # expected line to stdout and nothing to stderr.
        stdout, stderr = self.capturer._release()
        assert_equal(stderr, '')
        assert_equal(stdout.rstrip(), expected)
if JYTHON:
    # Same assertions as TestListeners, but exercised through a Java listener
    # implementation; only defined when running on Jython.
    class TestJavaListeners(TestListeners):
        listener_name = 'NewStyleJavaListener'
        stat_message = 'stat message'
class TestAttributesAreNotAccessedUnnecessarily(unittest.TestCase):
    """Listener dispatch must not touch model attributes when no listener
    implements the corresponding hook (None arguments would blow up)."""

    def test_start_and_end_methods(self):
        for listeners in [Listeners([]), LibraryListeners()]:
            for name in dir(listeners):
                if name.startswith(('start_', 'end_')):
                    method = getattr(listeners, name)
                    # Passing None must be safe when nothing listens.
                    method(None)

    def test_message_methods(self):
        class Message(object):
            level = 'INFO'
        for listeners in [Listeners([]), LibraryListeners()]:
            listeners.log_message(Message)
            listeners.message(Message)

    def test_some_methods_implemented(self):
        class MyListener(object):
            ROBOT_LISTENER_API_VERSION = 2

            def end_suite(self, suite):
                pass
        libs = LibraryListeners()
        libs.new_suite_scope()
        libs.register([MyListener()], None)
        for listeners in [Listeners([MyListener()]), libs]:
            # start_suite is unimplemented, so None passes through silently;
            # end_suite IS implemented, so it accesses the (None) suite.
            listeners.start_suite(None)
            assert_raises(AttributeError, listeners.end_suite, None)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six.moves.cPickle as pickle
import mock
import os
import unittest
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from test.unit import FakeLogger
from eventlet import spawn, Timeout, listen
from swift.common import utils
from swift.container import updater as container_updater
from swift.container.backend import ContainerBroker, DATADIR
from swift.common.ring import RingData
from swift.common.utils import normalize_timestamp
class TestContainerUpdater(unittest.TestCase):
    """Tests for swift.container.updater.ContainerUpdater."""

    def setUp(self):
        # Fixed hash prefix/suffix so ring hashing is deterministic in tests.
        utils.HASH_PATH_SUFFIX = 'endcap'
        utils.HASH_PATH_PREFIX = 'startcap'
        self.testdir = os.path.join(mkdtemp(), 'tmp_test_container_updater')
        rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)
        # Serialize a two-device account ring for the updater to load.
        ring_file = os.path.join(self.testdir, 'account.ring.gz')
        with closing(GzipFile(ring_file, 'wb')) as f:
            pickle.dump(
                RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                         [{'id': 0, 'ip': '127.0.0.1', 'port': 12345,
                           'device': 'sda1', 'zone': 0},
                          {'id': 1, 'ip': '127.0.0.1', 'port': 12345,
                           'device': 'sda1', 'zone': 2}], 30),
                f)
        self.devices_dir = os.path.join(self.testdir, 'devices')
        os.mkdir(self.devices_dir)
        self.sda1 = os.path.join(self.devices_dir, 'sda1')
        os.mkdir(self.sda1)

    def tearDown(self):
        rmtree(os.path.dirname(self.testdir), ignore_errors=1)

    def test_creation(self):
        # String config values must be coerced to ints by the updater.
        cu = container_updater.ContainerUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '2',
            'node_timeout': '5',
        })
        self.assertTrue(hasattr(cu, 'logger'))
        self.assertTrue(cu.logger is not None)
        self.assertEqual(cu.devices, self.devices_dir)
        self.assertEqual(cu.interval, 1)
        self.assertEqual(cu.concurrency, 2)
        self.assertEqual(cu.node_timeout, 5)
        self.assertTrue(cu.get_account_ring() is not None)

    def test_run_once(self):
        """run_once() end to end: empty dirs, a fresh container, an object,
        then a successful report to a fake in-process account server."""
        cu = container_updater.ContainerUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15',
            'account_suppression_time': 0
        })
        cu.run_once()
        containers_dir = os.path.join(self.sda1, DATADIR)
        os.mkdir(containers_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(containers_dir))
        subdir = os.path.join(containers_dir, 'subdir')
        os.mkdir(subdir)
        cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                             container='c')
        cb.initialize(normalize_timestamp(1), 0)
        cu.run_once()
        info = cb.get_info()
        self.assertEqual(info['object_count'], 0)
        self.assertEqual(info['bytes_used'], 0)
        self.assertEqual(info['reported_object_count'], 0)
        self.assertEqual(info['reported_bytes_used'], 0)
        cb.put_object('o', normalize_timestamp(2), 3, 'text/plain',
                      '68b329da9893e34099c7d8ad5cb9c940')
        cu.run_once()
        info = cb.get_info()
        self.assertEqual(info['object_count'], 1)
        self.assertEqual(info['bytes_used'], 3)
        # No account server is reachable yet, so nothing has been reported.
        self.assertEqual(info['reported_object_count'], 0)
        self.assertEqual(info['reported_bytes_used'], 0)

        def accept(sock, addr, return_code):
            # Minimal fake account server: answer one PUT and validate it.
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEqual(inc.readline(),
                                     'PUT /sda1/0/a/c HTTP/1.1\r\n')
                    headers = {}
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0].lower()] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertTrue('x-put-timestamp' in headers)
                    self.assertTrue('x-delete-timestamp' in headers)
                    self.assertTrue('x-object-count' in headers)
                    self.assertTrue('x-bytes-used' in headers)
            except BaseException as err:
                import traceback
                traceback.print_exc()
                return err
            return None
        bindsock = listen(('127.0.0.1', 0))

        def spawn_accepts():
            # One accept per ring replica (two devices in the test ring).
            events = []
            for _junk in range(2):
                sock, addr = bindsock.accept()
                events.append(spawn(accept, sock, addr, 201))
            return events
        spawned = spawn(spawn_accepts)
        # Point every ring device at the fake server's ephemeral port.
        for dev in cu.get_account_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]
        cu.run_once()
        for event in spawned.wait():
            err = event.wait()
            if err:
                raise err
        info = cb.get_info()
        self.assertEqual(info['object_count'], 1)
        self.assertEqual(info['bytes_used'], 3)
        self.assertEqual(info['reported_object_count'], 1)
        self.assertEqual(info['reported_bytes_used'], 3)

    @mock.patch('os.listdir')
    def test_listdir_with_exception(self, mock_listdir):
        # get_paths() should swallow the OSError and log it as an error.
        e = OSError('permission_denied')
        mock_listdir.side_effect = e
        cu = container_updater.ContainerUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15',
            'account_suppression_time': 0
        })
        cu.logger = FakeLogger()
        paths = cu.get_paths()
        self.assertEqual(paths, [])
        log_lines = cu.logger.get_lines_for_level('error')
        msg = ('ERROR: Failed to get paths to drive partitions: '
               'permission_denied')
        self.assertEqual(log_lines[0], msg)

    @mock.patch('os.listdir', return_value=['foo', 'bar'])
    def test_listdir_without_exception(self, mock_listdir):
        cu = container_updater.ContainerUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15',
            'account_suppression_time': 0
        })
        cu.logger = FakeLogger()
        path = cu._listdir('foo/bar/')
        self.assertEqual(path, ['foo', 'bar'])
        log_lines = cu.logger.get_lines_for_level('error')
        self.assertEqual(len(log_lines), 0)

    def test_unicode(self):
        # Non-ASCII container/object names must survive a full report cycle.
        cu = container_updater.ContainerUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15',
        })
        containers_dir = os.path.join(self.sda1, DATADIR)
        os.mkdir(containers_dir)
        subdir = os.path.join(containers_dir, 'subdir')
        os.mkdir(subdir)
        cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                             container='\xce\xa9')
        cb.initialize(normalize_timestamp(1), 0)
        cb.put_object('\xce\xa9', normalize_timestamp(2), 3, 'text/plain',
                      '68b329da9893e34099c7d8ad5cb9c940')

        def accept(sock, addr):
            # Fake account server that accepts anything with a 201.
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n')
                    out.flush()
                    inc.read()
            except BaseException as err:
                import traceback
                traceback.print_exc()
                return err
            return None
        bindsock = listen(('127.0.0.1', 0))

        def spawn_accepts():
            events = []
            for _junk in range(2):
                with Timeout(3):
                    sock, addr = bindsock.accept()
                    events.append(spawn(accept, sock, addr))
            return events
        spawned = spawn(spawn_accepts)
        for dev in cu.get_account_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]
        cu.run_once()
        for event in spawned.wait():
            err = event.wait()
            if err:
                raise err
        info = cb.get_info()
        self.assertEqual(info['object_count'], 1)
        self.assertEqual(info['bytes_used'], 3)
        self.assertEqual(info['reported_object_count'], 1)
        self.assertEqual(info['reported_bytes_used'], 3)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
import pprint
import argparse
import os
import json
import sys
import re
import zk_utils
import seldon_utils
import spark_utils
# Zookeeper node layout shared by every action in this module.
gdata = {
    'all_clients_node_path': "/all_clients",
}
def pp(o):
    """Pretty-print `o` to stdout with 4-space indentation."""
    pprint.PrettyPrinter(indent=4).pprint(o)
def getOpts(args):
    """Parse the command-line arguments for `seldon-cli client`."""
    parser = argparse.ArgumentParser(prog='seldon-cli client', description='Seldon Cli')
    parser.add_argument('--action', help="the action to use", required=False,
                        choices=['list', 'setup', 'processactions', 'processevents', 'zk_push', "zk_pull"])
    parser.add_argument('--db-name', help="the name of the db", required=False)
    parser.add_argument('--client-name', help="the name of the client", required=False)
    parser.add_argument('--input-date-string', help="The date to process in YYYYMMDD format", required=False)
    parser.add_argument('--set-js-key', help="the key to use for the js scope", required=False)
    parser.add_argument('--set-all-key', help="the key to use for the all scope", required=False)
    parser.add_argument('--set-all-secret', help="the secret to use for the all scope", required=False)
    # Everything that is not a recognised option is collected as positionals.
    parser.add_argument('args', nargs=argparse.REMAINDER)
    return parser.parse_args(args)
def get_data_fpath(zkroot, client):
    """Return the local cache path of the `_data_` file for `client`."""
    return "%s%s/%s/_data_" % (zkroot, gdata["all_clients_node_path"], client)
def json_to_dict(json_data):
    """Deserialize a JSON string into Python objects."""
    return json.loads(json_data)
def dict_to_json(d, expand=False):
    """Serialize `d`: pretty multi-line JSON when `expand`, compact otherwise."""
    if expand:
        return json.dumps(d, sort_keys=True, indent=4, separators=(',', ': '))
    return json.dumps(d, sort_keys=True, separators=(',', ':'))
def mkdir_p(path):
    """Create `path` like `mkdir -p`: make intermediate directories and do
    not fail when the directory already exists."""
    import errno  # Bug fix: `errno` was referenced below but never imported.
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
def write_data_to_file(data_fpath, data):
json = dict_to_json(data, True) if isinstance(data,dict) else str(data)
mkdir_p(os.path.dirname(data_fpath))
f = open(data_fpath,'w')
f.write(json)
f.write('\n')
f.close()
print "Writing data to file[{data_fpath}]".format(**locals())
def is_existing_client(zkroot, client_name):
    """Return True when `client_name` has a node under the all-clients dir."""
    # Idiom: return the membership test directly instead of if/else booleans.
    client_names = os.listdir(zkroot + gdata["all_clients_node_path"])
    return client_name in client_names
def add_client(gopts, command_data, zk_client, zkroot, client_name, db_name, consumer_details=None):
    """Create a new client: register its DB, then record its settings in the
    local cache and in zookeeper. Exits the process on an unknown db name."""
    data_fpath = zkroot + "/config/dbcp/_data_"
    f = open(data_fpath)
    # NOTE(review): this local shadows the imported `json` module inside this
    # function; works only because nothing below needs the module.
    json = f.read()
    data = json_to_dict(json)
    f.close()
    # Find the connection settings for the requested db.
    db_info = None
    for db_info_entry in data['dbs']:
        if db_info_entry['name'] == db_name:
            db_info = db_info_entry
            break
    if db_info == None:
        print "Invalid db name[{db_name}]".format(**locals())
        sys.exit(1)
    dbSettings = {}
    # Host is the first group of "...://host:port," in the JDBC URL.
    dbSettings["host"] = re.search('://(.*?):(.*?),', db_info["jdbc"]).groups()[0]
    dbSettings["user"] = db_info["user"]
    dbSettings["password"] = db_info["password"]
    seldon_utils.addApiDb(db_name, dbSettings)
    seldon_utils.addClientDb(client_name, dbSettings, consumer_details)
    # write to local file
    data_fpath = get_data_fpath(zkroot, client_name)
    data = {'DB_JNDI_NAME': db_name}
    write_data_to_file(data_fpath, data)
    # write to zookeeper
    node_path = gdata["all_clients_node_path"] + "/" + client_name
    data_json = dict_to_json(data)
    zk_utils.node_set(zk_client, node_path, data_json)
def add_client_dashboard(gopts, command_data, client_name):
    """Create a grafana dashboard for `client_name` when a grafana endpoint
    is configured and the admin password is present in the environment."""
    conf = command_data["conf_data"]
    if "grafana_endpoint" not in conf or 'GRAFANA_ADMIN_PASSWORD' not in os.environ:
        return
    admin_password = os.environ['GRAFANA_ADMIN_PASSWORD']
    grafana = conf["grafana_endpoint"]
    dashboard_template = conf.get("grafana_dashboard_template")
    if grafana is not None and grafana != "":
        seldon_utils.add_grafana_dashboard(grafana, client_name, gopts.quiet, dashboard_template, admin_password)
def action_list(gopts, command_data, opts):
    """List known clients, first syncing the local cache from zookeeper when
    the all-clients directory does not exist locally."""
    zkroot = command_data["zkdetails"]["zkroot"]
    zk_client = command_data["zkdetails"]["zk_client"]
    all_clients_fpath = zkroot + gdata["all_clients_node_path"]
    if not os.path.isdir(all_clients_fpath):
        # the dir for all_clients doesnt exist
        if zk_client.exists(gdata["all_clients_node_path"]):
            client_nodes = zk_client.get_children(gdata["all_clients_node_path"])

            def write_node_value_to_file(node_path):
                # Mirror one zookeeper node value into the local _data_ file.
                node_value = zk_utils.node_get(zk_client, node_path)
                node_value = node_value.strip()
                if zk_utils.is_json_data(node_value):
                    data = json_to_dict(node_value) if node_value != None and len(node_value) > 0 else ""
                else:
                    data = str(node_value)
                data_fpath = zkroot + node_path + "/_data_"
                write_data_to_file(data_fpath, data)
            # Sync each client node and its immediate children.
            for client_node in client_nodes:
                node_path = gdata["all_clients_node_path"] + "/" + client_node
                write_node_value_to_file(node_path)
                client_child_nodes = zk_client.get_children(gdata["all_clients_node_path"] + "/" + client_node)
                for client_child_node in client_child_nodes:
                    node_path = gdata["all_clients_node_path"] + "/" + client_node + "/" + client_child_node
                    write_node_value_to_file(node_path)
    if not os.path.isdir(all_clients_fpath):
        print "No clients found!"
    else:
        for client in os.listdir(zkroot + gdata["all_clients_node_path"]):
            data_fpath = get_data_fpath(zkroot, client)
            f = open(data_fpath)
            # NOTE(review): local `json` shadows the imported json module here.
            json = f.read()
            data = json_to_dict(json)
            f.close()
            print "client[{client}]:".format(**locals())
            DB_JNDI_NAME = data["DB_JNDI_NAME"] if isinstance(data, dict) and data.has_key("DB_JNDI_NAME") else ""
            print "    DB_JNDI_NAME: " + DB_JNDI_NAME
def action_setup(gopts, command_data, opts):
    """Create a client (and its dashboard) for --db-name/--client-name, or
    just refresh the dashboard when the client already exists."""
    db_name_to_use = opts.db_name
    client_name_to_setup = opts.client_name
    if db_name_to_use == None:
        print "Need db name to use"
        sys.exit(1)
    if client_name_to_setup == None:
        print "Need client name to setup"
        sys.exit(1)
    # API keys/secrets handed through to the client DB registration.
    consumer_details = {
        'js_consumer_key': opts.set_js_key,
        'all_consumer_key': opts.set_all_key,
        'all_consumer_secret': opts.set_all_secret,
    }
    # check if this client exists
    zkroot = command_data["zkdetails"]["zkroot"]
    zk_client = command_data["zkdetails"]["zk_client"]
    data_fpath = get_data_fpath(zkroot, client_name_to_setup)
    if not os.path.isfile(data_fpath):
        print "Trying to create the client"
        add_client(gopts, command_data, zk_client, zkroot, client_name_to_setup, db_name_to_use, consumer_details)
        add_client_dashboard(gopts, command_data, client_name_to_setup)
    else:
        add_client_dashboard(gopts, command_data, client_name_to_setup)
        print "Client already exists!"
def action_zk_push(gopts, command_data, opts):
    """Push every locally cached node value up to zookeeper."""
    zkdetails = command_data["zkdetails"]
    zk_utils.push_all_nodes(zkdetails["zk_client"], zkdetails["zkroot"])
def action_zk_pull(gopts, command_data, opts):
    """Pull every zookeeper node value down into the local cache."""
    zkdetails = command_data["zkdetails"]
    zk_utils.pull_all_nodes(zkdetails["zk_client"], zkdetails["zkroot"])
def action_processactions(gopts, command_data, opts):
    """Run the spark 'process actions' job for one client (or all clients
    when no --client-name is given)."""
    zkroot = command_data["zkdetails"]["zkroot"]

    def get_valid_client():
        # Exits the process when the client is unknown locally.
        if not is_existing_client(zkroot, client_name):
            print "Invalid client[{client_name}]".format(**locals())
            sys.exit(1)
        return client_name

    def get_valid_input_date_string():
        input_date_string = opts.input_date_string
        if input_date_string == None:
            print "Need input date string!"
            sys.exit(1)
        return input_date_string
    client_name = opts.client_name
    if client_name != None:
        client_name = get_valid_client()
    job_info = command_data["conf_data"]["processactions"]["job_info"]
    if client_name != None:
        # Restrict the job to a single client; the placeholder is expanded
        # later by spark_utils.
        job_info["cmd_args"].append("--single-client")
        job_info["cmd_args"].append("%CLIENT_NAME%")
    conf_data = command_data["conf_data"]
    spark_executor_memory = conf_data["spark_executor_memory"]
    spark_driver_memory = conf_data["spark_driver_memory"]
    input_date_string = get_valid_input_date_string()
    # Placeholder -> value substitutions applied to every cmd arg.
    replacements = [
        ("%INPUT_DATE_STRING%", input_date_string),
        ("%SPARK_EXECUTOR_MEMORY%", spark_executor_memory),
        ("%SPARK_DRIVER_MEMORY%", spark_driver_memory)
    ]

    def appy_replacements(item):
        # Substitute every placeholder that has a non-None value.
        for rpair in replacements:
            if rpair[1] != None:
                item = item.replace(rpair[0], rpair[1])
        return item
    cmd_args = job_info["cmd_args"]
    job_info["cmd_args"] = map(appy_replacements, cmd_args)
    spark_utils.run_spark_job(command_data, job_info, client_name)
def action_processevents(gopts, command_data, opts):
    """Run the spark 'process events' job for one client (or all clients
    when no --client-name is given)."""
    zkroot = command_data["zkdetails"]["zkroot"]

    def get_valid_client():
        # Exits the process when the client is unknown locally.
        if not is_existing_client(zkroot, client_name):
            print "Invalid client[{client_name}]".format(**locals())
            sys.exit(1)
        return client_name

    def get_valid_input_date_string():
        input_date_string = opts.input_date_string
        if input_date_string == None:
            print "Need input date string!"
            sys.exit(1)
        return input_date_string
    client_name = opts.client_name
    if client_name != None:
        client_name = get_valid_client()
    job_info = command_data["conf_data"]["processevents"]["job_info"]
    if client_name != None:
        # Restrict the job to a single client; the placeholder is expanded
        # later by spark_utils.
        job_info["cmd_args"].append("--single-client")
        job_info["cmd_args"].append("%CLIENT_NAME%")
    input_date_string = get_valid_input_date_string()
    replacements = [
        ("%INPUT_DATE_STRING%", input_date_string),
    ]

    def appy_replacements(item):
        # Substitute every placeholder that has a non-None value.
        for rpair in replacements:
            if rpair[1] != None:
                item = item.replace(rpair[0], rpair[1])
        return item
    cmd_args = job_info["cmd_args"]
    job_info["cmd_args"] = map(appy_replacements, cmd_args)
    spark_utils.run_spark_job(command_data, job_info, client_name)
def cmd_client(gopts,command_data, command_args):
actions = {
"default" : action_list,
"list" : action_list,
"setup" : action_setup,
"processactions" : action_processactions,
"processevents" : action_processevents,
"zk_push" : action_zk_push,
"zk_pull" : action_zk_pull,
}
opts = getOpts(command_args)
action = opts.action
if action == None:
actions["default"](gopts,command_data, opts)
else:
if actions.has_key(action):
actions[action](gopts,command_data, opts)
else:
print "Invalid action[{}]".format(action)
| |
from __future__ import print_function
try:
from http.server import HTTPServer, SimpleHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
import os
import sys
try:
from urlparse import urlparse
from urllib import unquote
except ImportError:
from urllib.parse import urlparse, unquote
import posixpath
if sys.version_info.major >= 3:
from io import StringIO, BytesIO
else:
from io import BytesIO, BytesIO as StringIO
import re
import shutil
import threading
import time
import socket
import itertools
import Reporter
try:
import configparser
except ImportError:
import ConfigParser as configparser
###
# Various patterns matched or replaced by server.
kReportFileRE = re.compile('(.*/)?report-(.*)\\.html')
kBugKeyValueRE = re.compile('<!-- BUG([^ ]*) (.*) -->')
# <!-- REPORTPROBLEM file="crashes/clang_crash_ndSGF9.mi" stderr="crashes/clang_crash_ndSGF9.mi.stderr.txt" info="crashes/clang_crash_ndSGF9.mi.info" -->
kReportCrashEntryRE = re.compile('<!-- REPORTPROBLEM (.*?)-->')
kReportCrashEntryKeyValueRE = re.compile(' ?([^=]+)="(.*?)"')
kReportReplacements = []
# Add custom javascript.
kReportReplacements.append((re.compile('<!-- SUMMARYENDHEAD -->'), """\
<script language="javascript" type="text/javascript">
function load(url) {
if (window.XMLHttpRequest) {
req = new XMLHttpRequest();
} else if (window.ActiveXObject) {
req = new ActiveXObject("Microsoft.XMLHTTP");
}
if (req != undefined) {
req.open("GET", url, true);
req.send("");
}
}
</script>"""))
# Insert additional columns.
kReportReplacements.append((re.compile('<!-- REPORTBUGCOL -->'),
'<td></td><td></td>'))
# Insert report bug and open file links.
kReportReplacements.append((re.compile('<!-- REPORTBUG id="report-(.*)\\.html" -->'),
('<td class="Button"><a href="report/\\1">Report Bug</a></td>' +
'<td class="Button"><a href="javascript:load(\'open/\\1\')">Open File</a></td>')))
kReportReplacements.append((re.compile('<!-- REPORTHEADER -->'),
'<h3><a href="/">Summary</a> > Report %(report)s</h3>'))
kReportReplacements.append((re.compile('<!-- REPORTSUMMARYEXTRA -->'),
'<td class="Button"><a href="report/%(report)s">Report Bug</a></td>'))
# Insert report crashes link.
# Disabled for the time being until we decide exactly when this should
# be enabled. Also the radar reporter needs to be fixed to report
# multiple files.
#kReportReplacements.append((re.compile('<!-- REPORTCRASHES -->'),
# '<br>These files will automatically be attached to ' +
# 'reports filed here: <a href="report_crashes">Report Crashes</a>.'))
###
# Other simple parameters
kShare = posixpath.join(posixpath.dirname(__file__), '../share/scan-view')
kConfigPath = os.path.expanduser('~/.scanview.cfg')
###
__version__ = "0.1"
__all__ = ["create_server"]
class ReporterThread(threading.Thread):
    """Background thread that files a single bug via *reporter*, recording
    the outcome in .success and .status for the web UI to display."""
    def __init__(self, report, reporter, parameters, server):
        threading.Thread.__init__(self)
        self.report = report
        self.server = server
        self.reporter = reporter
        self.parameters = parameters
        self.success = False  # True only after fileReport() returns
        self.status = None    # reporter status string, or error HTML
    def run(self):
        """File the report; any failure is captured into .status."""
        result = None  # NOTE(review): unused local.
        try:
            if self.server.options.debug:
                print("%s: SERVER: submitting bug."%(sys.argv[0],), file=sys.stderr)
            self.status = self.reporter.fileReport(self.report, self.parameters)
            self.success = True
            time.sleep(3)
            if self.server.options.debug:
                print("%s: SERVER: submission complete."%(sys.argv[0],), file=sys.stderr)
        except Reporter.ReportFailure as e:
            self.status = e.value
        except Exception as e:
            # Render the traceback as HTML so the submit page can show it.
            s = StringIO()
            import traceback
            print('<b>Unhandled Exception</b><br><pre>', file=s)
            traceback.print_exc(file=s)
            print('</pre>', file=s)
            self.status = s.getvalue()
class ScanViewServer(HTTPServer):
    """HTTP server serving analyzer results from *root* and filing bugs
    through the given *reporters*; user preferences persist in ~/.scanview.cfg.
    """
    def __init__(self, address, handler, root, reporters, options):
        HTTPServer.__init__(self, address, handler)
        self.root = root
        self.reporters = reporters
        self.options = options
        self.halted = False
        self.config = None
        self.load_config()
    def load_config(self):
        """Load preferences from kConfigPath, seeding per-reporter defaults."""
        self.config = configparser.RawConfigParser()
        # Add defaults
        self.config.add_section('ScanView')
        for r in self.reporters:
            self.config.add_section(r.getName())
            for p in r.getParameters():
                if p.saveConfigValue():
                    self.config.set(r.getName(), p.getName(), '')
        # Ignore parse errors: a corrupt config must not prevent startup.
        try:
            self.config.read([kConfigPath])
        except Exception:
            pass
        # Save on exit
        import atexit
        atexit.register(lambda: self.save_config())
    def save_config(self):
        """Persist preferences; errors are ignored (only called on exit)."""
        try:
            # 'with' guarantees the file is closed even if write() fails.
            with open(kConfigPath, 'w') as f:
                self.config.write(f)
        except Exception:
            pass
    def halt(self):
        """Ask serve_forever() to stop after the current request."""
        self.halted = True
        if self.options.debug:
            print("%s: SERVER: halting." % (sys.argv[0],), file=sys.stderr)
    def serve_forever(self):
        """Handle requests one at a time until halt() is called."""
        while not self.halted:
            if self.options.debug > 1:
                print("%s: SERVER: waiting..." % (sys.argv[0],), file=sys.stderr)
            try:
                self.handle_request()
            except OSError as e:
                print('OSError',e.errno)
    def finish_request(self, request, client_address):
        """Dispatch a request, optionally hot-reloading the handler module."""
        if self.options.autoReload:
            import ScanView
            # BUG FIX: reload() is not a builtin on Python 3; fall back to
            # importlib.reload there, keep the builtin on Python 2.
            try:
                from importlib import reload
            except ImportError:
                pass
            self.RequestHandlerClass = reload(ScanView).ScanViewRequestHandler
        HTTPServer.finish_request(self, request, client_address)
    def handle_error(self, request, client_address):
        # Ignore socket errors
        info = sys.exc_info()
        if info and isinstance(info[1], socket.error):
            if self.options.debug > 1:
                print("%s: SERVER: ignored socket error." % (sys.argv[0],), file=sys.stderr)
            return
        HTTPServer.handle_error(self, request, client_address)
# Borrowed from Quixote, with simplifications.
def parse_query(qs, fields=None):
    """Parse a URL-encoded query string into a dict of name -> value list.

    Repeated names accumulate; '+' decodes to space and %XX escapes are
    resolved. Results are merged into *fields* when one is supplied.
    """
    if fields is None:
        fields = {}
    for piece in qs.split('&'):
        if not piece:
            continue
        # A bare name (no '=') maps to the empty string.
        name, _, value = piece.partition('=')
        name = unquote(name.replace('+', ' '))
        value = unquote(value.replace('+', ' '))
        fields.setdefault(name, []).append(value)
    return fields
class ScanViewRequestHandler(SimpleHTTPRequestHandler):
    """Request handler for scan-view.

    Serves the static analyzer output under server.root, patching HTML
    pages on the fly (kReportReplacements), and implements the dynamic
    endpoints: /report, /open, /report_submit, /report_crashes, /quit.
    """
    server_version = "ScanViewServer/" + __version__
    # Used as the Last-Modified time of dynamically generated responses.
    dynamic_mtime = time.time()
    def do_HEAD(self):
        """Handle HEAD, converting any failure into an error page."""
        try:
            SimpleHTTPRequestHandler.do_HEAD(self)
        except Exception as e:
            self.handle_exception(e)
    def do_GET(self):
        """Handle GET, converting any failure into an error page."""
        try:
            SimpleHTTPRequestHandler.do_GET(self)
        except Exception as e:
            self.handle_exception(e)
    def do_POST(self):
        """Serve a POST request."""
        try:
            # NOTE(review): headers.getheader() is the Python 2 mimetools
            # API; under Python 3 this would need self.headers.get() —
            # confirm the intended interpreter.
            length = self.headers.getheader('content-length') or "0"
            try:
                length = int(length)
            except:
                length = 0
            # NOTE(review): on Python 3 rfile.read() yields bytes, which
            # parse_query cannot split with str separators.
            content = self.rfile.read(length)
            fields = parse_query(content)
            f = self.send_head(fields)
            if f:
                self.copyfile(f, self.wfile)
                f.close()
        except Exception as e:
            self.handle_exception(e)
    def log_message(self, format, *args):
        """Log to stderr only when --debug was given."""
        if self.server.options.debug:
            sys.stderr.write("%s: SERVER: %s - - [%s] %s\n" %
                             (sys.argv[0],
                              self.address_string(),
                              self.log_date_time_string(),
                              format%args))
    def load_report(self, report):
        """Return the '<!-- BUGkey value -->' metadata of a report page as a dict."""
        path = os.path.join(self.server.root, 'report-%s.html'%report)
        data = open(path).read()
        keys = {}
        for item in kBugKeyValueRE.finditer(data):
            k,v = item.groups()
            keys[k] = v
        return keys
    def load_crashes(self):
        """Parse the REPORTPROBLEM entries of index.html into a list of dicts."""
        path = posixpath.join(self.server.root, 'index.html')
        data = open(path).read()
        problems = []
        for item in kReportCrashEntryRE.finditer(data):
            fieldData = item.group(1)
            fields = dict([i.groups() for i in
                           kReportCrashEntryKeyValueRE.finditer(fieldData)])
            problems.append(fields)
        return problems
    def handle_exception(self, exc):
        """Send the current exception's traceback as a plain-text response."""
        import traceback
        s = StringIO()
        print("INTERNAL ERROR\n", file=s)
        traceback.print_exc(file=s)
        f = self.send_string(s.getvalue(), 'text/plain')
        if f:
            self.copyfile(f, self.wfile)
            f.close()
    def get_scalar_field(self, name):
        """Return the first submitted value for *name*, or None if absent."""
        if name in self.fields:
            return self.fields[name][0]
        else:
            return None
    def submit_bug(self, c):
        """Validate the submitted form fields against context *c* and file
        the bug through the selected reporter; returns (success, message)."""
        title = self.get_scalar_field('title')
        description = self.get_scalar_field('description')
        report = self.get_scalar_field('report')
        reporterIndex = self.get_scalar_field('reporter')
        files = []
        for fileID in self.fields.get('files',[]):
            try:
                i = int(fileID)
            except:
                i = None
            if i is None or i<0 or i>=len(c.files):
                return (False, 'Invalid file ID')
            files.append(c.files[i])
        if not title:
            return (False, "Missing title.")
        if not description:
            return (False, "Missing description.")
        try:
            reporterIndex = int(reporterIndex)
        except:
            return (False, "Invalid report method.")
        # Get the reporter and parameters.
        reporter = self.server.reporters[reporterIndex]
        parameters = {}
        for o in reporter.getParameters():
            name = '%s_%s'%(reporter.getName(),o.getName())
            if name not in self.fields:
                return (False,
                        'Missing field "%s" for %s report method.'%(name,
                                                                    reporter.getName()))
            parameters[o.getName()] = self.get_scalar_field(name)
        # Update config defaults.
        if report != 'None':
            self.server.config.set('ScanView', 'reporter', reporterIndex)
            for o in reporter.getParameters():
                if o.saveConfigValue():
                    name = o.getName()
                    self.server.config.set(reporter.getName(), name, parameters[name])
        # Create the report.
        bug = Reporter.BugReport(title, description, files)
        # Kick off a reporting thread.
        t = ReporterThread(bug, reporter, parameters, self.server)
        t.start()
        # Wait for thread to die...
        # NOTE(review): Thread.isAlive() was removed in Python 3.9; the
        # portable spelling is t.is_alive().
        while t.isAlive():
            time.sleep(.25)
        submitStatus = t.status  # NOTE(review): unused local.
        return (t.success, t.status)
    def send_report_submit(self):
        """Handle the /report_submit form POST and render the result page."""
        report = self.get_scalar_field('report')
        c = self.get_report_context(report)
        if c.reportSource is None:
            reportingFor = "Report Crashes > "
            fileBug = """\
<a href="/report_crashes">File Bug</a> > """%locals()
        else:
            reportingFor = '<a href="/%s">Report %s</a> > ' % (c.reportSource,
                                                               report)
            fileBug = '<a href="/report/%s">File Bug</a> > ' % report
        title = self.get_scalar_field('title')
        description = self.get_scalar_field('description')
        res,message = self.submit_bug(c)
        if res:
            statusClass = 'SubmitOk'
            statusName = 'Succeeded'
        else:
            statusClass = 'SubmitFail'
            statusName = 'Failed'
        result = """
<head>
  <title>Bug Submission</title>
  <link rel="stylesheet" type="text/css" href="/scanview.css" />
</head>
<body>
<h3>
<a href="/">Summary</a> >
%(reportingFor)s
%(fileBug)s
Submit</h3>
<form name="form" action="">
<table class="form">
<tr><td>
<table class="form_group">
<tr>
  <td class="form_clabel">Title:</td>
  <td class="form_value">
    <input type="text" name="title" size="50" value="%(title)s" disabled>
  </td>
</tr>
<tr>
  <td class="form_label">Description:</td>
  <td class="form_value">
<textarea rows="10" cols="80" name="description" disabled>
%(description)s
</textarea>
  </td>
</table>
</td></tr>
</table>
</form>
<h1 class="%(statusClass)s">Submission %(statusName)s</h1>
%(message)s
<p>
<hr>
<a href="/">Return to Summary</a>
</body>
</html>"""%locals()
        return self.send_string(result)
    def send_open_report(self, report):
        """Open the file referenced by a report on the local machine."""
        try:
            keys = self.load_report(report)
        except IOError:
            return self.send_error(400, 'Invalid report.')
        file = keys.get('FILE')
        if not file or not posixpath.exists(file):
            return self.send_error(400, 'File does not exist: "%s"' % file)
        import startfile
        if self.server.options.debug:
            print('%s: SERVER: opening "%s"'%(sys.argv[0],
                                              file), file=sys.stderr)
        status = startfile.open(file)
        if status:
            res = 'Opened: "%s"' % file
        else:
            res = 'Open failed: "%s"' % file
        return self.send_string(res, 'text/plain')
    def get_report_context(self, report):
        """Build the bug-filing context (title, description, attachments)
        for a report id, or for the crash summary when *report* is None."""
        class Context(object):
            pass
        if report is None or report == 'None':
            data = self.load_crashes()
            # Don't allow empty reports.
            if not data:
                raise ValueError('No crashes detected!')
            c = Context()
            c.title = 'clang static analyzer failures'
            stderrSummary = ""
            for item in data:
                if 'stderr' in item:
                    path = posixpath.join(self.server.root, item['stderr'])
                    if os.path.exists(path):
                        lns = itertools.islice(open(path), 0, 10)
                        stderrSummary += '%s\n--\n%s' % (item.get('src',
                                                                  '<unknown>'),
                                                         ''.join(lns))
            c.description = """\
The clang static analyzer failed on these inputs:
%s

STDERR Summary
--------------
%s
""" % ('\n'.join([item.get('src','<unknown>') for item in data]),
       stderrSummary)
            c.reportSource = None
            c.navMarkup = "Report Crashes > "
            c.files = []
            for item in data:
                c.files.append(item.get('src',''))
                c.files.append(posixpath.join(self.server.root,
                                              item.get('file','')))
                c.files.append(posixpath.join(self.server.root,
                                              item.get('clangfile','')))
                c.files.append(posixpath.join(self.server.root,
                                              item.get('stderr','')))
                c.files.append(posixpath.join(self.server.root,
                                              item.get('info','')))
            # Just in case something failed, ignore files which don't
            # exist.
            c.files = [f for f in c.files
                       if os.path.exists(f) and os.path.isfile(f)]
        else:
            # Check that this is a valid report.
            path = posixpath.join(self.server.root, 'report-%s.html' % report)
            if not posixpath.exists(path):
                raise ValueError('Invalid report ID')
            keys = self.load_report(report)
            c = Context()
            c.title = keys.get('DESC','clang error (unrecognized')
            c.description = """\
Bug reported by the clang static analyzer.

Description: %s
File: %s
Line: %s
"""%(c.title, keys.get('FILE','<unknown>'), keys.get('LINE', '<unknown>'))
            c.reportSource = 'report-%s.html' % report
            c.navMarkup = """<a href="/%s">Report %s</a> > """ % (c.reportSource,
                                                                  report)
            c.files = [path]
        return c
    def send_report(self, report, configOverrides=None):
        """Render the bug-filing form for *report* (None = crash summary)."""
        def getConfigOption(section, field):
            # Per-request overrides win over the persisted config.
            if (configOverrides is not None and
                section in configOverrides and
                field in configOverrides[section]):
                return configOverrides[section][field]
            return self.server.config.get(section, field)
        # report is None is used for crashes
        try:
            c = self.get_report_context(report)
        except ValueError as e:
            # NOTE(review): e.message is Python 2 only; str(e) is portable.
            return self.send_error(400, e.message)
        title = c.title
        description= c.description
        reportingFor = c.navMarkup
        if c.reportSource is None:
            extraIFrame = ""
        else:
            extraIFrame = """\
<iframe src="/%s" width="100%%" height="40%%"
        scrolling="auto" frameborder="1">
  <a href="/%s">View Bug Report</a>
</iframe>""" % (c.reportSource, c.reportSource)
        reporterSelections = []
        reporterOptions = []
        try:
            active = int(getConfigOption('ScanView','reporter'))
        except:
            active = 0
        for i,r in enumerate(self.server.reporters):
            selected = (i == active)
            if selected:
                selectedStr = ' selected'
            else:
                selectedStr = ''
            reporterSelections.append('<option value="%d"%s>%s</option>'%(i,selectedStr,r.getName()))
            options = '\n'.join([ o.getHTML(r,title,getConfigOption) for o in r.getParameters()])
            display = ('none','')[selected]
            reporterOptions.append("""\
<tr id="%sReporterOptions" style="display:%s">
  <td class="form_label">%s Options</td>
  <td class="form_value">
    <table class="form_inner_group">
%s
    </table>
  </td>
</tr>
"""%(r.getName(),display,r.getName(),options))
        reporterSelections = '\n'.join(reporterSelections)
        reporterOptionsDivs = '\n'.join(reporterOptions)
        reportersArray = '[%s]'%(','.join([repr(r.getName()) for r in self.server.reporters]))
        if c.files:
            fieldSize = min(5, len(c.files))
            attachFileOptions = '\n'.join(["""\
<option value="%d" selected>%s</option>""" % (i,v) for i,v in enumerate(c.files)])
            attachFileRow = """\
<tr>
  <td class="form_label">Attach:</td>
  <td class="form_value">
<select style="width:100%%" name="files" multiple size=%d>
%s
</select>
  </td>
</tr>
""" % (min(5, len(c.files)), attachFileOptions)
        else:
            attachFileRow = ""
        result = """<html>
<head>
  <title>File Bug</title>
  <link rel="stylesheet" type="text/css" href="/scanview.css" />
</head>
<script language="javascript" type="text/javascript">
var reporters = %(reportersArray)s;
function updateReporterOptions() {
  index = document.getElementById('reporter').selectedIndex;
  for (var i=0; i < reporters.length; ++i) {
    o = document.getElementById(reporters[i] + "ReporterOptions");
    if (i == index) {
      o.style.display = "";
    } else {
      o.style.display = "none";
    }
  }
}
</script>
<body onLoad="updateReporterOptions()">
<h3>
<a href="/">Summary</a> >
%(reportingFor)s
File Bug</h3>
<form name="form" action="/report_submit" method="post">
<input type="hidden" name="report" value="%(report)s">

<table class="form">
<tr><td>
<table class="form_group">
<tr>
  <td class="form_clabel">Title:</td>
  <td class="form_value">
    <input type="text" name="title" size="50" value="%(title)s">
  </td>
</tr>
<tr>
  <td class="form_label">Description:</td>
  <td class="form_value">
<textarea rows="10" cols="80" name="description">
%(description)s
</textarea>
  </td>
</tr>

%(attachFileRow)s

</table>
<br>
<table class="form_group">
<tr>
  <td class="form_clabel">Method:</td>
  <td class="form_value">
    <select id="reporter" name="reporter" onChange="updateReporterOptions()">
    %(reporterSelections)s
    </select>
  </td>
</tr>
%(reporterOptionsDivs)s
</table>
<br>
</td></tr>
<tr><td class="form_submit">
  <input align="right" type="submit" name="Submit" value="Submit">
</td></tr>
</table>
</form>

%(extraIFrame)s

</body>
</html>"""%locals()
        return self.send_string(result)
    def send_head(self, fields=None):
        """Route the request: dynamic endpoints first, then static files."""
        if (self.server.options.onlyServeLocal and
            self.client_address[0] != '127.0.0.1'):
            return self.send_error(401, 'Unauthorized host.')
        if fields is None:
            fields = {}
        self.fields = fields
        o = urlparse(self.path)
        self.fields = parse_query(o.query, fields)
        path = posixpath.normpath(unquote(o.path))
        # Split the components and strip the root prefix.
        components = path.split('/')[1:]
        # Special case some top-level entries.
        if components:
            name = components[0]
            if len(components)==2:
                if name=='report':
                    return self.send_report(components[1])
                elif name=='open':
                    return self.send_open_report(components[1])
            elif len(components)==1:
                if name=='quit':
                    self.server.halt()
                    return self.send_string('Goodbye.', 'text/plain')
                elif name=='report_submit':
                    return self.send_report_submit()
                elif name=='report_crashes':
                    # Pre-select the Radar reporter for crash reports.
                    overrides = { 'ScanView' : {},
                                  'Radar' : {},
                                  'Email' : {} }
                    for i,r in enumerate(self.server.reporters):
                        if r.getName() == 'Radar':
                            overrides['ScanView']['reporter'] = i
                            break
                    overrides['Radar']['Component'] = 'llvm - checker'
                    overrides['Radar']['Component Version'] = 'X'
                    return self.send_report(None, overrides)
                elif name=='favicon.ico':
                    return self.send_path(posixpath.join(kShare,'bugcatcher.ico'))
        # Match directory entries.
        if components[-1] == '':
            components[-1] = 'index.html'
        relpath = '/'.join(components)
        path = posixpath.join(self.server.root, relpath)
        if self.server.options.debug > 1:
            print('%s: SERVER: sending path "%s"'%(sys.argv[0],
                                                   path), file=sys.stderr)
        return self.send_path(path)
    def send_404(self):
        """Send a Not Found error and return None (no body stream)."""
        self.send_error(404, "File not found")
        return None
    def send_path(self, path):
        """Serve *path*, patching text files and streaming binary files."""
        # If the requested path is outside the root directory, do not open it
        rel = os.path.abspath(path)
        if not rel.startswith(os.path.abspath(self.server.root)):
            return self.send_404()
        ctype = self.guess_type(path)
        if ctype.startswith('text/'):
            # Patch file instead
            return self.send_patched_file(path, ctype)
        else:
            mode = 'rb'
            try:
                f = open(path, mode)
            except IOError:
                return self.send_404()
            return self.send_file(f, ctype)
    def send_file(self, f, ctype):
        """Send headers for the open file *f* and return it for streaming."""
        # Patch files to add links, but skip binary files.
        self.send_response(200)
        self.send_header("Content-type", ctype)
        fs = os.fstat(f.fileno())
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f
    def send_string(self, s, ctype='text/html', headers=True, mtime=None):
        """Send *s* as the response body; returns a BytesIO for copyfile()."""
        encoded_s = s.encode()
        if headers:
            self.send_response(200)
            self.send_header("Content-type", ctype)
            self.send_header("Content-Length", str(len(encoded_s)))
            if mtime is None:
                mtime = self.dynamic_mtime
            self.send_header("Last-Modified", self.date_time_string(mtime))
            self.end_headers()
        return BytesIO(encoded_s)
    def send_patched_file(self, path, ctype):
        """Serve a text file with kReportReplacements applied."""
        # Allow a very limited set of variables. This is pretty gross.
        variables = {}
        variables['report'] = ''
        m = kReportFileRE.match(path)
        if m:
            variables['report'] = m.group(2)
        try:
            f = open(path,'r')
        except IOError:
            return self.send_404()
        fs = os.fstat(f.fileno())
        data = f.read()
        for a,b in kReportReplacements:
            data = a.sub(b % variables, data)
        return self.send_string(data, ctype, mtime=fs.st_mtime)
def create_server(address, options, root):
    """Create a ScanViewServer bound to *address* serving reports from *root*."""
    import Reporter
    # Discover the available bug-filing back ends and hand them to the server.
    return ScanViewServer(address, ScanViewRequestHandler, root,
                          Reporter.getReporters(), options)
| |
from __future__ import division, print_function, unicode_literals
import six
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import pyglet
from pyglet.gl import *
from pyglet import image
from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *
from cocos.actions import *
from cocos.sprite import Sprite
from cocos.text import Label
from Images import *
from const import *
from HUD import *
from Skills import Skills
from monsters import DM_Monsters
from magic import DM_Magic
from artefacts import ArtEffects
from controler import Controler
import random
class GameModel( pyglet.event.EventDispatcher):
    """Central game state: map tiles, heroes, monsters and the DM interface.
    Dispatches pyglet events for game over, victory and artefact pickup."""
    def __init__(self):
        self.end_of_init()
    def end_of_init(self):
        """Build the initial game state (split out of __init__ so it can be re-run)."""
        super(GameModel, self).__init__()
        self.mapx = Quad_side
        self.mapy = Quad_side
        self.init_map()
        w, h = director.get_window_size()
        sc = 1920//w  # NOTE(review): computed but never used here.
        self.heroes = {}
        # The four heroes start in the four map corners.
        self.heroes['wizard'] = Hero('wizard', 0, self, 0, 0)
        self.heroes['priest'] = Hero('priest', 1, self, 0, Quad_side - 1)
        self.heroes['warrior'] = Hero('warrior', 2, self, Quad_side - 1, Quad_side - 1)
        self.heroes['rogue'] = Hero('rogue', 3, self, Quad_side - 1, 0)
        self.interface_DM = Interface(self)
        self.monsters = []
        self.alive_heroes = ['wizard', 'priest', 'warrior', 'rogue']
        self.actual_hero = ['wizard']
        self.controler = Controler(self)
        self.skill_use = 0
        # Reveal the four corner tiles the heroes start on.
        c = self.map.get((0, 0))
        c.open_P = 1
        c = self.map.get((Quad_side - 1, 0))
        c.open_P = 1
        c = self.map.get((Quad_side - 1, Quad_side - 1))
        c.open_P = 1
        c = self.map.get((0, Quad_side - 1))
        c.open_P = 1
        # The centre tile holds the treasure the heroes must reach.
        # NOTE(review): true division yields a float key here; this only hits
        # an existing (int, int) tile when Quad_side is odd — confirm.
        c = self.map.get(((Quad_side - 1)/2, (Quad_side - 1)/2))
        c.name = 'treasure'
        c.open_P = 1
    def init_map(self):
        """Create the mapx x mapy grid of Tile objects plus UI bookkeeping."""
        self.map = {}
        # Sentinel: no tile currently highlighted/selected.
        self.map['actual_tile'] = (-1, -1)
        w, h = director.get_window_size()
        sc = 1920/w
        for i in range (self.mapx):
            for j in range (self.mapy):
                self.map[(i, j)] = Tile(Images.floortile, ((2*i + 1)*Tile_size/2 + left_space)//sc,
                                        ((2*j + 1)*Tile_size/2 + (h*sc - (Quad_side*Tile_size))/2)//sc, i, j)
    def on_gameover(self):
        """Dispatch the game-over event once every hero is dead."""
        if (len(self.alive_heroes) == 0):
            self.dispatch_event("on_game_over")
    def on_youwin(self):
        """Dispatch the victory event (treasure reached)."""
        self.dispatch_event("on_you_win")
    def on_artget(self, hero):
        """Open the artefact-choice menu for *hero* and notify listeners."""
        Art_menu = Art_Menu(self, hero)
        self.dispatch_event("on_art_get", Art_menu)
class Tile():
    """One dungeon map cell: sprite, fog-of-war smoke and build state."""
    def __init__(self, image, posx, posy ,map_posx, map_posy):
        w, h = director.get_window_size()
        sc = 1920/w
        # NOTE(review): the `image` parameter is never used; the sprite image
        # is always looked up in Images.tile_image instead.
        self.namenumber = 0
        self.name = 'floor'  # tile type; key into Images.tile_image
        self.map_pos_x = map_posx
        self.map_pos_y = map_posy
        self.open_P = 0      # revealed to the players?
        self.sprite = Sprite(Images.tile_image[self.name], (posx, posy), scale = 1/sc)
        self.sprite_smoke = Sprite(Images.tile_image['smoke'], (posx, posy), scale = 1/sc)
        self.monster = 0     # occupying monster wrapper, or 0
        self.smoke = 0       # remaining turns of smoke cover
        self.buildav = 1     # may the DM build here this turn?
        self.wall = 0
    def next_turn(self):
        """Player-turn upkeep: let smoke cover decay."""
        if self.smoke:
            self.smoke = self.smoke - 1
    def next_turn_DM(self):
        """DM-turn upkeep: building is blocked on revealed or smoking tiles."""
        self.buildav = 1
        if (self.open_P) or (self.smoke):
            self.buildav = 0
    def on_click_P(self):
        """Reveal the tile when a player clicks it."""
        self.open_P = 1
    def draw(self):
        # Re-fetch the image each frame so tile-type changes show immediately.
        self.sprite.image = Images.tile_image[self.name]
        self.sprite.draw()
        if self.smoke:
            self.sprite_smoke.draw()
class Interface(Layer):
    """DM HUD layer: money counter plus one status portrait per hero."""
    # NOTE(review): unlike BuildingMenu, __init__ never calls
    # super(Interface, self).__init__() despite deriving from Layer —
    # confirm this is intentional (the object appears to be drawn manually).
    def __init__(self, model):
        self.model = model
        self.portraits = {'wizard': Hero_portriat_DM(), 'priest': Hero_portriat_DM(),
                          'warrior': Hero_portriat_DM(),'rogue': Hero_portriat_DM()}
        for c in self.portraits:
            self.portraits[c].reload(model, c)
        self.money = starting_money
    def draw(self):
        """Draw the money label and every hero portrait."""
        w, h = director.get_window_size()
        sc = 1920/w
        # The label is rebuilt each frame so the displayed amount stays current.
        self.label = Label('%d' %self.money, font_name='Times New Roman', font_size=20//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255))
        self.label.position = (1670//sc, 1030//sc)
        self.label.draw()
        for c in self.portraits:
            self.portraits[c].draw()
class BuildingMenu(Layer):
    """DM build menu: four category buttons plus the currently open sub-menu."""
    def __init__(self):
        super(BuildingMenu, self).__init__()
        self.end_of_init()
    def end_of_init(self):
        """Create the category sub-menus and the scroll background sprite."""
        w, h = director.get_window_size()
        sc = 1920//w
        self.visible = 0
        self.b_types = {}
        self.b_types[0] = SecondB_menu('trap', 0)
        self.b_types[1] = SecondB_menu('monster', 1)
        self.b_types[2] = SecondB_menu('magic', 2)
        self.b_types[3] = SecondB_menu('wall', 3)
        self.scroll = Sprite(Images.menu_scroll, (1500//sc, 380//sc), scale = 1/sc)
        self.active = -1  # index of the open category; -1 = none open
    def draw(self):
        """Draw the scroll, all category buttons and the active sub-menu."""
        if self.visible:
            w, h = director.get_window_size()
            sc = 1920//w
            self.scroll.draw()
            for b_type in self.b_types:
                self.b_types[b_type].sprite.draw()
                if self.active == b_type:
                    self.b_types[b_type].draw()
class SecondB_menu(Layer):
    """Second-level build menu: the row of buildable objects for one category."""
    def __init__(self, name, number):
        super(SecondB_menu, self).__init__()
        w, h = director.get_window_size()
        sc = 1920//w
        self.name = name
        self.number = number
        # Category button sprite; each category sits one slot lower.
        self.sprite = Sprite(Images.building_menu[self.name], (1100//sc, (580 - self.number*B_Menu_size*1.1)//sc), scale = 1/sc)
        self.objects = Object_list[self.name]
    def draw(self):
        """Highlight the category button and draw each buildable object's icon."""
        w, h = director.get_window_size()
        sc = 1920//w
        frame = Sprite(Images.frame_black, (1100//sc, (580 - self.number*B_Menu_size*1.1)//sc), scale = 1/sc)
        frame.draw()
        for idx in range(len(self.objects)):
            dx, dy = self.get_coordinates(idx)
            icon = Sprite(Images.B_images[self.name][idx],
                          ((1315 + dx)//sc, (505 + dy)//sc), scale = 1/sc)
            icon.draw()
    def get_coordinates(self, number):
        """Return the (dx, dy) grid offset of slot *number* (4 per row)."""
        col = number%4
        row = number//4
        step = m_a+ m_b
        return(col*step, -row*step)
class Magic():
    """Thin wrapper binding a spell name to the game model."""
    def __init__(self, name, model):
        self.name = name
        self.model = model
    def cast(self):
        # Resolve the spell implementation from the DM_Magic table and apply it.
        DM_Magic[self.name].cast(self.model)
class Hero_portriat_DM():
    """DM-side hero portrait: coloured icon + health label while the hero is
    alive, greyed-out icon once dead."""

    # Icon-centre (x, y) per hero, in 1920-wide design coordinates; the
    # health label sits 30 units below the icon.
    _POSITIONS = {'wizard': (1600, 900), 'priest': (1720, 900),
                  'warrior': (1600, 780), 'rogue': (1720, 780)}

    def reload(self, model, hero_name):
        """(Re)build the sprites and health label for *hero_name*.

        Replaces four near-identical copy-paste branches of the original
        with a single lookup into _POSITIONS.
        """
        w, h = director.get_window_size()
        sc = 1920/w
        self.model = model
        self.name = hero_name
        self.hero = self.model.heroes[self.name]
        px, py = self._POSITIONS[self.name]
        self.sprite = Sprite(Images.hero_icons[self.name], (px//sc, py//sc), scale = 1/sc)
        self.sprite_black = Sprite(Images.hero_icons_black[self.name], (px//sc, py//sc), scale = 1/sc)
        self.label = Label('%d' %self.hero.stats.health, font_name='Times New Roman', font_size=20//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )
        self.label.position = px//sc, (py - 30)//sc

    def draw(self):
        """Draw the portrait, refreshing the displayed health first."""
        if self.hero.alive:
            self.sprite.draw()
            # BUG FIX: the original assigned the health number to
            # label.font_name; update the rendered text instead.
            self.label.element.text = '%d' %self.hero.stats.health
            self.label.draw()
        else:
            self.sprite_black.draw()
class Hero():
    """A hero exploring the dungeon: map position, sprite, stats, skills,
    status effects and artefact loot."""
    def __init__(self, name, number ,model, map_posx, map_posy):
        w, h = director.get_window_size()
        sc = 1920/w
        self.name = name
        self.number = number
        self.map_posx = map_posx
        self.map_posy = map_posy
        self.sprite = Sprite(Images.heroes[self.name], scale = 1/sc)
        self.sprite.position = (((2*self.map_posx + 1)*Tile_size/2 + left_space)//sc, ((2*self.map_posy + 1)*Tile_size/2 + (h*sc - Quad_side*Tile_size)/2)//sc)
        self.portrait = Portraits(self)
        self.icon = Icons(self)
        self.alive = 1
        self.techstats = Tech_Stats(self.name)
        self.stats = Stats(self.name)
        self.staff = {}  # presumably the artefacts carried (max 5 below) — verify
        self.av_art = list(Artefacts)  # artefacts still available to obtain
        self.art_cell = 0
        self.model = model
        self.turnav = self.techstats.speed  # moves remaining this turn
        self.stats.health = self.techstats.max_health
        self.skills = []
        self.effects = Hero_effects(self, self.model)
        for s in Hero_skills[self.name]:
            self.skills.append(Skill(s[0], s[1], self, self.model))
    def replace_hero(self, map_posx, map_posy):
        """Move the hero to a new map cell and reposition its sprite."""
        w, h = director.get_window_size()
        sc = 1920/w
        self.map_posx = map_posx
        self.map_posy = map_posy
        self.sprite.position = (((2*self.map_posx + 1)*Tile_size/2 + left_space)//sc, ((2*self.map_posy + 1)*Tile_size/2 + (h*sc - Quad_side*Tile_size)/2)//sc)
    def on_turn(self, tile):
        """Resolve the hero stepping onto *tile*: traps, fights, exp/luck
        gain, movement and victory check — in that order."""
        self.turnav = self.turnav - 1
        replace_hero = 1
        if (tile.name == 'lava'):
            self.model.controler.damage_hero(self.name, lava_damage)
        if (tile.name == 'ice'):
            # Ice ends the current turn and makes the hero skip the next one.
            self.turnav = 0
            self.effects.miss_turn = 1
        if (tile.name == 'poison'):
            self.effects.trap_poison = trap_poison_duration
        if (tile.name == 'wall'):
            replace_hero = 0
        if (tile.monster):
            result = tile.monster.monster.fight(self)
            if result == 'lose':
                # Lost fight: the hero stays on its previous tile.
                replace_hero = 0
            if result == 'win':
                self.on_win()
        if (tile.open_P == 0):
            # Exploring an unrevealed tile grants experience.
            self.stats.exp = self.stats.exp + self.techstats.exp_per_tile
            if self.stats.lvl < maxlvl:
                if (self.stats.exp >= ExpNeed[self.stats.lvl]):
                    self.model.controler.lvlup_hero(self.name)
            if (tile.name == 'floor'):
                # Plain floor also builds luck towards an artefact drop.
                self.stats.luck = self.stats.luck + self.techstats.luck_per_tile
                if self.stats.luck >= 100:
                    if (self.stats.lvl >= self.art_cell + 1):
                        self.stats.luck = self.stats.luck - 100
                        if (len(self.staff) < 5):
                            self.stats.luck = 0
                            self.model.on_artget(self)
                    else:
                        self.stats.luck = self.stats.luck - self.techstats.luck_per_tile
        if replace_hero:
            self.replace_hero(tile.map_pos_x, tile.map_pos_y)
            if (tile.name == 'treasure'):
                self.model.on_youwin()
        self.effects.on_turn_effects()
    def on_win(self):
        """Post-fight hook: with drain_life active, absorb monster stats."""
        if self.effects.drain_life:
            self.stats.health = self.stats.health + drain_health
            self.techstats.max_health = self.techstats.max_health + drain_health
            self.stats.armor = self.stats.armor + drain_armor
            self.stats.attack = self.stats.attack + drain_attack
    def draw(self):
        self.sprite.draw()
        self.icon.draw()
    def reload(self):
        """Start-of-turn reset: restore movement and tick status effects."""
        self.turnav = self.techstats.speed
        self.effects.reload()
class Hero_effects():
    """Per-hero status effects and their per-turn upkeep."""
    def __init__(self, hero, model):
        self.hero = hero
        self.model = model
        self.miss_turn = 0        # turns the hero must skip
        self.trap_poison = 0      # remaining poison ticks
        self.circle_of_light = 0  # remaining turns of the reveal aura
        self.drain_life = 0       # steal stats on fight victory when set
        self.revive_art = 0
    def on_turn_effects(self):
        """Apply effects triggered by the hero's own move."""
        if self.circle_of_light:
            # Reveal every tile adjacent to the hero's new position.
            for dx, dy in adject_tiles:
                i = self.hero.map_posx + dx
                j = self.hero.map_posy + dy
                if (i >= 0) and (j >= 0) and (i < Quad_side) and (j < Quad_side):
                    tile = self.model.map.get((i, j))
                    tile.open_P = 1
    def reload(self):
        """Start-of-turn upkeep: tick skill cooldowns, decay timed effects."""
        for skill in self.hero.skills:
            skill.skill.next_turn()
        if self.miss_turn:
            # A skipped turn: zero out the hero's movement allowance.
            self.hero.turnav = 0
            self.miss_turn = self.miss_turn - 1
        if self.trap_poison:
            self.model.controler.damage_hero(self.hero.name, trap_poison_damage)
            self.trap_poison = self.trap_poison - 1
        if self.circle_of_light:
            self.circle_of_light = self.circle_of_light - 1
class Artefact():
    """Collectible artefact: HUD icon plus an optional effect on pickup."""
    def __init__(self, name, number):
        w, h = director.get_window_size()
        sc = 1920/w
        self.name = name
        self.number = number
        # Vertical slot in the artefact column comes from the art_pos table.
        self.sprite = Sprite(Images.art_image[self.name],
                             ((1562 + 75/2)//sc, (899 - art_pos[self.number])//sc),
                             scale = 1/sc)
    def on_get(self, hero):
        # Apply the artefact's effect to the hero, if one is defined for it.
        effect = ArtEffects.get(self.name)
        if effect:
            effect(hero).effect()
    def draw(self):
        self.sprite.draw()
class Skill():
    """HUD wrapper for one hero skill: icon, cooldown overlay, activation."""
    def __init__(self, name, number, hero, model):
        w, h = director.get_window_size()
        sc = 1920/w
        self.name = name
        self.number = number
        self.sprite = Sprite(Images.skill_image[self.name], ((1208)//sc, (899 - skill_pos[self.number])//sc), scale = 1/sc)
        # The actual skill behaviour is instantiated from the Skills table.
        self.skill = Skills[self.name](hero , model)
    def draw(self):
        """Draw the icon once learnt, overlaying the remaining cooldown."""
        w, h = director.get_window_size()
        sc = 1920/w
        if (self.skill.learnt):
            self.sprite.draw()
            if self.skill.cd_left:
                # Rebuilt each frame so the countdown stays current.
                self.label = Label('%d' %self.skill.cd_left, font_name='Times New Roman', font_size=20//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )
                self.label.position = ((1208)//sc, (899 - skill_pos[self.number])//sc)
                self.label.draw()
    def use(self):
        """Activate the skill if it is currently available."""
        if self.skill.available:
            self.skill.use()
class Tech_Stats():
    """Static, hero-class-dependent technical parameters from the Tech_stat table."""
    def __init__(self, hero_name):
        base_row = Tech_stat[hero_name]
        self.speed = base_row['speed']
        self.max_health = base_row['maxhp']
        # Progression rates are global constants shared by every hero class.
        self.exp_per_tile = exp_per_tile
        self.luck_per_tile = luck_per_tile
class Stats():
    """Mutable combat/progression stats for a hero, seeded from Tech_stat."""
    def __init__(self, hero_name):
        base_row = Tech_stat[hero_name]
        # Progression values start at their initial state.
        self.exp = 0
        self.int = 0
        self.health = 0
        self.lvl = 1
        self.luck = 0
        # Combat values come from the per-class base table.
        self.attack = base_row['attack']
        self.armor = base_row['armor']
        # Aggregate "power" rating: attack weighted double, plus armor.
        self.power = self.attack*2 + self.armor
class HeroStats(Label):
    """Panel of labels showing a hero's numeric stats (health, exp, luck, level,
    attack, armor).

    NOTE(review): subclasses Label but never calls Label.__init__ and only
    aggregates child labels — composition may be the intended design; confirm.
    """
    def __init__(self, hero):
        w, h = director.get_window_size()
        sc = 1920/w  # scale factor relative to the 1920px-wide design
        self.hero = hero
        self.health_label = Label('%d' %self.hero.stats.health, font_name='Times New Roman', font_size=28//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )
        self.health_label.position = 1300//sc, 315//sc
        self.exp_label = Label('%d' %self.hero.stats.exp, font_name='Times New Roman', font_size=28//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )
        self.exp_label.position = 1300//sc, 155//sc
        self.luck_label = Label('%d%%' %self.hero.stats.luck, font_name='Times New Roman', font_size=28//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )
        self.luck_label.position = 1300//sc, 245//sc
        self.lvl_label = Label('%d' %self.hero.stats.lvl, font_name='Times New Roman', font_size=18//sc, anchor_x='center', anchor_y='center', color = (0, 0, 0, 255) )
        self.lvl_label.position = 1618//sc, 112//sc
        self.attack_label = Label('%d' %self.hero.stats.attack, font_name='Times New Roman', font_size=28//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )
        self.attack_label.position = 1550//sc, 315//sc
        self.armor_label = Label('%d' %self.hero.stats.armor, font_name='Times New Roman', font_size=28//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255))
        self.armor_label.position = 1550//sc, 245//sc
    def draw(self):
        """Render all stat labels."""
        self.health_label.draw()
        self.lvl_label.draw()
        self.exp_label.draw()
        self.luck_label.draw()
        self.attack_label.draw()
        self.armor_label.draw()
class Portraits():
    """Draws the hero's side panel: portrait image, stats, artefacts and skills."""
    def __init__(self, hero):
        self.hero = hero
    def draw(self):
        """Render the full panel; sprites are rebuilt each frame."""
        window_w, _window_h = director.get_window_size()
        scale_factor = 1920/window_w
        portrait = Sprite(Images.portraits[self.hero.name], (1400//scale_factor, 480//scale_factor), scale = 1/scale_factor)
        portrait.draw()
        stats_panel = HeroStats(self.hero)
        stats_panel.draw()
        for art_name in self.hero.staff:
            self.hero.staff[art_name].draw()
        for hero_skill in self.hero.skills:
            hero_skill.draw()
class Icons():
    """Hero turn-order icon in the top bar; drawn black once the hero has acted."""
    # Fixed slot order of the hero classes in the icon strip; replaces the
    # original four-way if chain with a single lookup.
    _SLOTS = {'wizard': 0, 'priest': 1, 'warrior': 2, 'rogue': 3}
    def __init__(self, hero):
        self.hero = hero
        # As in the original, an unknown hero name leaves self.number unset.
        if self.hero.name in self._SLOTS:
            self.number = self._SLOTS[self.hero.name]
    def draw(self):
        """Render the colored icon if the hero can still act this turn, else the
        blacked-out variant."""
        w, h = director.get_window_size()
        sc = 1920/w  # scale factor relative to the 1920px-wide design
        if (self.hero.turnav > 0):
            c = Sprite(Images.hero_icons[self.hero.name], ((1235 + Icon_size*self.number*1.1)//sc, 960//sc), scale = 1/sc)
        else:
            c = Sprite(Images.hero_icons_black[self.hero.name], ((1235 + Icon_size*self.number*1.1)//sc, 960//sc), scale = 1/sc)
        c.draw()
class Monster():
    """Wraps a concrete monster instance placed on a map tile."""
    def __init__(self, model, tile, m_name):
        # Dropped the original's unused w/h/sc locals (sc was also computed
        # with truncating // division, unlike the rest of the UI code).
        self.name = m_name
        self.tile = tile
        # Instantiate the monster class registered under this name.
        self.monster = DM_Monsters[self.name](model, tile)
class Art_Menu():
    """Popup menu listing artefacts the hero may take for the next art slot."""
    def __init__(self, model, hero):
        w, h = director.get_window_size()
        # Float scale factor, matching the other UI classes. The original used
        # integer division (1920//w), which truncates to 0 for any window wider
        # than 1920px and then crashes the //sc position math below with
        # ZeroDivisionError.
        sc = 1920/w
        self.hero = hero
        self.model = model
        # Candidate artefacts for the hero's next free art cell.
        self.arts = Art_menu[hero.name][hero.art_cell + 1]
        self.art_sprites = {}
        for number in range(len(self.arts)):
            self.art_sprites[number] = Sprite(Images.art_image[self.arts[number]],
                (art_menu_pos_x[number]//sc, art_menu_pos_y[number]//sc), scale = 2/sc)
    def draw(self):
        """Draw only the artefacts currently available to the hero."""
        for art in self.art_sprites:
            if (self.hero.av_art.count(self.arts[art])):
                self.art_sprites[art].draw()
# Register the custom events GameModel emits so the framework can dispatch them.
GameModel.register_event_type('on_game_over')
GameModel.register_event_type('on_you_win')
GameModel.register_event_type('on_art_get')
| |
# Copyright 2014 NeuroData (http://neurodata.io)
# Upload Sizes for max memory and number of fields
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import django.http
from django.views.decorators.cache import cache_control
from django.contrib.auth.decorators import login_required
import MySQLdb
import cStringIO
import re
from ndauth.authentication import PublicAuthentication, AnonAllowedAuthentication
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.permissions import IsAuthenticated, AllowAny
from nduser.models import Token
import webservices.ndwsrest as ndwsrest
import webservices.ndwsprojingest as ndwsprojingest
from webservices.ndwserror import NDWSError, IncorrectSyntaxError
import logging
logger=logging.getLogger("neurodata")
GET_SLICE_SERVICES = ['xy', 'yz', 'xz']
GET_ANNO_SERVICES = ['xyanno', 'yzanno', 'xzanno']
POST_SERVICES = ['hdf5', 'npz', 'raw', 'hdf5_async', 'propagate', 'tiff', 'blosc', 'blaze']
@api_view(['GET','POST'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def cutout (request, webargs):
  """Restful URL for all read services to annotation projects.

  Parses webargs as token/channel/service/cutoutargs (defaulting the channel
  to 'default' when omitted) and dispatches on the service name.  GET returns
  the cutout in the requested format; POST uploads data.
  """
  try:
    # Groups: token, optional comma-separated channel list, service name,
    # remaining cutout arguments.
    m = re.match(r"(\w+)/(?P<channel>[\w+,-]+)?/?(xy|xz|yz|tiff|hdf5|jpeg|blosc|blaze|npz|raw|zip|id|diff|ids|xyanno|xzanno|yzanno)/([\w\.,/-]*)$", webargs)
    [token, channel, service, cutoutargs] = [i for i in m.groups()]
    if channel is None:
      # No channel given: rewrite the URL with the 'default' channel.
      webargs = '{}/default/{}/{}'.format(token, service, cutoutargs)
  except Exception as e:
    logger.warning("Incorrect format for arguments {}. {}".format(webargs, e))
    raise IncorrectSyntaxError("Incorrect format for arguments {}. {}".format(webargs, e))
  try:
    # GET methods
    if request.method == 'GET':
      if service in GET_SLICE_SERVICES+GET_ANNO_SERVICES:
        return django.http.HttpResponse(ndwsrest.getCutout(webargs), content_type="image/png" )
      elif service in ['hdf5']:
        # Build a download filename from the webargs (commas/slashes sanitized).
        fname = re.sub ( r',','_', webargs )
        fname = re.sub ( r'/','-', fname )
        response = django.http.HttpResponse(ndwsrest.getCutout(webargs), content_type="product/hdf5" )
        response['Content-Disposition'] = "attachment; filename={}ndcutout.h5".format(fname)
        return response
      elif service in ['blosc', 'diff']:
        fname = re.sub ( r',','_', webargs )
        fname = re.sub ( r'/','-', fname )
        response = django.http.HttpResponse(ndwsrest.getCutout(webargs), content_type="product/blosc" )
        response['Content-Disposition'] = "attachment; filename={}ndcutout.blosc".format(fname)
        return response
      elif service in ['jpeg']:
        fname = re.sub ( r',','_', webargs )
        fname = re.sub ( r'/','-', fname )
        response = django.http.HttpResponse(ndwsrest.getCutout(webargs), content_type="product/jpeg" )
        response['Content-Disposition'] = "attachment; filename={}ndcutout.jpeg".format(fname)
        return response
      elif service in ['npz']:
        return django.http.HttpResponse(ndwsrest.getCutout(webargs), content_type="product/npz" )
      elif service in ['raw']:
        fname = re.sub ( r',','_', webargs )
        fname = re.sub ( r'/','-', fname )
        response = django.http.HttpResponse(ndwsrest.getCutout(webargs), content_type="product/raw" )
        response['Content-Disposition'] = "attachment; filename={}ndcutout.raw".format(fname)
        return response
      elif service in ['tiff']:
        # build a file name from the webarguments
        fname = re.sub ( r',','_', webargs )
        fname = re.sub ( r'/','-', fname )
        response = django.http.HttpResponse(ndwsrest.getCutout(webargs), content_type="image/tiff" )
        response['Content-Disposition'] = "attachment; filename={}ndcutout.tif".format(fname)
        return response
      elif service in ['zip']:
        return django.http.HttpResponse(ndwsrest.getCutout(webargs), content_type="product/zip" )
      elif service in ['id','ids']:
        return django.http.HttpResponse(ndwsrest.getCutout(webargs))
      elif service in ['blaze']:
        logger.warning("HTTP Bad request. {} service not supported for GET. Only for POST".format(service))
        return django.http.HttpResponseBadRequest("{} service not supported for GET. Only for POST".format(service))
      else:
        logger.warning("HTTP Bad request. Could not find service {}".format(service))
        return django.http.HttpResponseBadRequest("Could not find service {}".format(service))
    # RBTODO control caching?
    # POST methods
    elif request.method == 'POST':
      if service in POST_SERVICES:
        # NOTE(review): the HttpResponse built from postCutout is discarded;
        # the client only ever receives the plain "Success" page below.
        django.http.HttpResponse(ndwsrest.postCutout(webargs, request.body))
        return django.http.HttpResponse("Success", content_type='text/html')
      else:
        logger.warning("HTTP Bad request. Could not find service {}".format(service))
        return django.http.HttpResponseBadRequest("Could not find service {}".format(service))
    else:
      logger.warning("Invalid HTTP method {}. Not GET or POST.".format(request.method))
      return django.http.HttpResponseBadRequest("Invalid HTTP method {}. Not GET or POST.".format(request.method))
  except NDWSError as e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error as e:
    return django.http.HttpResponseNotFound(e)
  except Exception as e:
    logger.exception("Unknown exception in getCutout. {}".format(e))
    raise NDWSError("Unknown exception in getCutout. {}".format(e))
@api_view(['GET','POST'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def nifti (request, webargs):
  """Get put interface for nifti files.

  GET downloads a .nii.gz attachment; POST uploads one.  Known service errors
  map to 404; anything else is logged and re-raised as NDWSError.
  """
  try:
    if request.method == 'GET':
      # Sanitize webargs into a filename-safe string for the download header.
      fname = "".join([x if x.isalnum() else "_" for x in webargs])
      response = django.http.HttpResponse(ndwsrest.getNIFTI(webargs), content_type="product/nii" )
      response['Content-Disposition'] = "attachment; filename={}.nii.gz".format(fname)
      return response
    elif request.method == 'POST':
      return django.http.HttpResponse(ndwsrest.putNIFTI(webargs,request.body))
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in NIFTI. {}".format(e))
    raise NDWSError("Unknown exception in NIFTI. {}".format(e))
  # NOTE(review): this bare raise is only reached if neither branch above
  # returns, and with no active exception it would itself error — looks like
  # dead code; confirm before removing.
  raise
@api_view(['GET','POST'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def swc (request, webargs):
  """Get put interface for swc tracing files.

  GET downloads a .swc attachment; POST uploads one.
  """
  try:
    if request.method == 'GET':
      # Sanitize webargs into a filename-safe string for the download header.
      fname = "".join([x if x.isalnum() else "_" for x in webargs])
      response = django.http.HttpResponse(ndwsrest.getSWC(webargs), content_type="product/swc" )
      response['Content-Disposition'] = "attachment; filename={}.swc".format(fname)
      return response
    elif request.method == 'POST':
      return django.http.HttpResponse(ndwsrest.putSWC(webargs,request.body))
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in SWC. {}".format(e))
    raise NDWSError("Unknown exception in SWC. {}".format(e))
  # NOTE(review): same unreachable/broken bare raise as in nifti() — likely
  # dead code; confirm before removing.
  raise
@api_view(['GET', 'POST', 'DELETE'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def jsonramon (request, webargs):
  """Get put object interface for JSON-ified RAMON objects.

  GET returns the object as JSON, POST stores one, DELETE removes it.
  """
  # Split once to expose token/channel; only the split's success matters here.
  [token, channel, rest] = webargs.split('/',2)
  try:
    if request.method == 'GET':
      # NOTE(review): debug print left in production code.
      print "JSON get"
      return django.http.HttpResponse(ndwsrest.getJSONAnnotation(webargs), content_type="application/json" )
    elif request.method == 'POST':
      print "JSON post"
      return django.http.HttpResponse(ndwsrest.putJSONAnnotation(webargs,request.body))
    elif request.method == 'DELETE':
      ndwsrest.deleteAnnotation(webargs)
      return django.http.HttpResponse ("Success", content_type='text/html')
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in jsonramon. {}".format(e))
    raise NDWSError("Unknown exception in jsonramon. {}".format(e))
@api_view(['GET', 'POST', 'DELETE'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def annotation (request, webargs):
  """Get put object interface for RAMON objects.

  GET returns JSON or HDF5 depending on the URL's format segment; POST stores
  an object; DELETE removes it.
  """
  [token, channel, rest] = webargs.split('/',2)
  try:
    if request.method == 'GET':
      # check for json vs hdf5
      if rest.split('/')[1] == 'json':
        return django.http.HttpResponse(ndwsrest.getAnnotation(webargs), content_type="application/json" )
      else:
        return django.http.HttpResponse(ndwsrest.getAnnotation(webargs), content_type="product/hdf5" )
    elif request.method == 'POST':
      return django.http.HttpResponse(ndwsrest.putAnnotation(webargs,request.body))
    elif request.method == 'DELETE':
      ndwsrest.deleteAnnotation(webargs)
      return django.http.HttpResponse ("Success", content_type='text/html')
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in annotation. {}".format(e))
    raise NDWSError("Unknown exception in annotation. {}".format(e))
@api_view(['GET'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def csv (request, webargs):
  """Get (not yet put) csv interface for RAMON objects"""
  # Delegates to ndwsrest; known service errors map to 404, unknown ones are
  # logged and re-raised as NDWSError.
  try:
    if request.method == 'GET':
      return django.http.HttpResponse(ndwsrest.getCSV(webargs), content_type="text/html" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in csv. {}".format(e))
    raise NDWSError("Unknown exception in csv. {}".format(e))
@api_view(['GET','POST'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def queryObjects ( request, webargs ):
  """Return a list of objects matching predicates and cutout.

  POST additionally passes the request body (predicates) to the query.
  """
  try:
    if request.method == 'GET':
      return django.http.HttpResponse(ndwsrest.queryAnnoObjects(webargs), content_type="product/hdf5")
    elif request.method == 'POST':
      return django.http.HttpResponse(ndwsrest.queryAnnoObjects(webargs,request.body), content_type="product/hdf5")
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in listObjects. {}".format(e))
    raise NDWSError("Unknown exception in listObjects. {}".format(e))
@api_view(['GET'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def catmaid (request, webargs):
  """Convert a CATMAID request into an cutout."""
  try:
    catmaidimg = ndwsrest.ndcatmaid_legacy(webargs)
    # Serialize the image to PNG in memory before streaming it back.
    fobj = cStringIO.StringIO ( )
    catmaidimg.save ( fobj, "PNG" )
    fobj.seek(0)
    return django.http.HttpResponse(fobj.read(), content_type="image/png")
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in catmaid {}.".format(e))
    raise NDWSError("Unknown exception in catmaid {}.".format(e))
@api_view(['GET'])
@permission_classes((AllowAny,))
def publictokens (request, webargs):
  """Return list of public tokens"""
  # Unauthenticated endpoint (AllowAny); errors map to 404 or NDWSError.
  try:
    return django.http.HttpResponse(ndwsrest.publicTokens(webargs), content_type="application/json" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in publictokens. {}".format(e))
    raise NDWSError("Unknown exception in publictokens. {}".format(e))
@api_view(['GET'])
@permission_classes((AllowAny,))
def publicdatasets (request, webargs):
"""Return list of public datasets"""
try:
return django.http.HttpResponse(ndwsrest.publicDatasets(webargs), content_type="application/json" )
except NDWSError, e:
return django.http.HttpResponseNotFound(e.value)
except MySQLdb.Error, e:
return django.http.HttpResponseNotFound(e)
except Exception, e:
logger.exception("Unknown exception in publictokens. {}".format(e))
raise NDWSError("Unknown exception in publictokens. {}".format(e))
@api_view(['GET'])
@authentication_classes((AnonAllowedAuthentication,))
@permission_classes((PublicAuthentication,))
def jsoninfo (request, webargs):
  """Return project and dataset configuration information"""
  # JSON variant of the project/dataset info endpoints (see xmlinfo/projinfo).
  try:
    return django.http.HttpResponse(ndwsrest.jsonInfo(webargs), content_type="application/json" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in jsoninfo. {}".format(e))
    raise NDWSError("Unknown exception in jsoninfo. {}".format(e))
@api_view(['GET'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def xmlinfo (request, webargs):
  """Return project and dataset configuration information"""
  # XML variant of the project/dataset info endpoints.
  try:
    return django.http.HttpResponse(ndwsrest.xmlInfo(webargs), content_type="application/xml" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in xmlinfo. {}".format(e))
    raise NDWSError("Unknown exception in xmlinfo. {}".format(e))
@api_view(['GET'])
@authentication_classes((AnonAllowedAuthentication,))
@permission_classes((PublicAuthentication,))
def projinfo (request, webargs):
  """Return project and dataset configuration information"""
  # HDF5 variant of the project/dataset info endpoints.
  try:
    return django.http.HttpResponse(ndwsrest.projInfo(webargs), content_type="product/hdf5" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in projInfo. {}".format(e))
    raise NDWSError("Unknown exception in projInfo. {}".format(e))
@api_view(['GET'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def mcFalseColor (request, webargs):
  """Cutout of multiple channels with false color rendering"""
  # Returns a composite PNG; errors map to 404 or NDWSError.
  try:
    return django.http.HttpResponse(ndwsrest.mcFalseColor(webargs), content_type="image/png" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in mcFalseColor. {}".format(e))
    raise NDWSError("Unknown exception in mcFalseColor. {}".format(e))
@api_view(['POST'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def reserve (request, webargs):
  """Preallocate a range of ids to an application."""
  # Returns the reserved id range as JSON.
  try:
    return django.http.HttpResponse(ndwsrest.reserve(webargs), content_type="application/json" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in reserve. {}".format(e))
    raise NDWSError("Unknown exception in reserve. {}".format(e))
@api_view(['GET'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def setField (request, webargs):
  """Set an individual RAMON field for an object"""
  # Side-effecting GET (field value encoded in the URL); empty 200 on success.
  try:
    ndwsrest.setField(webargs)
    return django.http.HttpResponse()
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in setField. {}".format(e))
    raise NDWSError("Unknown exception in setField. {}".format(e))
@api_view(['GET'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def getField (request, webargs):
  """Get an individual RAMON field for an object"""
  # Returns the field value as plain text.
  try:
    return django.http.HttpResponse(ndwsrest.getField(webargs), content_type="text/html" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in getField. {}".format(e))
    raise NDWSError("Unknown exception in getField. {}".format(e))
@api_view(['GET'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def getPropagate (request, webargs):
  """ Get the value for Propagate field for a given project """
  try:
    return django.http.HttpResponse(ndwsrest.getPropagate(webargs), content_type="text/html" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in getPropagate. {}".format(e))
    raise NDWSError("Unknown exception in getPropagate. {}".format(e))
@api_view(['GET'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def setPropagate (request, webargs):
  """ Set the value for Propagate field for a given project """
  # Side-effecting GET, mirroring setField; empty 200 on success.
  try:
    ndwsrest.setPropagate(webargs)
    return django.http.HttpResponse()
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in setPropagate. {}".format(e))
    raise NDWSError("Unknown exception in setPropagate. {}".format(e))
@api_view(['POST'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def merge (request, webargs):
  """Merge annotation objects"""
  try:
    return django.http.HttpResponse(ndwsrest.merge(webargs), content_type="text/html" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in global Merge. {}".format(e))
    raise NDWSError("Unknown exception in global Merge. {}".format(e))
@api_view(['GET'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def exceptions (request, webargs):
  """Return a list of multiply labeled pixels in a cutout region"""
  # Returned as HDF5.
  try:
    return django.http.HttpResponse(ndwsrest.exceptions(webargs), content_type="product/hdf5" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in exceptions Web service. {}".format(e))
    raise NDWSError("Unknown exception in exceptions Web service. {}".format(e))
@api_view(['GET'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def minmaxProject (request, webargs):
  """Return a min/max intensity projection of a cutout as PNG."""
  try:
    return django.http.HttpResponse(ndwsrest.minmaxProject(webargs), content_type="image/png" )
  except NDWSError, e:
    return django.http.HttpResponseNotFound(e.value)
  except MySQLdb.Error, e:
    return django.http.HttpResponseNotFound(e)
  except Exception, e:
    logger.exception("Unknown exception in (min|max) projection Web service. {}".format(e))
    raise NDWSError("Unknown exception in (min|max) projection Web service. {}".format(e))
# NOTE(review): decorators below are commented out, so unlike its siblings this
# view has no DRF method restriction or authentication — confirm intentional.
# @api_view(['POST'])
# @authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
# @permission_classes((PublicAuthentication,))
def autoIngest(request, webargs):
  """RESTful URL for creating a project using a JSON file"""
  try:
    return ndwsprojingest.autoIngest(webargs, request.body)
  except NDWSError, e:
    return django.http.HttpResponseNotFound()
  except Exception, e:
    logger.exception("Unknown exception in jsonProject Web service. {}".format(e))
    raise NDWSError("Unknown exception in jsonProject Web service. {}".format(e))
@api_view(['POST'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def createChannel(request, webargs):
"""RESTful URL for creating a list of channels using a JSON file"""
try:
return ndwsprojingest.createChannel(webargs, request.body)
except NDWSError, e:
return django.http.HttpResponseNotFound()
except Exception, e:
logger.exception("Unknown exception in jsonProject Web service. {}".format(e))
raise NDWSError("Unknown exception in jsonProject Web service. {}".format(e))
@api_view(['POST'])
@authentication_classes((SessionAuthentication, AnonAllowedAuthentication))
@permission_classes((PublicAuthentication,))
def deleteChannel(request, webargs):
"""RESTful URL for deleting a list of channels using a JSON file"""
try:
return ndwsprojingest.deleteChannel(webargs, request.body)
except NDWSError, e:
return django.http.HttpResponseNotFound()
except Exception, e:
logger.exception("Unknown exception in jsonProject Web service. {}".format(e))
raise NDWSError("Unknown exception in jsonProject Web service. {}".format(e))
| |
"""Batch CLI provide a simple API to manage batch process via CLI.
The API can be used when one or more tasks need to be executed
providing output messages to the user.
It is also possible to request input to the user.
Author: siasi@cisco.com
Date: December 2013
"""
class TaskEngine():
    """Runs a sequence of Tasks in order, halting at the first failure.

    Wraps the supplied cli in a BatchCli, which handles all task input/output.
    """
    def __init__(self, cli):
        """Wrap *cli* in a BatchCli for reading input and printing output."""
        self.tasks = []
        self.cli = BatchCli(cli)
    def addTask(self, task):
        """Queue *task* for execution; must be called before run()."""
        self.tasks.append(task)
    def run(self):
        """Execute every queued task in order, stopping at the first failure."""
        self.cli.expectTaskCount(self.taskToRun())
        for current in self.tasks:
            self.cli.newTask(current.name)
            current.run(self.cli)
            if current.failed:
                break
    def taskToRun(self):
        """Number of tasks currently queued for execution."""
        return len(self.tasks)
class Task():
    """A named unit of work executed by the TaskEngine."""
    def __init__(self, name):
        self.name = name
        self.failed = False
    def run(self, cli):
        """Perform this task's work; subclasses override."""
        pass
    def __key(self):
        # A task's identity is its name alone.
        return self.name
    def __eq__(self, other):
        # Equal only to same-type tasks carrying the same name.
        if type(self) != type(other):
            return False
        return self.__key() == other.__key()
    def __hash__(self):
        return hash(self.__key())
    def __repr__(self):
        return self.name
class BatchCli():
"""This class provides a simple API to ask input to the user and
track the progress of tasks execution sending message to a cli.
User create an instance of this class specifying the number of total
tasks that must be executed and the cli to which the output shall be
sent.
The progress of execution of tasks is tracked with progressive numbers
in the form of current / total where the current value is the sequence
number of the current task, and the total number is the total number of tasks.
The current value is automatically determined when the method newMessage is called.
"""
def __init__(self, cli):
self.startMarker = '['
self.endMarker = ']'
self.cli = cli
self.tasksCount = 0
self.currentTask = 0
self.__buildHeaderTokens()
def expectTaskCount(self, tasksCount):
"Set the number of tasks the BatchCli is expected to run."
self.tasksCount = tasksCount
def newMessage(self, message):
"Send a new message to the cli."
output = self.__buildMessageOutput(message)
self.cli.log(output)
def newTask(self, taskName):
"""Send to the CLI a message saying the task passed as
parameter is starting execution."""
if self.currentTask == self.tasksCount:
raise RuntimeError("No more tasks expected")
self.currentTask += 1
output = self.__buildTaskOutput(taskName)
self.cli.log(output)
def confirm(self, question):
"""Send the question to the CLI and wait for an answer.
Return True if the answer is Y or y.
"""
answer = self.ask(question, ['Y', 'N'], 'Y')
return answer.upper() == 'Y' or answer == ""
def negate(self, question, suggest=['Y','N'], default=['N']):
"""Send the question to the CLI and wait for an answer.
Return True if the answer is N or n.
"""
answer = self.ask(question, ['Y', 'N'], 'N')
return answer.upper() == 'N' or answer == ""
def ask(self, question, options=[], default=None):
"""Ask a question to the CLI and return the answer.
Caller can specify a list of options for the answer and
the default answer. The default answer is returned if the CLI
receives a carriage return character.
If options are provided the answer must match one of the option
otherwise it is not accepted and a new request is doen by the CLI.
If options are provided by default is not raises a RuntimeError.
"""
if options and not default:
raise RuntimeError("Cannot call ask with options and no default")
if not options:
options_str = self.__getOptionsString(options, default)
answer = self.__getAnswer(question, options_str)
return self.__emptyStringToDefault(answer, default)
else:
return self.__askWithOptions(question, options, default)
def select(self, message, values):
default = values[0]
optionsString = self.__getOptionsString([], default)
output = self.__buildQuestionOutput(message, optionsString)
while True:
answer = self.__getAnswer(message, optionsString)
if answer in values:
return answer
elif answer == '':
return default
elif answer in 'Ll':
for value in values:
self.cli.log(self.__buildHeader(' ' + value))
def choose(self, message, values):
default = values[0]
optionsString = self.__getOptionsString([], default)
output = self.__buildQuestionOutput(message, optionsString)
while True:
output = self.__buildQuestionOutput(message, optionsString)
answer = self.cli.ask(output).strip()
int_answer = self.__to_int_answer(answer)
if int_answer > 0 and int_answer <= len(values):
return values[int_answer]
elif answer == '':
return default
elif answer in 'Ll':
for count in range(0, len(values)):
self.cli.log(self.__buildHeader(' ' + str(count + 1) + '. ' + values[count]))
def __to_int_answer(self, str_value):
try:
return int(str_value) - 1
except ValueError:
return -1
def __askWithOptions(self, question, options, default):
options_str = self.__getOptionsString(options, default)
while True:
answer = self.__getAnswer(question, options_str)
validAnswers = [option.lower() for option in options]
validAnswers.extend(options)
if answer in validAnswers:
return answer
elif answer == "":
return default
def __emptyStringToDefault(self, answer, default):
if answer == "":
return default
else:
return answer
def __getOptionsString(self, options, default):
result = []
optionOpenBracket = "["
if options:
result.append("(")
result.append("|".join(options))
result.append(")")
optionOpenBracket = " ["
if default:
result.append(optionOpenBracket)
result.append(default)
result.append("]")
return "".join(result)
def __getAnswer(self, question, options):
output = self.__buildQuestionOutput(question, options)
return self.cli.ask(output).strip()
def __buildQuestionOutput(self, message, options=""):
self.tokens[1] = " ? "
self.tokens[3] = message
if options != "":
newTokens = list(self.tokens)
newTokens.append(options)
return " ".join(newTokens)
return " ".join(self.tokens)
def __buildTaskOutput(self, message):
self.tokens[1] = self.__getProgressIndex()
self.tokens[3] = message
return " ".join(self.tokens)
def __buildMessageOutput(self, message):
self.tokens[1] = "..."
self.tokens[3] = message
return " ".join(self.tokens)
def __buildHeader(self, message):
self.tokens[1] = " - "
self.tokens[3] = message
return " ".join(self.tokens)
    def __buildHeaderTokens(self):
        """Initialize the 4-slot output template used by the __build* helpers:
        [start marker, status tag, end marker, message text]."""
        self.tokens = []
        self.tokens.append(self.startMarker)
        self.tokens.append("")  # slot 1: status tag (" ? ", "...", " - ", progress)
        self.tokens.append(self.endMarker)
        self.tokens.append("")  # slot 3: the message text
    def __getProgressIndex(self):
        """Return the "current/total" task counter as a string."""
        return str(self.currentTask) + "/" + str(self.tasksCount)
class Cli():
    "The CLI expected by BatchCli. Should be implemented by subclassing."

    def log(self, message):
        # Display *message* to the user; no-op in this abstract base.
        pass

    def ask(self, message):
        # Prompt with *message* and return the user's reply; no-op here.
        pass
class SimpleCli():
    """A simple implementation of the CLI expected by BatchCli.

    Print and read from Standard Input and Standard Output.
    """

    def log(self, message):
        "Print the message to Standard Output."
        print message

    def ask(self, message):
        "Print the message to Standard Output and read the input from Standard Input."
        return raw_input(message)
if __name__ == "__main__":
    # Demo: run a short "cook eggs" task sequence through the engine.

    class Print(Task):
        "Simple Task: do nothing more than printing ..."
        def run(self, cli):
            cli.newMessage("...")

    class CookingEggs(Task):
        "Simple interactive Task: ask a question, then print cooking steps."
        def run(self, cli):
            # Example of an interactive step (currently not scheduled below).
            eggs = cli.ask("How many eggs?", ["1", "2", "3"], "1")
            cli.newMessage("Break the eggs ...")
            cli.newMessage("Throw the eggshell ...")
            cli.newMessage("Put the eggs into the pan ...")

    cli = SimpleCli()
    engine = TaskEngine(cli)
    engine.addTask(Print("Put oil in the pan"))
    engine.addTask(Print("Turn fire on"))
    #engine.addTask(CookingEggs("Cooking the eggs"))
    engine.addTask(Print("Wait the eggs is cooked"))
    engine.addTask(Print("Put the eggs in the dish"))
    engine.addTask(Print("Add salt to the eggs and eat it!"))
    engine.run()
| |
from collections import deque
import gym
import gym_minigrid
import numpy as np
import sys
import unittest
import ray
from ray import tune
from ray.rllib.agents.callbacks import DefaultCallbacks
import ray.rllib.agents.ppo as ppo
from ray.rllib.utils.test_utils import check_learning_achieved, \
framework_iterator
from ray.rllib.utils.numpy import one_hot
from ray.tune import register_env
class MyCallBack(DefaultCallbacks):
    """Logs the mean Euclidean distance of visited cells from the origin."""

    def __init__(self):
        super().__init__()
        # Distances collected across trajectories until the sample ends.
        self.deltas = []

    def on_postprocess_trajectory(self, *, worker, episode, agent_id,
                                  policy_id, policies, postprocessed_batch,
                                  original_batches, **kwargs):
        # Recover the flat cell index from the one-hot observation, then
        # split it into (x, y) on the 8-wide grid.
        cell = np.argmax(postprocessed_batch["obs"], -1)
        xs = cell % 8
        ys = cell // 8
        self.deltas.extend((xs ** 2 + ys ** 2) ** 0.5)

    def on_sample_end(self, *, worker, samples, **kwargs):
        # Report and reset at the end of every sample batch.
        print("mean. distance from origin={}".format(np.mean(self.deltas)))
        self.deltas = []
class OneHotWrapper(gym.core.ObservationWrapper):
    """Flattens MiniGrid image observations into stacked one-hot vectors.

    Each of the 49 (7x7) view cells is one-hot encoded over object type (11),
    color (6) and state (3); the agent's direction adds 4 more dims. The last
    `framestack` frames are concatenated into a single flat observation.
    """

    def __init__(self, env, vector_index, framestack):
        super().__init__(env)
        self.framestack = framestack
        # 49=7x7 field of vision; 11=object types; 6=colors; 3=state types.
        # +4: Direction.
        self.single_frame_dim = 49 * (11 + 6 + 3) + 4

        # Exploration-debugging state: starting position and per-step
        # positions, summarized into a 100-entry travel-distance buffer
        # (only maintained/printed for vector_index 0).
        self.init_x = None
        self.init_y = None
        self.x_positions = []
        self.y_positions = []
        self.x_y_delta_buffer = deque(maxlen=100)
        self.vector_index = vector_index
        # Ring buffer of the last `framestack` one-hot frames, zero-seeded.
        self.frame_buffer = deque(maxlen=self.framestack)
        for _ in range(self.framestack):
            self.frame_buffer.append(np.zeros((self.single_frame_dim, )))

        self.observation_space = gym.spaces.Box(
            0.0,
            1.0,
            shape=(self.single_frame_dim * self.framestack, ),
            dtype=np.float32)

    def observation(self, obs):
        # Debug output: max-x/y positions to watch exploration progress.
        # NOTE(review): `step_count` and `agent_pos`/`agent_dir` resolve via
        # attribute forwarding to the wrapped MiniGrid env — confirm.
        if self.step_count == 0:
            # New episode: reset the frame stack to zeros ...
            for _ in range(self.framestack):
                self.frame_buffer.append(np.zeros((self.single_frame_dim, )))
            if self.vector_index == 0:
                # ... and log how far the agent got in the previous episode.
                if self.x_positions:
                    max_diff = max(
                        np.sqrt((np.array(self.x_positions) - self.init_x)**2 +
                                (np.array(self.y_positions) - self.init_y)**2))
                    self.x_y_delta_buffer.append(max_diff)
                    print("100-average dist travelled={}".format(
                        np.mean(self.x_y_delta_buffer)))
                    self.x_positions = []
                    self.y_positions = []
                self.init_x = self.agent_pos[0]
                self.init_y = self.agent_pos[1]

        # Are we carrying the key?
        # if self.carrying is not None:
        #     print("Carrying KEY!!")

        self.x_positions.append(self.agent_pos[0])
        self.y_positions.append(self.agent_pos[1])

        # One-hot the last dim into 11, 6, 3 one-hot vectors, then flatten.
        objects = one_hot(obs[:, :, 0], depth=11)
        colors = one_hot(obs[:, :, 1], depth=6)
        states = one_hot(obs[:, :, 2], depth=3)
        # Is the door we see open?
        # for x in range(7):
        #     for y in range(7):
        #         if objects[x, y, 4] == 1.0 and states[x, y, 0] == 1.0:
        #             print("Door OPEN!!")

        all_ = np.concatenate([objects, colors, states], -1)
        all_flat = np.reshape(all_, (-1, ))
        direction = one_hot(
            np.array(self.agent_dir), depth=4).astype(np.float32)
        single_frame = np.concatenate([all_flat, direction])
        self.frame_buffer.append(single_frame)
        return np.concatenate(self.frame_buffer)
def env_maker(config):
    """Build a MiniGrid env wrapped for image-only, one-hot observations."""
    env_name = config.get("name", "MiniGrid-Empty-5x5-v0")
    n_frames = config.get("framestack", 4)
    base_env = gym.make(env_name)
    # Only use image portion of observation (discard goal and direction).
    base_env = gym_minigrid.wrappers.ImgObsWrapper(base_env)
    vec_idx = config.vector_index if hasattr(config, "vector_index") else 0
    return OneHotWrapper(base_env, vec_idx, framestack=n_frames)
# Make the one-hot MiniGrid factory available to RLlib under "mini-grid".
register_env("mini-grid", env_maker)
# Conv-stack spec sized for MiniGrid-style image observations.
# NOTE(review): not referenced by the tests visible here — confirm it is
# still needed before removing.
CONV_FILTERS = [[16, [11, 11], 3], [32, [9, 9], 3], [64, [5, 5], 3]]
class TestCuriosity(unittest.TestCase):
    """Checks that Curiosity (ICM) exploration lets PPO solve sparse-reward
    environments that a purely random policy cannot."""

    @classmethod
    def setUpClass(cls):
        ray.init(num_cpus=3)

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def test_curiosity_on_frozen_lake(self):
        """PPO + Curiosity should reach the goal on a large FrozenLake."""
        config = ppo.DEFAULT_CONFIG.copy()
        # A very large frozen-lake that's hard for a random policy to solve
        # due to 0.0 feedback.
        config["env"] = "FrozenLake-v0"
        config["env_config"] = {
            "desc": [
                "SFFFFFFF",
                "FFFFFFFF",
                "FFFFFFFF",
                "FFFFFFFF",
                "FFFFFFFF",
                "FFFFFFFF",
                "FFFFFFFF",
                "FFFFFFFG",
            ],
            "is_slippery": False
        }
        # Print out observations to see how far we already get inside the Env.
        config["callbacks"] = MyCallBack
        # Limit horizon to make it really hard for non-curious agent to reach
        # the goal state.
        config["horizon"] = 16
        # Local only.
        config["num_workers"] = 0
        config["lr"] = 0.001

        num_iterations = 10
        for fw in framework_iterator(config):
            # W/ Curiosity. Expect to learn something.
            config["exploration_config"] = {
                "type": "Curiosity",
                "eta": 0.2,
                "lr": 0.001,
                "feature_dim": 128,
                "feature_net_config": {
                    "fcnet_hiddens": [],
                    "fcnet_activation": "relu",
                },
                "sub_exploration": {
                    "type": "StochasticSampling",
                }
            }
            trainer = ppo.PPOTrainer(config=config)
            learnt = False
            for i in range(num_iterations):
                result = trainer.train()
                print(result)
                # Any positive max reward means the goal was reached at
                # least once.
                if result["episode_reward_max"] > 0.0:
                    print("Reached goal after {} iters!".format(i))
                    learnt = True
                    break
            trainer.stop()
            self.assertTrue(learnt)

            # Disable this check for now. Add too much flakyness to test.
            # if fw == "tf":
            #     # W/o Curiosity. Expect to learn nothing.
            #     print("Trying w/o curiosity (not expected to learn).")
            #     config["exploration_config"] = {
            #         "type": "StochasticSampling",
            #     }
            #     trainer = ppo.PPOTrainer(config=config)
            #     rewards_wo = 0.0
            #     for _ in range(num_iterations):
            #         result = trainer.train()
            #         rewards_wo += result["episode_reward_mean"]
            #         print(result)
            #     trainer.stop()
            #     self.assertTrue(rewards_wo == 0.0)
            #     print("Did not reach goal w/o curiosity!")

    def test_curiosity_on_partially_observable_domain(self):
        """PPO + Curiosity should solve a partially observable MiniGrid."""
        config = ppo.DEFAULT_CONFIG.copy()
        config["env"] = "mini-grid"
        config["env_config"] = {
            # Also works with:
            # - MiniGrid-MultiRoom-N4-S5-v0
            # - MiniGrid-MultiRoom-N2-S4-v0
            "name": "MiniGrid-Empty-8x8-v0",
            "framestack": 1,  # seems to work even w/o framestacking
        }
        config["horizon"] = 15  # Make it impossible to reach goal by chance.
        config["num_envs_per_worker"] = 4
        config["model"]["fcnet_hiddens"] = [256, 256]
        config["model"]["fcnet_activation"] = "relu"
        config["num_sgd_iter"] = 8
        config["num_workers"] = 0

        config["exploration_config"] = {
            "type": "Curiosity",
            # For the feature NN, use a non-LSTM fcnet (same as the one
            # in the policy model).
            "eta": 0.1,
            "lr": 0.0003,  # 0.0003 or 0.0005 seem to work fine as well.
            "feature_dim": 64,
            # No actual feature net: map directly from observations to feature
            # vector (linearly).
            "feature_net_config": {
                "fcnet_hiddens": [],
                "fcnet_activation": "relu",
            },
            "sub_exploration": {
                "type": "StochasticSampling",
            }
        }

        min_reward = 0.001
        stop = {
            "training_iteration": 25,
            "episode_reward_mean": min_reward,
        }
        for _ in framework_iterator(config, frameworks="torch"):
            # To replay:
            # trainer = ppo.PPOTrainer(config=config)
            # trainer.restore("[checkpoint file]")
            # env = env_maker(config["env_config"])
            # s = env.reset()
            # for _ in range(10000):
            #     s, r, d, _ = env.step(trainer.compute_single_action(s))
            #     if d:
            #         s = env.reset()
            #     env.render()

            results = tune.run("PPO", config=config, stop=stop, verbose=1)
            check_learning_achieved(results, min_reward)
            iters = results.trials[0].last_result["training_iteration"]
            print("Reached in {} iterations.".format(iters))

            # config_wo = config.copy()
            # config_wo["exploration_config"] = {"type": "StochasticSampling"}
            # stop_wo = stop.copy()
            # stop_wo["training_iteration"] = iters
            # results = tune.run(
            #     "PPO", config=config_wo, stop=stop_wo, verbose=1)
            # try:
            #     check_learning_achieved(results, min_reward)
            # except ValueError:
            #     print("Did not learn w/o curiosity (expected).")
            # else:
            #     raise ValueError("Learnt w/o curiosity (not expected)!")
if __name__ == "__main__":
    # Allow running this test module directly through pytest.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
| |
import requests
import xml.etree.ElementTree as XML
from decimal import Decimal
import defaults, templates, tests
class USPS():
    """Thin client for the USPS Web Tools XML APIs: address verification,
    domestic/international rate quotes and package tracking."""

    def __init__(self, user_id=None, settings_file=defaults, templates=templates):
        """Configure credentials and endpoints from *settings_file*.

        Args:
            user_id: USPS Web Tools user id; falls back to the settings file.
            settings_file: module-like object holding USPS_* settings.
            templates: module-like object holding the XML request templates.
        """
        self.request = None  # last requests.Response, kept for debugging
        self.user_id = user_id if user_id else settings_file.USPS_USER_ID
        self.base_shipping_fee = settings_file.USPS_BASE_SHIPPING_FEE
        self.merchant_zipcode = settings_file.USPS_MERCHANT_ORIGIN_ZIPCODE
        self.base_url = settings_file.USPS_TEST_URL if settings_file.USPS_TESTMODE else settings_file.USPS_PRODUCTION_URL
        #self.default_domestic_package_data = settings_file.USPS_DEFAULT_DOMESTIC_PACKAGE_DATA
        #self.default_international_package_data = settings_file.USPS_DEFAULT_INTERNATIONAL_PACKAGE_DATA
        self.default_domestic_package_data = {}
        # Bug fix: price_international_shipping reads this attribute, but it
        # was never initialized here (only the commented-out settings lookup
        # above), causing an AttributeError when no package_data was given.
        self.default_international_package_data = {}
        self.templates = templates
##########################
# ADDRESS VERIFICATION #
##########################
def verify_address(self, address_data={}, api='Verify'):
#login info
xml = XML.fromstring(self.templates.address_template)
xml.set('USERID', self.user_id)
#address bindings
address = xml.find('Address')
for key, value in address_data.iteritems():
address.find(key).text = value
#request
self.request = requests.get('%s?API=%s&XML=%s' % (self.base_url, api, XML.tostring(xml)))
response_xml = XML.fromstring(self.request.text)
#error checking
error = response_xml.find('.//Description')
response = {
'error':error.text if error is not None else None,
'value':True if error is None else False,
'xml':response_xml
}
return response
def lookup_zipcode(self, address_data={}, api='ZipCodeLookup'):
#login info
xml = XML.fromstring(self.templates.zipcode_template)
xml.set('USERID', self.user_id)
#address bindings
address = xml.find('Address')
for key, value in address_data.iteritems():
address.find(key).text = value
#request
self.request = requests.get('%s?API=%s&XML=%s' % (self.base_url, api, XML.tostring(xml)))
response_xml = XML.fromstring(self.request.text)
#error checking
error = response_xml.find('.//Description')
response = {
'error':error.text if error is not None else None,
'value':response_xml.find('.//Zip5').text if error is None else error.text,
'xml':response_xml
}
return response
def loopkup_city_state(self, zipcode, api='CityStateLookup'):
#login info
xml = XML.fromstring(self.templates.city_state_template)
xml.set('USERID', self.user_id)
#address bindings
xml.find('.//Zip5').text = zipcode
#request
self.request = requests.get('%s?API=%s&XML=%s' % (self.base_url, api, XML.tostring(xml)))
response_xml = XML.fromstring(self.request.text)
#error checking
error = response_xml.find('.//Description')
city = response_xml.find('.//City').text if error is None else None
state = response_xml.find('.//State').text if error is None else None
response = {
'error':error.text if error is not None else None,
'value':{'city':city, 'state':state},
'xml':response_xml
}
return response
##########################
# SHIPPING PRICES #
##########################
#dimensions.lbs, .ounces, .container_shape, .size, .width, .length, .height, .girth
def price_domestic_shipping(self, shipping_type, destination_zipcode, package_data=None, api='RateV4'):
details = package_data if package_data else self.default_domestic_package_data
#login info
xml = XML.fromstring(self.templates.domestic_shipping_template)
xml.set('USERID', self.user_id)
#package binding
package = xml.find('Package')
package.find('ZipOrigination').text = self.merchant_zipcode
package.find('ZipDestination').text = destination_zipcode
package.find('Service').text = shipping_type
for key, value in details.iteritems():
package.find(key).text = value
#request
self.request = requests.get('%s?API=%s&XML=%s' % (self.base_url, api, XML.tostring(xml)))
response_xml = XML.fromstring(self.request.text)
#error checking
error = response_xml.find('.//Description')
rate = Decimal(response_xml.find('.//Rate').text) if error is None else None
response = {
'error':error.text if error is not None else None,
'value':rate,
'xml':response_xml
}
return response
def price_international_shipping(self, package_data, flags={}, api='IntlRateV2'):
details = package_data if package_data else self.default_international_package_data
#login info
xml = XML.fromstring(self.templates.international_shipping_template)
xml.set('USERID', self.user_id)
#package data
package = xml.find('Package')
for key, value in details.iteritems():
package.find(key).text = value
#gifts and POBox
gxg = package.find('GXG')
for key, value in flags.iteritems():
gxg.find(key).text = value
#request
self.request = requests.get('%s?API=%s&XML=%s' % (self.base_url, api, XML.tostring(xml)))
response_xml = XML.fromstring(self.request.text)
error = response_xml.find('.//Description')
#returns multiple shipping options
services = response_xml.find('Package').findall('Service')
rates={}
if error is None:
for service in services:
service_id = service.get('ID')
Postage = service.find('Postage')
CommercialPostage = service.find('CommercialPostage')
SvcCommitments = service.find('SvcCommitments')
SvcDescription = service.find('SvcDescription')
rates[str(service_id)] = {
'Postage': Decimal(Postage.text) if Postage is not None else None,
'CommercialPostage':Decimal(CommercialPostage.text) if CommercialPostage is not None else None,
'SvcCommitments':SvcCommitments.text if SvcCommitments is not None else None,
'SvcDescription':SvcDescription.text if SvcDescription is not None else None,
}
response = {
'error':error.text if error is not None else None,
'value': rates,
'xml':response_xml
}
return response
###############
# TRACKING #
###############
def track_package(self, tracking_id, api='TrackV2'):
#login info
xml = XML.fromstring(self.templates.tracking_template)
xml.set('USERID', self.user_id)
#tacking_id
tracking_element = xml.find('TrackID')
tracking_element.set('ID', tracking_id)
#request
self.request = requests.get('%s?API=%s&XML=%s' % (self.base_url, api, XML.tostring(xml)))
response_xml = XML.fromstring(self.request.text)
#error checking
error = response_xml.find('.//Description')
status = response_xml.find('.//TrackSummary')
response = {
'error':error.text if error is not None else None,
'value': status.text if status is not None else None,
'xml':response_xml
}
return response
######################
# UTILITIES #
######################
# def step_dictionary(xml, obj):
# for key, value in obj.iteritems():
# child = XML.SubElement(xml, key)
# if type(value) is dict:
# step_dictionary(child, value)
# else:
# child.text = value
# def dictionary_to_xml(obj, user_id, root_element):
# root = XML.Element(root_element)
# root.set('USERID', user_id)
# step_dictionary(root, obj)
# return root
def ounces_to_lbs(ounces):
    """Split a weight in ounces into whole pounds and remaining ounces.

    Implements the previously-stubbed behavior described by the old comment
    ("returns object with lbs and ounces"); 16 ounces per pound.

    Returns:
        dict with 'lbs' (whole pounds) and 'ounces' (remainder) keys.
    """
    lbs, remainder = divmod(ounces, 16)
    return {'lbs': lbs, 'ounces': remainder}
def lbs_to_ounces(lbs):
    """Convert a weight in pounds to ounces (16 ounces per pound).

    Implements the previously-stubbed conversion; mirrors ounces_to_lbs.
    """
    return lbs * 16
#step 1: run all tests and then call usps and get production certified
#tests.run_all_tests(defaults.USPS_USER_ID)

#step 2: run these examples and check the results
# (each response is a dict with 'error', 'value' and 'xml' keys)
# response = usps.loopkup_city_state('84115')
# print response['value']

# response = usps.lookup_zipcode({'Address2':'244 Edison', 'City':'Salt Lake City', 'State':'UT'})
# print response['value']

# response = usps.verify_address({'Address2':'244 Edison', 'City':'Salt Lake City', 'State':'UT'})
# print response['value']

# response = usps.track_package('EJ958083578US')
# print response['value']

# response = usps.price_domestic_shipping('PRIORITY','84105')
# print response['value']

# package_data = defaults.USPS_DEFAULT_INTERNATIONAL_PACKAGE_DATA.update({'ValueOfContents':'50'})
# response = usps.price_international_shipping(package_data)
# print len(response['value'])
| |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the exploration editor page."""
__author__ = 'Sean Lip'
import os
import StringIO
import zipfile
from core.controllers import editor
from core.domain import config_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_domain
from core.domain import rights_manager
from core.tests import test_utils
import feconf
class BaseEditorControllerTest(test_utils.GenericTestBase):
    """Base class for editor controller tests: signs up the standard users
    and provides editability assertions."""

    # Markers rendered into the editor page depending on edit rights.
    CAN_EDIT_STR = 'GLOBALS.can_edit = JSON.parse(\'true\');'
    CANNOT_EDIT_STR = 'GLOBALS.can_edit = JSON.parse(\'false\');'

    def setUp(self):
        """Completes the sign-up process for self.EDITOR_EMAIL."""
        super(BaseEditorControllerTest, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)

        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)

        self.set_admins([self.ADMIN_EMAIL])

    def assert_can_edit(self, response_body):
        """Asserts that the response body indicates that the exploration is
        editable."""
        self.assertIn(self.CAN_EDIT_STR, response_body)
        self.assertNotIn(self.CANNOT_EDIT_STR, response_body)

    def assert_cannot_edit(self, response_body):
        """Asserts that the response body indicates that the exploration is
        not editable."""
        self.assertIn(self.CANNOT_EDIT_STR, response_body)
        self.assertNotIn(self.CAN_EDIT_STR, response_body)
class EditorTest(BaseEditorControllerTest):
    """Tests for the exploration editor page and its state-editing handlers."""

    def test_editor_page(self):
        """Test access to editor pages for the sample exploration."""
        exp_services.delete_demo('0')
        exp_services.load_demo('0')

        # Check that non-editors can access, but not edit, the editor page.
        response = self.testapp.get('/create/0')
        self.assertEqual(response.status_int, 200)
        self.assertIn('Welcome to Oppia!', response.body)
        self.assert_cannot_edit(response.body)

        # Log in as an editor.
        self.login(self.EDITOR_EMAIL)

        # Check that it is now possible to access and edit the editor page.
        response = self.testapp.get('/create/0')
        self.assertIn('Welcome to Oppia!', response.body)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        self.assertIn('Stats', response.body)
        self.assertIn('History', response.body)
        # Test that the value generator JS is included.
        self.assertIn('RandomSelector', response.body)

        self.logout()

    def test_new_state_template(self):
        """Test the validity of the NEW_STATE_TEMPLATE."""
        exp_services.load_demo('0')
        exploration = exp_services.get_exploration_by_id('0')
        exploration.add_states([feconf.DEFAULT_INIT_STATE_NAME])
        new_state_dict = exploration.states[
            feconf.DEFAULT_INIT_STATE_NAME].to_dict()
        new_state_dict['unresolved_answers'] = {}
        self.assertEqual(new_state_dict, editor.NEW_STATE_TEMPLATE)

    def test_add_new_state_error_cases(self):
        """Test the error cases for adding a new state to an exploration."""
        exp_services.delete_demo('0')
        exp_services.load_demo('0')
        CURRENT_VERSION = 1

        self.login(self.EDITOR_EMAIL)
        response = self.testapp.get('/create/0')
        csrf_token = self.get_csrf_token_from_response(response)

        def _get_payload(new_state_name, version=None):
            # Builds an 'add_state' change-list payload for the PUT handler.
            result = {
                'change_list': [{
                    'cmd': 'add_state',
                    'state_name': new_state_name
                }],
                'commit_message': 'Add new state',
            }
            if version is not None:
                result['version'] = version
            return result

        def _put_and_expect_400_error(payload):
            # Sends the payload and expects the handler to reject it.
            return self.put_json(
                '/createhandler/data/0', payload, csrf_token,
                expect_errors=True, expected_status_int=400)

        # A request with no version number is invalid.
        response_dict = _put_and_expect_400_error(_get_payload('New state'))
        self.assertIn('a version must be specified', response_dict['error'])

        # A request with the wrong version number is invalid.
        response_dict = _put_and_expect_400_error(
            _get_payload('New state', 123))
        self.assertIn('which is too old', response_dict['error'])

        # A request with an empty state name is invalid.
        response_dict = _put_and_expect_400_error(
            _get_payload('', CURRENT_VERSION))
        self.assertIn('should be between 1 and 50', response_dict['error'])

        # A request with a really long state name is invalid.
        response_dict = _put_and_expect_400_error(
            _get_payload('a' * 100, CURRENT_VERSION))
        self.assertIn('should be between 1 and 50', response_dict['error'])

        # A request with a state name containing invalid characters is
        # invalid.
        response_dict = _put_and_expect_400_error(
            _get_payload('[Bad State Name]', CURRENT_VERSION))
        self.assertIn('Invalid character [', response_dict['error'])

        # A request with a state name of feconf.END_DEST is invalid.
        response_dict = _put_and_expect_400_error(
            _get_payload(feconf.END_DEST, CURRENT_VERSION))
        self.assertIn('Invalid state name', response_dict['error'])

        # Even if feconf.END_DEST is mixed case, it is still invalid.
        response_dict = _put_and_expect_400_error(
            _get_payload('eNd', CURRENT_VERSION))
        self.assertEqual('eNd'.lower(), feconf.END_DEST.lower())
        self.assertIn('Invalid state name', response_dict['error'])

        # A name cannot have spaces at the front or back.
        response_dict = _put_and_expect_400_error(
            _get_payload(' aa', CURRENT_VERSION))
        self.assertIn('start or end with whitespace', response_dict['error'])
        response_dict = _put_and_expect_400_error(
            _get_payload('aa\t', CURRENT_VERSION))
        self.assertIn('end with whitespace', response_dict['error'])
        response_dict = _put_and_expect_400_error(
            _get_payload('\n', CURRENT_VERSION))
        self.assertIn('end with whitespace', response_dict['error'])

        # A name cannot have consecutive whitespace.
        response_dict = _put_and_expect_400_error(
            _get_payload('The B', CURRENT_VERSION))
        self.assertIn('Adjacent whitespace', response_dict['error'])
        response_dict = _put_and_expect_400_error(
            _get_payload('The\t\tB', CURRENT_VERSION))
        self.assertIn('Adjacent whitespace', response_dict['error'])

        self.logout()

    def test_resolved_answers_handler(self):
        """Tests the handler that marks unresolved answers as resolved."""
        exp_services.delete_demo('0')
        exp_services.load_demo('0')

        # In the reader perspective, submit the first multiple-choice answer,
        # then submit 'blah' once, 'blah2' twice and 'blah3' three times.
        # TODO(sll): Use the ExplorationPlayer in reader_test for this.
        exploration_dict = self.get_json(
            '%s/0' % feconf.EXPLORATION_INIT_URL_PREFIX)
        self.assertEqual(
            exploration_dict['exploration']['title'], 'Welcome to Oppia!')

        state_name = exploration_dict['exploration']['init_state_name']
        exploration_dict = self.submit_answer('0', state_name, '0')

        state_name = exploration_dict['state_name']
        self.submit_answer('0', state_name, 'blah')
        for _ in range(2):
            self.submit_answer('0', state_name, 'blah2')
        for _ in range(3):
            self.submit_answer('0', state_name, 'blah3')

        # Log in as an editor.
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.get('/create/0')
        csrf_token = self.get_csrf_token_from_response(response)
        url = str('/createhandler/resolved_answers/0/%s' % state_name)

        def _get_unresolved_answers():
            # Returns the current {answer: count} map for the default rule.
            return stats_domain.StateRuleAnswerLog.get(
                '0', state_name, feconf.SUBMIT_HANDLER_NAME,
                exp_domain.DEFAULT_RULESPEC_STR
            ).answers

        self.assertEqual(
            _get_unresolved_answers(), {'blah': 1, 'blah2': 2, 'blah3': 3})

        # An empty request should result in an error.
        response_dict = self.put_json(
            url, {'something_else': []}, csrf_token,
            expect_errors=True, expected_status_int=400)
        self.assertIn('Expected a list', response_dict['error'])

        # A request of the wrong type should result in an error.
        response_dict = self.put_json(
            url, {'resolved_answers': 'this_is_a_string'}, csrf_token,
            expect_errors=True, expected_status_int=400)
        self.assertIn('Expected a list', response_dict['error'])

        # Trying to remove an answer that wasn't submitted has no effect.
        response_dict = self.put_json(
            url, {'resolved_answers': ['not_submitted_answer']}, csrf_token)
        self.assertEqual(
            _get_unresolved_answers(), {'blah': 1, 'blah2': 2, 'blah3': 3})

        # A successful request should remove the answer in question.
        response_dict = self.put_json(
            url, {'resolved_answers': ['blah']}, csrf_token)
        self.assertEqual(
            _get_unresolved_answers(), {'blah2': 2, 'blah3': 3})

        # It is possible to remove more than one answer at a time.
        response_dict = self.put_json(
            url, {'resolved_answers': ['blah2', 'blah3']}, csrf_token)
        self.assertEqual(_get_unresolved_answers(), {})

        self.logout()
class DownloadIntegrationTest(BaseEditorControllerTest):
    """Test handler for exploration and state download."""

    # Expected JSON payload for the full-exploration download: one YAML
    # string per state.
    SAMPLE_JSON_CONTENT = {
        "State A": ("""content:
- type: text
  value: ''
interaction:
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  handlers:
  - name: submit
    rule_specs:
    - definition:
        rule_type: default
      dest: State A
      feedback: []
      param_changes: []
  id: TextInput
param_changes: []
"""),
        "State B": ("""content:
- type: text
  value: ''
interaction:
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  handlers:
  - name: submit
    rule_specs:
    - definition:
        rule_type: default
      dest: State B
      feedback: []
      param_changes: []
  id: TextInput
param_changes: []
"""),
        feconf.DEFAULT_INIT_STATE_NAME: ("""content:
- type: text
  value: ''
interaction:
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  handlers:
  - name: submit
    rule_specs:
    - definition:
        rule_type: default
      dest: %s
      feedback: []
      param_changes: []
  id: TextInput
param_changes: []
""") % feconf.DEFAULT_INIT_STATE_NAME
    }

    # Expected YAML payload for the single-state download of 'State A'.
    SAMPLE_STATE_STRING = (
        """content:
- type: text
  value: ''
interaction:
  customization_args:
    placeholder:
      value: ''
    rows:
      value: 1
  handlers:
  - name: submit
    rule_specs:
    - definition:
        rule_type: default
      dest: State A
      feedback: []
      param_changes: []
  id: TextInput
param_changes: []
""")

    def test_exploration_download_handler_for_default_exploration(self):
        """Downloads an exploration as ZIP and as JSON and verifies both."""
        self.login(self.EDITOR_EMAIL)
        self.OWNER_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)

        # Create a simple exploration
        EXP_ID = 'eid'
        self.save_new_valid_exploration(
            EXP_ID, self.OWNER_ID,
            title='The title for ZIP download handler test!',
            category='This is just a test category',
            objective='')

        exploration = exp_services.get_exploration_by_id(EXP_ID)
        exploration.states[exploration.init_state_name].interaction.handlers[
            0].rule_specs[0].dest = exploration.init_state_name
        exploration.add_states(['State A', 'State 2', 'State 3'])
        exploration.states['State A'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')
        exploration.states['State 3'].update_interaction_id('TextInput')
        exploration.rename_state('State 2', 'State B')
        exploration.delete_state('State 3')
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
        response = self.testapp.get('/create/%s' % EXP_ID)

        # Check download to zip file
        # Download to zip file using download handler
        EXPLORATION_DOWNLOAD_URL = '/createhandler/download/%s' % EXP_ID
        response = self.testapp.get(EXPLORATION_DOWNLOAD_URL)

        # Check downloaded zip file
        self.assertEqual(response.headers['Content-Type'], 'text/plain')
        filename = 'oppia-ThetitleforZIPdownloadhandlertest!-v2.zip'
        self.assertEqual(response.headers['Content-Disposition'],
                         'attachment; filename=%s' % str(filename))
        zf_saved = zipfile.ZipFile(StringIO.StringIO(response.body))
        self.assertEqual(
            zf_saved.namelist(),
            ['The title for ZIP download handler test!.yaml'])

        # Load golden zip file
        with open(os.path.join(
                feconf.TESTS_DATA_DIR,
                'oppia-ThetitleforZIPdownloadhandlertest!-v2-gold.zip'),
                'rb') as f:
            golden_zipfile = f.read()
        zf_gold = zipfile.ZipFile(StringIO.StringIO(golden_zipfile))

        # Compare saved with golden file
        self.assertEqual(
            zf_saved.open(
                'The title for ZIP download handler test!.yaml'
            ).read(),
            zf_gold.open(
                'The title for ZIP download handler test!.yaml'
            ).read())

        # Check download to JSON
        exploration.update_objective('Test JSON download')
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])

        # Download to JSON string using download handler
        self.maxDiff = None
        EXPLORATION_DOWNLOAD_URL = (
            '/createhandler/download/%s?output_format=%s&width=50' %
            (EXP_ID, feconf.OUTPUT_FORMAT_JSON))
        response = self.get_json(EXPLORATION_DOWNLOAD_URL)

        # Check downloaded dict
        self.assertEqual(self.SAMPLE_JSON_CONTENT, response)

        self.logout()

    def test_state_download_handler_for_default_exploration(self):
        """Downloads a single state as a YAML string and verifies it."""
        self.login(self.EDITOR_EMAIL)
        self.OWNER_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)

        # Create a simple exploration
        EXP_ID = 'eid'
        self.save_new_valid_exploration(
            EXP_ID, self.OWNER_ID,
            title='The title for states download handler test!',
            category='This is just a test category')

        exploration = exp_services.get_exploration_by_id(EXP_ID)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        exploration.states['State A'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')
        exploration.states['State 3'].update_interaction_id('TextInput')
        exploration.rename_state('State 2', 'State B')
        exploration.delete_state('State 3')
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
        response = self.testapp.get('/create/%s' % EXP_ID)

        # Check download state as YAML string
        self.maxDiff = None
        state_name = 'State%20A'
        EXPLORATION_DOWNLOAD_URL = (
            '/createhandler/download_state/%s?state=%s&width=50' %
            (EXP_ID, state_name))
        response = self.testapp.get(EXPLORATION_DOWNLOAD_URL)
        self.assertEqual(self.SAMPLE_STATE_STRING, response.body)

        self.logout()
class ExplorationDeletionRightsTest(BaseEditorControllerTest):
    """Tests which user roles may delete (un)published explorations."""

    def test_deletion_rights_for_unpublished_exploration(self):
        """Test rights management for deletion of unpublished explorations."""
        UNPUBLISHED_EXP_ID = 'unpublished_eid'
        exploration = exp_domain.Exploration.create_default_exploration(
            UNPUBLISHED_EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.owner_id, exploration)

        rights_manager.assign_role(
            self.owner_id, UNPUBLISHED_EXP_ID, self.editor_id,
            rights_manager.ROLE_EDITOR)

        # Editors of an unpublished exploration may not delete it.
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % UNPUBLISHED_EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()

        # Nor may viewers.
        self.login(self.VIEWER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % UNPUBLISHED_EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()

        # Only the owner may delete an unpublished exploration.
        self.login(self.OWNER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % UNPUBLISHED_EXP_ID)
        self.assertEqual(response.status_int, 200)
        self.logout()

    def test_deletion_rights_for_published_exploration(self):
        """Test rights management for deletion of published explorations."""
        PUBLISHED_EXP_ID = 'published_eid'
        exploration = exp_domain.Exploration.create_default_exploration(
            PUBLISHED_EXP_ID, 'A title', 'A category')
        exp_services.save_new_exploration(self.owner_id, exploration)

        rights_manager.assign_role(
            self.owner_id, PUBLISHED_EXP_ID, self.editor_id,
            rights_manager.ROLE_EDITOR)
        rights_manager.publish_exploration(self.owner_id, PUBLISHED_EXP_ID)

        # Editors of a published exploration may not delete it.
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % PUBLISHED_EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()

        # Nor may viewers.
        self.login(self.VIEWER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % PUBLISHED_EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()

        # Once published, even the owner may no longer delete it.
        self.login(self.OWNER_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % PUBLISHED_EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 401)
        self.logout()

        # Only admins may delete a published exploration.
        self.login(self.ADMIN_EMAIL)
        response = self.testapp.delete(
            '/createhandler/data/%s' % PUBLISHED_EXP_ID)
        self.assertEqual(response.status_int, 200)
        self.logout()
class VersioningIntegrationTest(BaseEditorControllerTest):
    """Test retrieval of and reverting to old exploration versions."""

    def setUp(self):
        """Create an exploration with two versions."""
        super(VersioningIntegrationTest, self).setUp()
        self.EXP_ID = '0'

        exp_services.delete_demo(self.EXP_ID)
        exp_services.load_demo(self.EXP_ID)

        self.login(self.EDITOR_EMAIL)

        # Version 2: change the objective and the initial state's content.
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        exp_services.update_exploration(
            self.EDITOR_EMAIL, self.EXP_ID, [{
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective',
            }, {
                'cmd': 'edit_state_property',
                'property_name': 'content',
                'state_name': exploration.init_state_name,
                'new_value': [{'type': 'text', 'value': 'ABC'}],
            }], 'Change objective and init state content')

    def _get_init_content(self, version=None):
        """Fetch the init state's content text, optionally at a past version."""
        url = '%s/%s' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID)
        if version is not None:
            url += '?v=%s' % version
        reader_dict = self.get_json(url)
        init_state_name = reader_dict['exploration']['init_state_name']
        init_state_data = (
            reader_dict['exploration']['states'][init_state_name])
        return init_state_data['content'][0]['value']

    def test_reverting_to_old_exploration(self):
        """Test reverting to old exploration versions."""
        # Open the editor page to obtain a CSRF token.
        response = self.testapp.get(
            '%s/%s' % (feconf.EDITOR_URL_PREFIX, self.EXP_ID))
        csrf_token = self.get_csrf_token_from_response(response)

        # Every candidate below is invalid: only version 1 can be a target.
        for bad_version in (-1, 0, 2, 3, 4, '1', ()):
            response_dict = self.post_json(
                '/createhandler/revert/%s' % self.EXP_ID, {
                    'current_version': 2,
                    'revert_to_version': bad_version
                }, csrf_token, expect_errors=True, expected_status_int=400)
            # The error message depends on whether the value is an integer.
            if isinstance(bad_version, int):
                self.assertIn('Cannot revert to version',
                              response_dict['error'])
            else:
                self.assertIn('Expected an integer', response_dict['error'])

        # The exploration must still be at version 2.
        init_content = self._get_init_content()
        self.assertIn('ABC', init_content)
        self.assertNotIn('Hi, welcome to Oppia!', init_content)

        # Reverting to version 1 succeeds...
        self.post_json(
            '/createhandler/revert/%s' % self.EXP_ID, {
                'current_version': 2,
                'revert_to_version': 1
            }, csrf_token)

        # ...and restores the demo's original content.
        init_content = self._get_init_content()
        self.assertNotIn('ABC', init_content)
        self.assertIn('Hi, welcome to Oppia!', init_content)

    def test_versioning_for_default_exploration(self):
        """Test retrieval of old exploration versions."""
        # The latest version contains 'ABC'.
        latest_content = self._get_init_content()
        self.assertIn('ABC', latest_content)
        self.assertNotIn('Hi, welcome to Oppia!', latest_content)

        # v1 contains the demo greeting.
        v1_content = self._get_init_content(version=1)
        self.assertIn('Hi, welcome to Oppia!', v1_content)
        self.assertNotIn('ABC', v1_content)

        # v2 contains 'ABC'.
        v2_content = self._get_init_content(version=2)
        self.assertIn('ABC', v2_content)
        self.assertNotIn('Hi, welcome to Oppia!', v2_content)

        # v3 does not exist.
        response = self.testapp.get(
            '%s/%s?v=3' % (feconf.EXPLORATION_INIT_URL_PREFIX, self.EXP_ID),
            expect_errors=True)
        self.assertEqual(response.status_int, 404)
class ExplorationEditRightsTest(BaseEditorControllerTest):
    """Test the handling of edit rights for explorations."""

    def test_user_banning(self):
        """Test that banned users are banned."""
        EXP_ID = '0'
        exp_services.delete_demo(EXP_ID)
        exp_services.load_demo(EXP_ID)

        # Register two fresh editors.
        self.signup('joe@example.com', 'joe')
        self.signup('sandra@example.com', 'sandra')

        # Joe can initially reach the gallery and edit the exploration.
        self.login('joe@example.com')
        gallery_response = self.testapp.get(feconf.GALLERY_URL)
        self.assertEqual(gallery_response.status_int, 200)
        editor_response = self.testapp.get('/create/%s' % EXP_ID)
        self.assertEqual(editor_response.status_int, 200)
        self.assert_can_edit(editor_response.body)

        # Ban Joe by username.
        config_services.set_property(
            feconf.ADMIN_COMMITTER_ID, 'banned_usernames', ['joe'])

        # Joe keeps gallery access but loses the ability to edit.
        gallery_response = self.testapp.get(
            feconf.GALLERY_URL, expect_errors=True)
        self.assertEqual(gallery_response.status_int, 200)
        editor_response = self.testapp.get(
            '/create/%s' % EXP_ID, expect_errors=True)
        self.assertEqual(editor_response.status_int, 200)
        self.assert_cannot_edit(editor_response.body)
        self.logout()

        # Sandra is unaffected by Joe's ban.
        self.login('sandra@example.com')
        editor_response = self.testapp.get('/create/%s' % EXP_ID)
        self.assertEqual(editor_response.status_int, 200)
        self.assert_can_edit(editor_response.body)
        self.logout()
class ExplorationRightsIntegrationTest(BaseEditorControllerTest):
    """Test the handler for managing exploration editing rights."""

    COLLABORATOR_EMAIL = 'collaborator@example.com'
    COLLABORATOR_USERNAME = 'collab'
    COLLABORATOR2_EMAIL = 'collaborator2@example.com'
    COLLABORATOR2_USERNAME = 'collab2'
    COLLABORATOR3_EMAIL = 'collaborator3@example.com'
    COLLABORATOR3_USERNAME = 'collab3'
    VIEWER2_EMAIL = 'viewer2@example.com'

    def _grant_role(self, exp_id, version, username, role, csrf_token):
        """PUT a rights change granting *role* to *username*."""
        self.put_json(
            '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, exp_id), {
                'version': version,
                'new_member_username': username,
                'new_member_role': role
            }, csrf_token)

    def _add_state(self, exp_id, version, state_name, csrf_token):
        """Commit a change list adding *state_name* with a TextInput widget.

        Returns the response dict of the successful PUT.
        """
        return self.put_json(
            '%s/%s' % (feconf.EXPLORATION_DATA_PREFIX, exp_id),
            {
                'version': version,
                'commit_message': 'Added %s' % state_name,
                'change_list': [{
                    'cmd': 'add_state',
                    'state_name': state_name
                }, {
                    'cmd': 'edit_state_property',
                    'state_name': state_name,
                    'property_name': 'widget_id',
                    'new_value': 'TextInput',
                }]
            },
            csrf_token=csrf_token,
            expected_status_int=200)

    def test_exploration_rights_handler(self):
        """Test exploration rights handler."""
        # Create several users.
        self.signup(
            self.COLLABORATOR_EMAIL, username=self.COLLABORATOR_USERNAME)
        self.signup(
            self.COLLABORATOR2_EMAIL, username=self.COLLABORATOR2_USERNAME)
        self.signup(
            self.COLLABORATOR3_EMAIL, username=self.COLLABORATOR3_USERNAME)
        self.collaborator_id = self.get_user_id_from_email(
            self.COLLABORATOR_EMAIL)

        # The owner creates the exploration.
        self.login(self.OWNER_EMAIL)
        EXP_ID = 'eid'
        self.save_new_valid_exploration(
            EXP_ID, self.owner_id, title='Title for rights handler test!',
            category='My category')

        exploration = exp_services.get_exploration_by_id(EXP_ID)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        for state_name in ('State A', 'State 2', 'State 3'):
            exploration.states[state_name].update_interaction_id('TextInput')

        response = self.testapp.get(
            '%s/%s' % (feconf.EDITOR_URL_PREFIX, EXP_ID))
        csrf_token = self.get_csrf_token_from_response(response)

        # The owner grants roles to the other users.
        self._grant_role(
            EXP_ID, exploration.version, self.VIEWER_USERNAME,
            rights_manager.ROLE_VIEWER, csrf_token)
        self._grant_role(
            EXP_ID, exploration.version, self.COLLABORATOR_USERNAME,
            rights_manager.ROLE_EDITOR, csrf_token)
        self._grant_role(
            EXP_ID, exploration.version, self.COLLABORATOR2_USERNAME,
            rights_manager.ROLE_EDITOR, csrf_token)
        self.logout()

        # The viewer can open the editor page but cannot edit.
        self.login(self.VIEWER_EMAIL)
        response = self.testapp.get('/create/%s' % EXP_ID, expect_errors=True)
        self.assertEqual(response.status_int, 200)
        self.assert_cannot_edit(response.body)
        self.logout()

        # Each collaborator can edit (add a new state) but cannot grant
        # roles to further users.
        for collaborator_email, new_state in (
                (self.COLLABORATOR_EMAIL, 'State 4'),
                (self.COLLABORATOR2_EMAIL, 'State 5')):
            self.login(collaborator_email)
            response = self.testapp.get('/create/%s' % EXP_ID)
            self.assertEqual(response.status_int, 200)
            self.assert_can_edit(response.body)
            csrf_token = self.get_csrf_token_from_response(response)

            response_dict = self._add_state(
                EXP_ID, exploration.version, new_state, csrf_token)
            self.assertIn(new_state, response_dict['states'])

            # Granting roles is still forbidden for collaborators.
            exploration = exp_services.get_exploration_by_id(EXP_ID)
            response_dict = self.put_json(
                '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, EXP_ID), {
                    'version': exploration.version,
                    'new_member_username': self.COLLABORATOR3_USERNAME,
                    'new_member_role': rights_manager.ROLE_EDITOR,
                }, csrf_token, expect_errors=True, expected_status_int=401)
            self.assertEqual(response_dict['code'], 401)
            self.logout()
| |
import json
from deuce.util import client as p3k_swiftclient
from deuce.tests.util.mockfile import MockFile
from deuce.tests import V1Base
from swiftclient.exceptions import ClientException
import mock
import asyncio
class Response(object):
    """Minimal stand-in for an aiohttp response object.

    Carries a status code, an optional content payload and a canned
    'etag' header. When content is supplied, its read() is replaced by a
    mock returning an already-resolved future, mirroring aiohttp's
    coroutine-based read API.
    """

    def __init__(self, status, content=None):
        self.status = status
        self.content = content
        self.headers = {'etag': 'mock'}
        if content:
            resolved = asyncio.Future(loop=None)
            resolved.set_result(content)
            self.content.read = mock.Mock(return_value=resolved)

    def decode(self):
        """Delegate decoding to the wrapped content object."""
        return self.content.decode()
class Content(object):
    """Iterable fake for a response body.

    decode() always produces a JSON string; bytes payloads are first
    decoded to text so the JSON value is a string rather than raw bytes.
    """

    def __init__(self, contents):
        self.contents = contents

    def read(self):
        # Intentionally a no-op; tests replace it when a future is needed.
        pass

    def decode(self):
        payload = self.contents
        if isinstance(payload, bytes):
            payload = payload.decode()
        return json.dumps(payload)

    def __iter__(self):
        return iter(self.contents)
class Test_P3k_SwiftClient(V1Base):
    """Exercises the asyncio swift-client wrapper against mocked aiohttp."""

    def setUp(self):
        self.storage_url = 'http://mock_storage_url.com'
        self.token = self.create_auth_token()
        self.vault = self.create_vault_id()
        self.block = self.create_block_id()
        self.blocks = ['mock1', 'mock2']
        self.block_contents = [b'mock', b'mock']
        self.response_dict = dict()

    def _patch_request(self, status, content=None):
        """Point aiohttp.request at a mock resolving to a canned Response.

        Returns the canned Response so callers can assert against it.
        """
        canned = Response(status, content)
        resolved = asyncio.Future(loop=None)
        resolved.set_result(canned)
        p3k_swiftclient.aiohttp.request = mock.Mock(return_value=resolved)
        return canned

    def test_put_container(self):
        self._patch_request(201)
        p3k_swiftclient.put_container(
            self.storage_url,
            self.token,
            self.vault,
            self.response_dict)
        self.assertEqual(self.response_dict['status'], 201)

    def test_head_container(self):
        canned = self._patch_request(200)
        headers = p3k_swiftclient.head_container(
            self.storage_url,
            self.token,
            self.vault)
        self.assertEqual(headers, canned.headers)

        # A 404 must surface as a swift ClientException.
        self._patch_request(404)
        self.assertRaises(
            ClientException,
            lambda: p3k_swiftclient.head_container(
                self.storage_url,
                self.token,
                self.vault))

    def test_get_container_marker_limit(self):
        # With marker and limit.
        listing = Content([{'name': 'mocka'},
                           {'name': 'mockb'},
                           {'name': 'mockc'}])
        self._patch_request(200, listing)
        names = p3k_swiftclient.get_container(
            self.storage_url,
            self.token,
            self.vault,
            3,
            'mocka')
        self.assertEqual(names, [entry['name'] for entry in listing])

    def test_get_container_no_marker_limit(self):
        # With marker and limit as None.
        listing = Content([{'name': 'mocka'},
                           {'name': 'mockb'},
                           {'name': 'mockc'}])
        self._patch_request(200, listing)
        names = p3k_swiftclient.get_container(
            self.storage_url,
            self.token,
            self.vault,
            None,
            None)
        self.assertEqual(names, [entry['name'] for entry in listing])

    def test_get_non_existent_container(self):
        self._patch_request(404, Content([{'name': 'mock'}]))
        self.assertRaises(
            ClientException,
            lambda: p3k_swiftclient.get_container(
                self.storage_url,
                self.token,
                self.vault))

    def test_delete_container(self):
        self._patch_request(204)
        p3k_swiftclient.delete_container(
            self.storage_url,
            self.token,
            self.vault,
            self.response_dict)
        self.assertEqual(self.response_dict['status'], 204)

    def test_put_object(self):
        self._patch_request(201)
        # Without an etag...
        p3k_swiftclient.put_object(
            self.storage_url,
            self.token,
            self.vault,
            self.block,
            'mock',
            '4',
            None,
            self.response_dict)
        self.assertEqual(self.response_dict['status'], 201)
        # ...and with one.
        p3k_swiftclient.put_object(
            self.storage_url,
            self.token,
            self.vault,
            'mock',
            'mock',
            '4',
            'mock',
            self.response_dict)
        self.assertEqual(self.response_dict['status'], 201)

    def test_put_async_object(self):
        self._patch_request(201)
        p3k_swiftclient.put_async_object(
            self.storage_url,
            self.token,
            self.vault,
            self.blocks,
            self.block_contents,
            False,
            self.response_dict)
        self.assertEqual(self.response_dict['status'], 201)
        p3k_swiftclient.put_async_object(
            self.storage_url,
            self.token,
            self.vault,
            self.blocks,
            self.block_contents,
            True,
            self.response_dict)
        self.assertEqual(self.response_dict['status'], 201)

        # Any status other than 201 is reported back as a 500.
        self._patch_request(202)
        p3k_swiftclient.put_async_object(
            self.storage_url,
            self.token,
            self.vault,
            self.blocks,
            self.block_contents,
            False,
            self.response_dict)
        self.assertEqual(self.response_dict['status'], 500)

    def test_head_object(self):
        canned = self._patch_request(204)
        headers = p3k_swiftclient.head_object(
            self.storage_url,
            self.token,
            self.vault,
            'mock')
        self.assertEqual(canned.headers, headers)

        # A missing object must surface as a swift ClientException.
        self._patch_request(404)
        self.assertRaises(
            ClientException,
            lambda: p3k_swiftclient.head_object(
                self.storage_url,
                self.token,
                self.vault,
                'mock'))

    def test_get_object(self):
        payload = MockFile(10)
        canned = self._patch_request(200, payload)
        result, body = p3k_swiftclient.get_object(
            self.storage_url,
            self.token,
            self.vault,
            self.block,
            self.response_dict)
        self.assertEqual(canned, result)
        self.assertEqual(payload, body)

    def test_delete_object(self):
        self._patch_request(204)
        p3k_swiftclient.delete_object(
            self.storage_url,
            self.token,
            self.vault,
            self.block,
            self.response_dict)
        self.assertEqual(self.response_dict['status'], 204)
| |
from textblob import TextBlob
from Voice.SelectTask import *
import numpy as np
import math
import json
import logging
import copy
# Seed example utterances for each task class. bernoulli_Selection()
# extends this mapping at runtime and persists it to Responses.json.
Responses = {"search": ["who is donald trump", "what is cnc machine", "how to implement quicksort algorithm",
                        "can you search how to remove stop words", "what is the weather today"],
             "screenshot": ["screenshot"],
             "type": ["type this"],
             "youtube": ["I want to hear peaceful music", "play me a song starboy", "I want to watch a movie star wars"],
             "news": ["what is todays headlines", "tell me news about narendra modi", "what is the weather today"],
             "reminder": ["set an alarm at 8:30 pm", "remind me, I have a meeting at 8:00 pm"],
             "email": ["send an email to gmail"]
             }
# Questions = [ ["",""], ["",""], ["",""], ["",""], ["",""], ["",""], ["",""] ]
# Column-parallel training table: row i pairs one interrogative/verb/noun
# with a class label. Populated by append_In_MatchTable() and persisted to
# Match_Table.json; the commented lists illustrate the intended layout.
Match_Table = {
    "Interrogatives": [],#["who", "what", "how", "how", "", "what", "", "", "", "", "what", "", "", "", "", ""],
    "Verbs": [],#["", "", "implement", "implement", "search", "", "type", " write", "hear", "play", "", "tell", "set",
    #"put", "remind", "send"],
    "Nouns": [],#["trump", "machine", "quicksort", "algorithm", "words", "weather", "", "", "", "starboy", "headline",
    #"narendra", "alarm", "alarm", "meeting", "email"],
    "Class": [],#["search", "search", "search", "search", "search", "search", "type", "type", "youtube", "youtube", "news",
    #"news", "reminder", "reminder", "email"]
}
# Number of task classes (see the ClassName mapping below).
total_Num_Class = 7
# NOTE(review): LowConfidence is never referenced in this module -- confirm
# whether it is still needed.
LowConfidence = 0.1
# Minimum word-similarity for a soft match in similarity_Matrix().
similarity_Threshold = 0.5
def pro_Class(key="search"):
    """Return the prior P(class == key): the fraction of rows in
    Match_Table labelled with *key*.

    Returns 0 when the table is empty (it starts empty at module load
    and is only filled by auto_append()/a saved JSON table); the previous
    implementation raised ZeroDivisionError in that case.
    """
    labels = Match_Table["Class"]
    if not labels:
        return 0
    return labels.count(key) / len(labels)
def cal_Each_Column_Pro(queryWord="", className="search", key="Verbs"):
    """Laplace-smoothed likelihood of *queryWord* appearing in column
    *key* among the rows labelled *className*.
    """
    word_matches = 0
    docs_in_class = 0
    for position, label in enumerate(Match_Table["Class"]):
        if label != className:
            continue
        docs_in_class += 1
        if Match_Table[key][position] == queryWord:
            word_matches += 1
    # Add-one smoothing so unseen words never zero out the product.
    return (word_matches + 1) / (docs_in_class + total_Num_Class)
def cal_total_Pro(queryInterrogatives=(), queryVerbs=(), queryNouns=(), className="search"):
    """Naive-Bayes style score for *className*: the product of per-word
    likelihoods across all three feature columns, times the class prior.

    The defaults are now immutable tuples instead of the original shared
    mutable list literals (a classic Python pitfall); callers passing
    their own lists are unaffected.
    """
    interrogatives_result = 1
    for eachQueryInterrogative in queryInterrogatives:
        interrogatives_result *= cal_Each_Column_Pro(
            eachQueryInterrogative, className, "Interrogatives")
    verbs_result = 1
    for eachQueryVerb in queryVerbs:
        verbs_result *= cal_Each_Column_Pro(eachQueryVerb, className, "Verbs")
    nouns_result = 1
    for eachQueryNoun in queryNouns:
        nouns_result *= cal_Each_Column_Pro(eachQueryNoun, className, "Nouns")
    return (float(interrogatives_result) * float(verbs_result)
            * float(nouns_result) * float(pro_Class(className)))
def similarity_Matrix(big_Union, text, row, column):
    """Return, for each word of *big_Union*, its best match score against
    the words of *text*.

    A score of 1 means an exact word match; otherwise the highest
    SimilarityCheck() value above similarity_Threshold is used (0 if
    none). *row*/*column* must equal len(text.split()) / len(big_Union).

    NOTE(review): the local name `max` shadows the builtin inside this
    function -- consider renaming.
    """
    # Pass 1: exact-match indicator matrix; rows index query words,
    # columns index union words.
    vec_list = np.zeros((row, column), dtype=np.float64)
    i = 0
    j = 0
    for eachQueryWord in text.split():
        for eachUnionWord in big_Union:
            if eachQueryWord == eachUnionWord:
                vec_list[i, j] = 1
            else:
                vec_list[i, j] = 0
            j += 1
        i += 1
        j = 0
    # Pass 2: per union word, short-circuit on an exact match, else keep
    # the best soft-similarity score against any query word.
    i = 0
    j = 0
    sim_vector = []
    for eachUnionWord in big_Union:
        max = 0
        for eachQueryWord in text.split():
            if vec_list[j, i] == 1:
                max = 1
                break
            else:
                similarity = SimilarityCheck(eachUnionWord, eachQueryWord)
                if similarity > similarity_Threshold:
                    vec_list[j, i] = similarity
                else:
                    vec_list[j, i] = 0
                if max <= vec_list[j, i]:
                    max = vec_list[j, i]
            j += 1
        sim_vector.append(max)
        i += 1
        j = 0
    return sim_vector
def match_Similarity_with_Resposes(className, queryText):
    """Cosine similarity between *queryText* and the bag of words of all
    stored example responses for *className*.

    Returns 0.0 when either side has no matching mass (the previous
    implementation divided by zero in that case).
    """
    bag_Of_Words = []
    for eachSentence in Responses[className]:
        for eachWord in eachSentence.split():
            bag_Of_Words.append(eachWord)
    big_Union = list(set().union(bag_Of_Words, queryText.split()))

    # Similarity vectors of the query and of the class's response text
    # against the shared vocabulary.
    s1 = similarity_Matrix(
        big_Union, queryText, len(queryText.split()), len(big_Union))
    classResponseText = ""
    for eachWord in bag_Of_Words:
        classResponseText += eachWord
        classResponseText += " "
    s2 = similarity_Matrix(
        big_Union, classResponseText, len(bag_Of_Words), len(big_Union))

    dot_Product = np.dot(s1, s2)
    magnitude_S1 = math.sqrt(sum(i ** 2 for i in s1))
    magnitude_S2 = math.sqrt(sum(i ** 2 for i in s2))
    denominator = magnitude_S1 * magnitude_S2
    # Guard: an all-zero vector (e.g. no stored responses) would
    # otherwise raise ZeroDivisionError.
    if denominator == 0:
        return 0.0
    cosine_Sim = dot_Product / denominator
    print("Cosine Similarity: ", cosine_Sim)
    return cosine_Sim
# Words carrying little classification signal, stripped before POS
# analysis. The previous version kept these in a single backslash-continued
# string literal (fragile at the line joins) and re-split it for every
# word, which made the membership test quadratic.
_STOPWORDS = frozenset("""
    a about above after again against all am an and any are aren't as at be
    because been before being below between both but by can't cannot could
    couldn't did didn't do does doesn't doing don't down during each few for
    from further had hadn't has hasn't have haven't having he he'd he'll he's
    her here here's hers herself him himself his how's i i'd i'll i'm i've if
    in into isn't it it's its itself let's me more most mustn't my myself no
    nor not of off on once only or other ought our ours
    """.split())


def remove_StopWords(text):
    """Return *text* as a TextBlob with common English stopwords removed.

    Word order of the surviving tokens is preserved.
    """
    blob = TextBlob(text)
    stripped_Sentence = ""
    for eachWord in blob.words:
        if eachWord in _STOPWORDS:
            # Fixed: logging uses printf-style arguments, so the message
            # needs a %s placeholder; the old call passed eachWord with no
            # placeholder, producing a formatting error inside logging.
            logging.info("Stopword found: %s", eachWord)
        else:
            stripped_Sentence = stripped_Sentence + " " + eachWord
    return TextBlob(stripped_Sentence)
def GetInterrogatives(blob):
    """Return the question words of *blob*, in order of appearance
    (duplicates preserved).
    """
    question_words = ("what", "when", "why", "which", "who", "how", "whose")
    return [word for word in blob.words if word in question_words]
def bernoulli_Selection(text):
    """Classify *text* into one of the seven task classes and dispatch it.

    Scores every class with a naive-Bayes style probability, breaks
    near-ties via cosine similarity against stored example responses,
    launches the matched task on a background thread, and persists the
    updated learning state.

    Returns (class_index, probability_of_that_class).
    """
    global Responses, Match_Table

    # Seed the match table from the built-in examples, then prefer any
    # previously persisted state on disk.
    auto_append(Responses)
    try:
        with open('Responses.json', 'r') as inputFile:
            Responses = json.load(inputFile)
    except (IOError, OSError, ValueError):
        print("Responses.json file not found")
    try:
        with open('Match_Table.json', 'r') as inputFile:
            Match_Table = json.load(inputFile)
    except (IOError, OSError, ValueError):
        print("Match Table.json file not found")

    blob = remove_StopWords(text.lower())
    verbs = GetVerbs(blob)
    nouns = GetNoun(blob)
    interrogatives = GetInterrogatives(blob)

    # One score per class, indexed consistently with ClassName.
    selection_list = [
        cal_total_Pro(interrogatives, verbs, nouns, ClassName[i])
        for i in range(7)
    ]

    # Rank the classes by probability. Work on a copy so selection_list
    # keeps its original values for the final return. (The previous
    # implementation inserted zeros into the live list, which shifted
    # every later index and corrupted both the ranking and the returned
    # probability.)
    remaining = list(selection_list)
    Objects = []
    for rank_pos in range(7):
        entry = Rank()
        entry.probability = max(remaining)
        entry.index = remaining.index(entry.probability)
        Objects.append(entry)
        remaining[entry.index] = -1  # knock out; positions stay stable
        print("Rank ", rank_pos + 1, entry.index, entry.probability)

    # When adjacent ranks are (nearly) tied, fall back to cosine
    # similarity against each class's stored example responses.
    max_Similarity_Index = 0
    best_similarity = 0
    checkFlag = False
    for pos in range(6):
        try:
            gap = Objects[pos].probability - Objects[pos + 1].probability
            if gap <= 0.1:  # covers exact ties as well
                checkFlag = True
                similarity1 = match_Similarity_with_Resposes(
                    ClassName[Objects[pos].index], text)
                similarity2 = match_Similarity_with_Resposes(
                    ClassName[Objects[pos + 1].index], text)
                # Track the best similarity and its class separately.
                # (The previous code compared the running *index* against
                # the similarity score, mixing two unrelated quantities.)
                if best_similarity < similarity1:
                    best_similarity = similarity1
                    max_Similarity_Index = Objects[pos].index
                if best_similarity < similarity2:
                    best_similarity = similarity2
                    max_Similarity_Index = Objects[pos + 1].index
            else:
                break
        except Exception:
            # Defensive: a malformed table entry must not abort selection.
            pass

    if checkFlag:
        # Reinforce the winning class with this query's features.
        append_In_MatchTable(blob, ClassName[max_Similarity_Index], text)
        chosen_index = max_Similarity_Index
    else:
        chosen_index = Objects[0].index

    # Execute the matched task without blocking the caller.
    thread = threading.Thread(target=switchExecuteTask, args=(chosen_index, text))
    thread.start()

    # Persist the (possibly) updated learning state.
    try:
        with open('Responses.json', 'w') as outfile:
            json.dump(Responses, outfile)
    except (IOError, OSError, TypeError):
        print("Could not write Response.json")
    try:
        with open('Match_Table.json', 'w') as outfile:
            json.dump(Match_Table, outfile)
    except (IOError, OSError, TypeError):
        print("Could not write Match_Table.json")

    return chosen_index, selection_list[chosen_index]
def append_In_MatchTable(blob, className, text):
    """Append one training row per extracted feature of *blob* to
    Match_Table, labelled *className*, and remember *text* as an example
    response for that class.

    The bare except blocks are used as control flow: an IndexError just
    means the corresponding feature list is shorter than the longest one,
    so a blank placeholder (" ") is inserted instead.
    """
    nouns = GetNoun(blob)
    verbs = GetVerbs(blob)
    interrogatives = GetInterrogatives(blob)
    # Number of rows to add = length of the longest feature list.
    lengths = []
    lengths.append(len(nouns))
    lengths.append(len(verbs))
    lengths.append(len(interrogatives))
    try:
        # Insertion point: one past the last existing row.
        last_Index = Match_Table["Interrogatives"].index(Match_Table["Interrogatives"][-1]) + 1
    except:
        # Empty table: start at row 0.
        last_Index = 0
    max_Length = max(lengths)
    for i in list(range(max_Length)):
        # A word already known for an already-known class is stored as a
        # blank to avoid duplicating evidence.
        try:
            if interrogatives[i] in Match_Table["Interrogatives"] and className in Match_Table["Class"]:
                Match_Table["Interrogatives"].insert(last_Index," ")
            else:
                Match_Table["Interrogatives"].insert(last_Index,interrogatives[i])
        except:
            Match_Table["Interrogatives"].insert(last_Index," ")
        try:
            if verbs[i] in Match_Table["Verbs"] and className in Match_Table["Class"]:
                Match_Table["Verbs"].insert(last_Index," ")
            else:
                Match_Table["Verbs"].insert(last_Index,verbs[i])
        except:
            Match_Table["Verbs"].insert(last_Index," ")
        try:
            if nouns[i] in Match_Table["Nouns"] and className in Match_Table["Class"]:
                Match_Table["Nouns"].insert(last_Index," ")
            else:
                Match_Table["Nouns"].insert(last_Index,nouns[i])
        except:
            Match_Table["Nouns"].insert(last_Index," ")
        Match_Table["Class"].insert(last_Index,className)
        # Drop a row that ended up entirely blank.
        # NOTE(review): this inspects position i, not the row just
        # inserted at last_Index -- confirm which index was intended.
        if Match_Table["Interrogatives"][i] == " " and Match_Table["Verbs"][i] == " " and Match_Table["Nouns"][i] == " ":
            Match_Table["Interrogatives"].remove(Match_Table["Interrogatives"][i])
            Match_Table["Verbs"].remove(Match_Table["Verbs"][i])
            Match_Table["Nouns"].remove(Match_Table["Nouns"][i])
            Match_Table["Class"].remove(Match_Table["Class"][i])
    #print(len(Match_Table["Interrogatives"]), len(Match_Table["Verbs"]), len(Match_Table["Nouns"]),len(Match_Table["Class"]))
    if text in Responses[className]:
        logging.info("Response found")
    else:
        Responses[className].append(text)
    # NOTE(review): chained with `and`, a single pair of equal lengths
    # suppresses the warning -- `or` was probably intended for this
    # column-length consistency check.
    if len(Match_Table["Interrogatives"]) != len(Match_Table["Verbs"]) and len(Match_Table["Verbs"]) != len(Match_Table["Nouns"]) and len(Match_Table["Nouns"]) != len(Match_Table["Class"]):
        logging.debug("Sync failed!")
    #print(Match_Table["Interrogatives"], Match_Table["Verbs"], Match_Table["Nouns"], Match_Table["Class"])
def auto_append(Responses):
    """Feed every built-in example response through append_In_MatchTable
    so the match table covers all seven task classes.
    """
    for class_index in range(7):
        label = ClassName[class_index]
        for example in Responses[label]:
            append_In_MatchTable(
                blob=remove_StopWords(example), className=label, text=example)
# Maps ranking indices (0-6) to task-class labels; shared by
# bernoulli_Selection() and auto_append().
ClassName = {
    0: "search",
    1: "screenshot",
    2: "type",
    3: "youtube",
    4: "news",
    5: "reminder",
    6: "email"
}
class Rank:
    """One ranking entry: a class index and its selection probability."""

    def __init__(self):
        self.index = 0
        self.probability = 0
def main():
    """Manual smoke test: classify a sample weather query end-to-end."""
    bernoulli_Selection("what is the weather today")
# Automate Responses
if __name__ == '__main__':
    main()
| |
import pandas
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from hmmlearn import hmm
from hmmlearn import utils
# Load the EUR exchange-rate series ("Kurs" = rate, "Zmiana" = change).
table = pandas.read_csv('euro.csv', delimiter=';')
rate = np.array(table["Kurs"])
change = np.array(table["Zmiana"])
# NOTE(review): the CSV's own change column is immediately overwritten by
# the first difference of the rate -- confirm this is intended.
change = np.diff(rate)
# comb = np.column_stack((rate,change))
comb = change.reshape(-1,1)
# Scale by 10 and hold out the last 100 observations for testing.
data = 10*comb[:-100]
test = 10*comb[-100:]
# --- Alternative synthetic test signals, kept for experimentation ---
# samples = 2001
# x = 2*np.pi*np.arange(samples)
# rate = 10*np.sin(x/50)
# change = 10*np.cos(x/50)
# rate += np.random.random(samples)
# change += np.random.random(samples)
# data = np.column_stack((rate,change))
# sign = 10*np.sin(x/50)*np.sin(x/40)+10*np.cos(x/30)
# diff = np.diff(sign)[:1000]
# csum = np.cumsum(sign)[:1000]
# sign = sign[:1000]
# data = np.column_stack((sign,csum))
#
# data = sign.reshape(-1,1)
def train(data):
    """Fit a 128-state Gaussian HMM on *data* and return it.

    init_params is cleared afterwards so a later fit() call on the
    returned model continues from the learned parameters instead of
    re-initialising them.
    """
    fitted = hmm.GaussianHMM(
        n_components=1 * 128, tol=1., n_iter=100, verbose=True)
    fitted.fit(data)
    fitted.init_params = ''
    return fitted
def next_prob(model, data):
    """Posterior-weighted mixture density of emission values at the last
    time step of *data*.

    Returns (x, density) where x is a fixed 100-point grid on [-15, 15].

    NOTE(review): means_/_covars_ are indexed with [i, 1], i.e. the
    second emission dimension -- this only works for models trained on
    2-column observations (see the commented-out 2-column `comb` above);
    with the current 1-column data it would raise an IndexError. Also,
    mlab.normpdf was removed in matplotlib >= 3.1, so an older
    matplotlib is required.
    """
    _, posteriors = model.score_samples(data)
    x = np.linspace(-15, 15, 100)
    post = posteriors[-1, :]
    # Materialise the columns in a list: passing a bare generator to
    # np.column_stack is deprecated/unsupported in recent NumPy releases.
    aa = np.column_stack(
        [post[i] * mlab.normpdf(x, model.means_[i, 1],
                                model._covars_[i, 1] ** 0.5)
         for i in range(0, model.n_components)])
    return x, np.sum(aa, axis=1)
def predict(model, data, samples):
    """Forecast emission densities for the next *samples* steps.

    The last posterior is propagated through the transition matrix once
    per step. Returns (x, xx) where xx[:, j] is the predicted density on
    the grid x at forecast horizon j+1.

    NOTE(review): mlab.normpdf was removed in matplotlib >= 3.1; this
    requires an older matplotlib.
    """
    _, posteriors = model.score_samples(data)
    x = np.linspace(-15, 15, 100)
    post = np.dot(posteriors[-1], model.transmat_)
    xx = np.empty((len(x), samples))
    for j in range(samples):
        # List (not generator): np.column_stack on a bare generator is
        # deprecated/unsupported in recent NumPy releases.
        aa = np.column_stack(
            [post[i] * mlab.normpdf(x, model.means_[i],
                                    model._covars_[i] ** 0.5)
             for i in range(0, model.n_components)])
        xx[:, j] = np.sum(aa, axis=1)
        post = np.dot(post, model.transmat_)
    return x, xx
def predict2(model, data, samples, emis=1):
    """Like predict(), but for models with multi-dimensional emissions:
    the densities are taken along emission dimension *emis*.

    NOTE(review): mlab.normpdf was removed in matplotlib >= 3.1; this
    requires an older matplotlib.
    """
    _, posteriors = model.score_samples(data)
    x = np.linspace(-15, 15, 100)
    post = np.dot(posteriors[-1], model.transmat_)
    xx = np.empty((len(x), samples))
    for j in range(samples):
        # List (not generator): np.column_stack on a bare generator is
        # deprecated/unsupported in recent NumPy releases.
        aa = np.column_stack(
            [post[i] * mlab.normpdf(x, model.means_[i, emis],
                                    model._covars_[i, emis] ** 0.5)
             for i in range(0, model.n_components)])
        xx[:, j] = np.sum(aa, axis=1)
        post = np.dot(post, model.transmat_)
    return x, xx
def states_plot(model, e):
    """Plot the signal (top) above its per-state posterior heat map
    (bottom), sharing the x axis.
    """
    _, posterior = model.score_samples(e)
    plt.clf()
    signal_axis = plt.subplot(211)
    plt.plot(e)
    plt.subplot(212, sharex=signal_axis)
    plt.imshow(posterior.T, aspect='auto')
def plot_usage(model, e):
    """Plot the sorted, normalised posterior mass per hidden state."""
    _, posterior = model.score_samples(e)
    state_mass = posterior.sum(axis=0)
    plt.plot(np.sort(state_mass) / float(state_mass.sum()))
def plot_map_usage(model, signal):
    """Plot the sorted distribution of MAP state assignments for *signal*.

    Each point is the fraction of time steps assigned (via MAP decoding)
    to one hidden state, sorted ascending. The unused
    max_id/max_covar_id/ids/used locals of the previous version were
    dropped.
    """
    model.algorithm = 'map'
    assignments = model.predict(signal)
    counts = np.bincount(assignments, minlength=model.n_components)
    probs = counts / float(sum(counts))
    plt.plot(np.sort(probs))
def plot_viterbi_usage(model, signal):
    """Plot the sorted distribution of Viterbi state assignments.

    Same as plot_map_usage() but with Viterbi decoding. The unused
    max_id/max_covar_id/ids/used locals of the previous version were
    dropped.
    """
    model.algorithm = 'viterbi'
    assignments = model.predict(signal)
    counts = np.bincount(assignments, minlength=model.n_components)
    probs = counts / float(sum(counts))
    plt.plot(np.sort(probs))
def reorder_usage(model, e):
    """Permute the model's states into descending order of total
    posterior mass on *e*, updating all parameter arrays in place.
    """
    _, posterior = model.score_samples(e)
    total_mass = posterior.sum(axis=0)
    order = np.argsort(total_mass)[::-1]
    model.means_ = model.means_[order]
    model._covars_ = model._covars_[order]
    model.startprob_ = model.startprob_[order]
    # Permute rows and columns of the transition matrix consistently.
    model.transmat_ = model.transmat_[order][:, order]
def reorder_model(model, e):
    """Permute the model's states into descending order of hard
    (predict) assignment counts on *e*, updating all parameter arrays
    in place.
    """
    state_counts = np.bincount(model.predict(e), minlength=model.n_components)
    order = np.argsort(state_counts)[::-1]
    model.means_ = model.means_[order]
    model._covars_ = model._covars_[order]
    model.startprob_ = model.startprob_[order]
    # Permute rows and columns of the transition matrix consistently.
    model.transmat_ = model.transmat_[order][:, order]
def clone(model):
    """Return a deep copy of *model* via a pickle round-trip.

    Replaces the original joblib dump/load through a fixed, world-shared
    /tmp path (race-prone between processes and dependent on the
    long-deprecated sklearn.externals.joblib) with an in-memory pickle
    round-trip; the result is an independent copy of the model.
    """
    import pickle
    return pickle.loads(pickle.dumps(model))
def diff_plot(model,previous):
    """Compare *model* against *previous* in a 2x2 figure:
    transition matrix (top left), transition-matrix delta (top right),
    means and their delta (bottom left), covariances and their delta
    (bottom right). The top row shares axes; the bottom row shares x."""
    plt.clf()
    ax = plt.subplot(221)
    plt.imshow(model.transmat_)
    plt.subplot(222, sharex = ax, sharey = ax)
    plt.imshow(model.transmat_ - previous.transmat_)
    ax = plt.subplot(223)
    plt.plot(model.means_)
    plt.plot(model.means_ - previous.means_)
    plt.subplot(224, sharex = ax)
    plt.plot(model._covars_)
    plt.plot(model._covars_ - previous._covars_)
def fix_unused(model, signal):
    """Recycle (nearly) unused HMM states by cloning frequently used ones.

    Unused states are detected from each state's total posterior occupancy
    on *signal*; each is overwritten with a copy of a randomly chosen busy
    state (selection weighted by occupancy), using one of three strategies:
    unrolling a tight self loop ('s'), splitting incoming transitions ('i'),
    or splitting outgoing transitions ('o'). Mutates model.transmat_,
    startprob_, means_ and _covars_ in place and prints a summary map.
    """
    # Alternative detection via MAP/viterbi visit counts, kept for reference:
    # model.algorithm = 'map'
    # pred = model.predict(signal)
    # usage = np.bincount(pred,minlength=model.n_components)
    # treshold = np.sort(usage)[model.n_components//10]
    #
    # ids = np.argwhere(usage <= treshold).flatten()
    # used = np.argwhere(usage > treshold).flatten()
    # probs = usage/float(sum(usage))
    """Unused states decided on average state probability"""
    logprob, posterior = model.score_samples(signal)
    usage = np.sum(posterior.T,axis=1)
    treshold = np.sort(usage)[model.n_components//10]
    ids = np.argwhere(usage <= treshold).flatten()
    used = np.argwhere(usage > treshold).flatten()
    probs = usage/float(sum(usage))
    # NOTE(review): ids/used above are immediately recomputed below, and the
    # second `used` compares raw `usage` (not `probs`) against 0.001 --
    # confirm whether `probs` was intended there.
    ids = np.argwhere(probs <= 0.001).flatten()
    used = np.argwhere(usage > 0.001).flatten()
    mapped = {}
    # model.algorithm = 'map'
    import random
    import sklearn.mixture  # NOTE(review): appears unused in this function
    # never recycle more states than there are busy donors
    ids = ids[0:len(used)]
    # ids = ids[0:model.n_components//10]
    for id in ids:
        # replace_id = np.random.choice(used)
        # randomly select node to clone according to its "information weight"
        # replace_id = np.random.choice(model.n_components,p=probs)
        replace_id = random.choices(range(model.n_components),weights=probs)[0]
        # bookkeeping for the summary print: donor id and truncated probabilities
        mapped[id] = [replace_id, int(probs[id]*1000)/1000, int(probs[replace_id]*1000)/1000, int(model.transmat_[replace_id,replace_id]*1000)/1000]
        # if (np.sum(model.transmat_[:,replace_id])) > 3):
        # unroll thight self loop
        if model.transmat_[replace_id,replace_id] > 0.1:
            # can clone this state any more
            # NOTE(review): probs[id] is assigned AFTER probs[replace_id] is
            # zeroed, so both end up 0 -- the two assignments look swapped;
            # confirm the intended order.
            probs[replace_id] = 0
            probs[id] = probs[replace_id]
            mapped[id].append('s')
            in_trans = model.transmat_[:,id].copy()
            model.transmat_[id,:] = model.transmat_[replace_id,:]
            model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]
            model.transmat_[id,id] += model.transmat_[replace_id,replace_id]
            # effectively zero while keeping the entry strictly positive
            model.transmat_[replace_id,replace_id] = 2e-290
            # staing in giver state is forbidden
            # in place of that transit to cloned state
            # model.transmat_[replace_id,id] += model.transmat_[replace_id,replace_id]
            # model.transmat_[replace_id,replace_id] = 0.0001
            utils.normalize(model.transmat_, 1)
            model.startprob_[replace_id] /= 2.
            model.startprob_[id] += model.startprob_[replace_id]
            model.means_[id] = model.means_[replace_id]
            # diverge them slighly to cover more ground
            # model.means_[replace_id] *= 1.001
            model._covars_[id] = model._covars_[replace_id]
            #TODO: unroll longer loops
            #refit to general node
            # to many ins, to many out, to large emission - coverage
        elif random.random() > 0.5:
            # lower prob of used node
            # allow cloning of both
            # NOTE(review): probs is a float array, so //= 2 floor-divides
            # (usually to 0.0); confirm /= 2 was not intended.
            probs[replace_id] //= 2
            probs[id] = probs[replace_id]
            size = model.n_components
            # random 0/1 mask deciding which incoming edges go to the clone
            ord = np.random.binomial(1,0.5,model.n_components)
            nord = 1 - ord
            mapped[id].append('i')
            in_trans = model.transmat_[:,id].copy()
            # clone the not used node
            # out transitions (row) like in original
            model.transmat_[id,:] = model.transmat_[replace_id,:]
            # in trasitions (column) half for each of two (original and clone)
            model.transmat_[:,id][ord == 1] = model.transmat_[:,replace_id][ord == 1]
            model.transmat_[:,id][ord == 0] = 2e-290
            model.transmat_[:,replace_id][ord == 1] = 2e-290
            # original trans should be small, add to them to keep row normalization to 1
            utils.normalize(model.transmat_, 1)
            model.startprob_[replace_id] /= 2.
            model.startprob_[id] += model.startprob_[replace_id]
            model.means_[id] = model.means_[replace_id]
            model._covars_[id] = model._covars_[replace_id]
        else:
            # lower prob of used node
            # allow cloning of both
            probs[replace_id] //= 2
            probs[id] = probs[replace_id]
            size = model.n_components
            # random 0/1 mask deciding which outgoing edges go to the clone
            ord = np.random.binomial(1,0.5,model.n_components)
            nord = 1 - ord
            mapped[id].append('o')
            in_trans = model.transmat_[:,id].copy()
            # clone the not used node
            # out transitions (row) like in original
            model.transmat_[id,:][ord == 1] = model.transmat_[replace_id,:][ord == 1]
            model.transmat_[id,:][ord == 0] = 2e-290
            model.transmat_[replace_id,:][ord == 1] = 2e-290
            # in trasitions (column) half for each of two (original and clone)
            model.transmat_[:,replace_id] /= 2.
            model.transmat_[:,id] = in_trans/2. + model.transmat_[:,replace_id]
            # model.transmat_[:,replace_id] += in_trans/2.
            # original trans should be small, add to them to keep row normalization to 1
            utils.normalize(model.transmat_, 1)
            model.startprob_[replace_id] /= 2.
            model.startprob_[id] += model.startprob_[replace_id]
            model.means_[id] = model.means_[replace_id]
            model._covars_[id] = model._covars_[replace_id]
    print("fixed {} nodes of used {} and unused {}, with map {}".format(len(ids), len(used), model.n_components - len(used), mapped))
| |
'''
Created on 09/09/2014
@author: u76345
'''
from __future__ import absolute_import
import sys, logging
from eotools.utils import log_multiline
from agdc import DataCube
# Set top level standard output
# Route records at INFO and above to stdout, message text only.
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
# Module logger; only set a level if none has been configured elsewhere.
logger = logging.getLogger(__name__)
if not logger.level:
    logger.setLevel(logging.DEBUG) # Default logging level for all modules
logger.addHandler(console_handler)
class BandLookup(object):
    '''
    Class BandLookup manages band equivalence with band_tag lookups for a given set of
    lookup_scheme_name, tile_type_id, satellite_tag, sensor_name & level_name values
    '''

    _band_lookup_dict = {} # Class lookup dict - populated once from query
    _lookup_schemes = {} # Dict containing the descriptions of all available lookup schemes

    def __init__(self,
                 data_cube,
                 lookup_scheme_name=None,
                 tile_type_id=1, # Should this be None?
                 satellite_tag=None,
                 sensor_name=None,
                 level_name=None):
        '''
        Constructor for BandLookup class

        Parameters (can all be set later with the exception of data_cube):
            data_cube: Parent data_cube (or descendant) object
            lookup_scheme_name: lookup scheme name. Needs to be a member of self.lookup_schemes
            tile_type_id: Tile Type identifier. Defaults to 1 - should this be None?
            satellite_tag: Short name of satellite
            sensor_name: Name of sensor
            level_name: Processing level name
        '''
        assert isinstance(data_cube, DataCube), 'data_cube parameter must be of type DataCube'
        assert not lookup_scheme_name or type(lookup_scheme_name) == str, 'lookup_scheme_name parameter must be of type str'
        # Bug fix: the original referenced `long`, which no longer exists on
        # Python 3 and raised NameError there. Accept (int, long) on Python 2
        # and plain int on Python 3, where the two types are unified.
        try:
            integer_types = (int, long)
        except NameError: # Python 3
            integer_types = (int,)
        assert not tile_type_id or type(tile_type_id) in integer_types, 'tile_type_id parameter must be of type long or int'
        assert not satellite_tag or type(satellite_tag) == str, 'satellite_tag parameter must be of type str'
        assert not sensor_name or type(sensor_name) == str, 'sensor_name parameter must be of type str'
        assert not level_name or type(level_name) == str, 'level_name parameter must be of type str'

        if data_cube.debug:
            console_handler.setLevel(logging.DEBUG)

        # Set instance values if provided as constructor parameters
        self.lookup_scheme_name = lookup_scheme_name
        self.tile_type_id = tile_type_id
        self.satellite_tag = satellite_tag
        self.sensor_name = sensor_name
        self.level_name = level_name

        self.db_connection = data_cube.db_connection
        db_cursor = self.db_connection.cursor()

        if not BandLookup._band_lookup_dict: # Check whether class lookup dict has been populated
            sql = """-- Retrieve all band equivalence information
 SELECT
    band_lookup_scheme.lookup_scheme_name,
    band_source.tile_type_id,
    coalesce(satellite.satellite_tag, 'DERIVED') as satellite_tag,
    coalesce(sensor_name, level_name) as sensor_name,
    processing_level.level_name,
    band_equivalent.master_band_tag,
    band_source.tile_layer,
    band_equivalent.nominal_centre::float,
    band_equivalent.nominal_bandwidth::float,
    band_equivalent.centre_tolerance::float,
    band_equivalent.bandwidth_tolerance::float,
    COALESCE(band_adjustment.adjustment_offset, 0.0)::float AS adjustment_offset,
    COALESCE(band_adjustment.adjustment_multiplier, 1.0)::float AS adjustment_multiplier,
    band_lookup_scheme.lookup_scheme_id,
    band.satellite_id,
    band.sensor_id,
    band.band_id,
    band_equivalent.master_band_name,
    band_type_name,
    band.min_wavelength::float,
    band.max_wavelength::float,
    band_lookup_scheme.lookup_scheme_description
   FROM band
   JOIN band_type using(band_type_id)
   JOIN band_source using (band_id)
   JOIN processing_level using(level_id)
   JOIN band_equivalent ON band_equivalent.band_type_id = band.band_type_id
     and abs((band.max_wavelength::numeric + band.min_wavelength::numeric) / 2.0 - band_equivalent.nominal_centre) <= band_equivalent.centre_tolerance
     AND abs(band.max_wavelength::numeric - band.min_wavelength::numeric - band_equivalent.nominal_bandwidth) <= band_equivalent.bandwidth_tolerance
   JOIN band_lookup_scheme USING (lookup_scheme_id)
   LEFT JOIN band_adjustment USING (lookup_scheme_id, band_id)
   LEFT JOIN sensor using(satellite_id, sensor_id)
   LEFT JOIN satellite using(satellite_id)
   ORDER BY 1,2,3,4,5,7
"""
            log_multiline(logger.debug, sql, 'SQL', '\t')
            db_cursor.execute(sql)

            for record in db_cursor:
                # Create nested dict with levels keyed by:
                # lookup_scheme_name, tile_type_id, satellite_tag, sensor_name, level_name, band_tag
                lookup_scheme_dict = BandLookup._band_lookup_dict.get(record[0])
                if lookup_scheme_dict is None:
                    lookup_scheme_dict = {}
                    BandLookup._band_lookup_dict[record[0]] = lookup_scheme_dict
                    BandLookup._lookup_schemes[record[0]] = record[21] # Set lookup scheme description

                tile_type_id_dict = lookup_scheme_dict.get(record[1])
                if tile_type_id_dict is None:
                    tile_type_id_dict = {}
                    lookup_scheme_dict[record[1]] = tile_type_id_dict

                satellite_tag_dict = tile_type_id_dict.get(record[2])
                if satellite_tag_dict is None:
                    satellite_tag_dict = {}
                    tile_type_id_dict[record[2]] = satellite_tag_dict

                sensor_name_dict = satellite_tag_dict.get(record[3])
                if sensor_name_dict is None:
                    sensor_name_dict = {}
                    satellite_tag_dict[record[3]] = sensor_name_dict

                level_name_dict = sensor_name_dict.get(record[4])
                if level_name_dict is None:
                    level_name_dict = {}
                    sensor_name_dict[record[4]] = level_name_dict

                assert level_name_dict.get(record[5]) is None, 'Duplicated band_tag record'
                level_name_dict[record[5]] = {
                    'tile_layer': record[6],
                    'nominal_centre': record[7],
                    'nominal_bandwidth': record[8],
                    'centre_tolerance': record[9],
                    'bandwidth_tolerance': record[10],
                    'adjustment_offset': record[11],
                    'adjustment_multiplier': record[12],
                    'lookup_scheme_id': record[13],
                    'satellite_id': record[14],
                    'sensor_id': record[15],
                    'band_id': record[16],
                    'master_band_name': record[17],
                    'band_type_name': record[18],
                    'min_wavelength': record[19],
                    'max_wavelength': record[20]
                }

            log_multiline(logger.debug, BandLookup._band_lookup_dict, 'BandLookup._band_lookup_dict', '\t')

    def _get_level_name_dict(self):
        '''
        Returns level_name_dict for pre-set lookup_scheme_name, tile_type_id, satellite_tag, sensor_name & level_name
        Returns an empty dict if not found (the original docstring incorrectly said None)
        '''
        assert self.lookup_scheme_name, 'lookup_scheme_name not set'
        assert self.tile_type_id, 'tile_type_id not set'
        assert self.satellite_tag, 'satellite_tag not set'
        assert self.sensor_name, 'sensor_name not set'
        assert self.level_name, 'level_name not set'

        try:
            level_name_dict = BandLookup._band_lookup_dict[self.lookup_scheme_name][self.tile_type_id][self.satellite_tag][self.sensor_name][self.level_name]
        except KeyError:
            level_name_dict = {}
        return level_name_dict

    @property
    def lookup_schemes(self):
        '''
        Returns a dict of available lookup_scheme descriptions keyed by lookup_scheme_name
        '''
        return dict(BandLookup._lookup_schemes)

    @property
    def bands(self):
        '''
        Returns a list of band tags for the current lookup_scheme_name, tile_type_id, satellite_tag, sensor_name & level_name sorted by centre wavelength
        '''
        level_name_dict = self._get_level_name_dict()
        return sorted([band_tag for band_tag in level_name_dict.keys()],
                      key=lambda band_tag: level_name_dict[band_tag]['nominal_centre'])

    @property
    def band_info(self):
        '''
        Returns a nested dict keyed by band tag containing all info for each band for the current lookup_scheme_name, tile_type_id, satellite_tag, sensor_name & level_name
        '''
        level_name_dict = self._get_level_name_dict()
        return dict(level_name_dict)

    @property
    def band_no(self):
        '''
        Returns a dict keyed by band tag containing the one-based integer band number for each band_tag for the current lookup_scheme_name, tile_type_id, satellite_tag, sensor_name & level_name
        '''
        level_name_dict = self._get_level_name_dict()
        return {band_tag: level_name_dict[band_tag]['tile_layer'] for band_tag in level_name_dict}

    @property
    def band_index(self):
        '''
        Returns a dict keyed by band tag containing the zero-based integer band number for each band_tag for the current lookup_scheme_name, tile_type_id, satellite_tag, sensor_name & level_name
        '''
        level_name_dict = self._get_level_name_dict()
        return {band_tag: level_name_dict[band_tag]['tile_layer'] - 1 for band_tag in level_name_dict}

    @property
    def adjustment_offset(self):
        '''
        Returns a dict keyed by band tag containing the floating point adjustment offset for each band_tag for the current lookup_scheme_name, tile_type_id, satellite_tag, sensor_name & level_name
        '''
        level_name_dict = self._get_level_name_dict()
        return {band_tag: level_name_dict[band_tag]['adjustment_offset'] for band_tag in level_name_dict}

    @property
    def adjustment_multiplier(self):
        '''
        Returns a dict keyed by band tag containing the floating point adjustment multiplier for each band_tag for the current lookup_scheme_name, tile_type_id, satellite_tag, sensor_name & level_name
        '''
        level_name_dict = self._get_level_name_dict()
        return {band_tag: level_name_dict[band_tag]['adjustment_multiplier'] for band_tag in level_name_dict}

    @property
    def band_lookup_dict(self):
        """
        Returns a copy of the class value _band_lookup_dict
        """
        return dict(BandLookup._band_lookup_dict)
| |
"""
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the :class:`~sklearn.svm.OneClassSVM`
as our modeling tool. The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <https://matplotlib.org/basemap/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
def construct_grids(batch):
    """Build the 1-D x/y coordinate grids for ``batch.coverages``.

    Parameters
    ----------
    batch : Batch object
        The object returned by :func:`fetch_species_distributions`

    Returns
    -------
    (xgrid, ygrid) : 1-D arrays
        The grid corresponding to the values in batch.coverages
    """
    cell = batch.grid_size
    # coordinates of the corner cells
    x_start = batch.x_left_lower_corner + cell
    y_start = batch.y_left_lower_corner + cell
    # one coordinate per grid cell along each axis
    xgrid = np.arange(x_start, x_start + batch.Nx * cell, cell)
    ygrid = np.arange(y_start, y_start + batch.Ny * cell, cell)
    return (xgrid, ygrid)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
    """Bundle the train/test records and coverage features for one species.

    Filters the record arrays to the rows matching *species_name* and
    attaches, per split, the points (``pts_*``) and the coverage values at
    those points (``cov_*``).
    """
    display_name = " ".join(species_name.split("_")[:2])
    bunch = Bunch(name=display_name)
    # record arrays store the species name as bytes
    encoded_name = species_name.encode("ascii")
    for label, records in (("test", test), ("train", train)):
        # rows belonging to the requested species only
        selected = records[records["species"] == encoded_name]
        bunch["pts_%s" % label] = selected
        # coverage values at each point; y is indexed from the flipped grid
        col = np.searchsorted(xgrid, selected["dd long"])
        row = np.searchsorted(ygrid, selected["dd lat"])
        bunch["cov_%s" % label] = coverages[:, -row, col].T
    return bunch
def plot_species_distribution(
    species=("bradypus_variegatus_0", "microryzomys_minutus_0")
):
    """
    Plot the species distribution.

    Fetches the species dataset, fits a OneClassSVM on the standardized
    coverage features of each of the first two species, and draws each
    predicted distribution map with its train/test points, reporting ROC
    AUC against randomly drawn background points.

    Parameters
    ----------
    species : tuple of str
        Dataset species identifiers; only the first two are used.
    """
    if len(species) > 2:
        print(
            "Note: when more than two species are provided,"
            " only the first two will be used"
        )

    t0 = time()

    # Load the compressed data
    data = fetch_species_distributions()

    # Set up the data grid
    xgrid, ygrid = construct_grids(data)

    # The grid in x,y coordinates
    X, Y = np.meshgrid(xgrid, ygrid[::-1])

    # create a bunch for each species
    BV_bunch = create_species_bunch(
        species[0], data.train, data.test, data.coverages, xgrid, ygrid
    )
    MM_bunch = create_species_bunch(
        species[1], data.train, data.test, data.coverages, xgrid, ygrid
    )

    # background points (grid coordinates) for evaluation
    np.random.seed(13)
    background_points = np.c_[
        np.random.randint(low=0, high=data.Ny, size=10000),
        np.random.randint(low=0, high=data.Nx, size=10000),
    ].T

    # We'll make use of the fact that coverages[6] has measurements at all
    # land points. This will help us decide between land and water.
    land_reference = data.coverages[6]

    # Fit, predict, and plot for each species.
    # NOTE: the loop variable deliberately shadows the `species` parameter,
    # which is not used past this point.
    for i, species in enumerate([BV_bunch, MM_bunch]):
        print("_" * 80)
        print("Modeling distribution of species '%s'" % species.name)

        # Standardize features
        mean = species.cov_train.mean(axis=0)
        std = species.cov_train.std(axis=0)
        train_cover_std = (species.cov_train - mean) / std

        # Fit OneClassSVM
        print(" - fit OneClassSVM ... ", end="")
        clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
        clf.fit(train_cover_std)
        print("done.")

        # Plot map of South America
        plt.subplot(1, 2, i + 1)
        if basemap:
            print(" - plot coastlines using basemap")
            m = Basemap(
                projection="cyl",
                llcrnrlat=Y.min(),
                urcrnrlat=Y.max(),
                llcrnrlon=X.min(),
                urcrnrlon=X.max(),
                resolution="c",
            )
            m.drawcoastlines()
            m.drawcountries()
        else:
            print(" - plot coastlines from coverage")
            plt.contour(
                X, Y, land_reference, levels=[-9998], colors="k", linestyles="solid"
            )
            plt.xticks([])
            plt.yticks([])

        print(" - predict species distribution")

        # Predict species distribution using the training data
        Z = np.ones((data.Ny, data.Nx), dtype=np.float64)

        # We'll predict only for the land points.
        idx = np.where(land_reference > -9999)
        coverages_land = data.coverages[:, idx[0], idx[1]].T

        pred = clf.decision_function((coverages_land - mean) / std)
        Z *= pred.min()
        Z[idx[0], idx[1]] = pred

        levels = np.linspace(Z.min(), Z.max(), 25)
        Z[land_reference == -9999] = -9999

        # plot contours of the prediction
        plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
        plt.colorbar(format="%.2f")

        # scatter training/testing points
        plt.scatter(
            species.pts_train["dd long"],
            species.pts_train["dd lat"],
            s=2**2,
            c="black",
            marker="^",
            label="train",
        )
        plt.scatter(
            species.pts_test["dd long"],
            species.pts_test["dd lat"],
            s=2**2,
            c="black",
            marker="x",
            label="test",
        )
        plt.legend()
        plt.title(species.name)
        plt.axis("equal")

        # Compute AUC with regards to background points
        pred_background = Z[background_points[0], background_points[1]]
        pred_test = clf.decision_function((species.cov_test - mean) / std)
        scores = np.r_[pred_test, pred_background]
        y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
        fpr, tpr, thresholds = metrics.roc_curve(y, scores)
        roc_auc = metrics.auc(fpr, tpr)
        plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
        print("\n Area under the ROC curve : %f" % roc_auc)

    print("\ntime elapsed: %.2fs" % (time() - t0))
# Run the example end to end (downloads the dataset on first use) and show the figure.
plot_species_distribution()
plt.show()
| |
from __future__ import absolute_import
from __future__ import unicode_literals
import random
import string
import requests
import time
from boto3.session import Session
import responses
from moto.core import BaseBackend, BaseModel
from .utils import create_id
from .exceptions import StageNotFoundException, ApiKeyNotFoundException
STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}"
class Deployment(BaseModel, dict):
    """Dict-backed model of an API Gateway deployment."""

    def __init__(self, deployment_id, name, description=""):
        super(Deployment, self).__init__()
        self.update({
            'id': deployment_id,
            'stageName': name,
            'description': description,
            'createdDate': int(time.time()),
        })
class IntegrationResponse(BaseModel, dict):
    """Dict-backed model of an integration response for one status code.

    Fix: unlike every sibling model in this module (Deployment,
    Integration, MethodResponse, ...), the original never called
    ``super().__init__()``; added for consistent BaseModel initialization.
    """

    def __init__(self, status_code, selection_pattern=None):
        super(IntegrationResponse, self).__init__()
        self['responseTemplates'] = {"application/json": None}
        self['statusCode'] = status_code
        # selectionPattern is only present when explicitly provided
        if selection_pattern:
            self['selectionPattern'] = selection_pattern
class Integration(BaseModel, dict):
    """Dict-backed model of a method integration and its responses."""

    def __init__(self, integration_type, uri, http_method, request_templates=None):
        super(Integration, self).__init__()
        self.update({
            'type': integration_type,
            'uri': uri,
            'httpMethod': http_method,
            'requestTemplates': request_templates,
        })
        # Seed with the default 200 response.
        self["integrationResponses"] = {"200": IntegrationResponse(200)}

    def create_integration_response(self, status_code, selection_pattern):
        """Create, register, and return a response for *status_code*."""
        new_response = IntegrationResponse(status_code, selection_pattern)
        self["integrationResponses"][status_code] = new_response
        return new_response

    def get_integration_response(self, status_code):
        return self["integrationResponses"][status_code]

    def delete_integration_response(self, status_code):
        return self["integrationResponses"].pop(status_code)
class MethodResponse(BaseModel, dict):
    """Dict-backed model of a method response (statusCode only)."""

    def __init__(self, status_code):
        super(MethodResponse, self).__init__()
        self.update({'statusCode': status_code})
class Method(BaseModel, dict):
    """Dict-backed model of a REST method plus its method responses."""

    def __init__(self, method_type, authorization_type):
        super(Method, self).__init__()
        self['httpMethod'] = method_type
        self['authorizationType'] = authorization_type
        self['authorizerId'] = None
        self['apiKeyRequired'] = None
        self['requestParameters'] = None
        self['requestModels'] = None
        self['methodIntegration'] = None
        # Responses live on an attribute, not in the dict payload.
        self.method_responses = {}

    def create_response(self, response_code):
        """Create, register, and return a MethodResponse for *response_code*."""
        new_response = MethodResponse(response_code)
        self.method_responses[response_code] = new_response
        return new_response

    def get_response(self, response_code):
        return self.method_responses[response_code]

    def delete_response(self, response_code):
        return self.method_responses.pop(response_code)
class Resource(BaseModel):
    """A single API Gateway resource (one path segment) and its methods."""

    def __init__(self, id, region_name, api_id, path_part, parent_id):
        self.id = id
        self.region_name = region_name
        self.api_id = api_id
        self.path_part = path_part
        self.parent_id = parent_id
        self.resource_methods = {'GET': {}}

    def to_dict(self):
        result = {
            "path": self.get_path(),
            "id": self.id,
            "resourceMethods": self.resource_methods,
        }
        # parentId/pathPart are only reported for non-root resources
        if self.parent_id:
            result['parentId'] = self.parent_id
            result['pathPart'] = self.path_part
        return result

    def get_path(self):
        return self.get_parent_path() + self.path_part

    def get_parent_path(self):
        # Root resources have no parent and contribute nothing to the path.
        if not self.parent_id:
            return ''
        backend = apigateway_backends[self.region_name]
        parent_path = backend.get_resource(self.api_id, self.parent_id).get_path()
        # The root resource's path is already '/'; avoid doubling the separator.
        return parent_path if parent_path == '/' else parent_path + '/'

    def get_response(self, request):
        """Proxy *request* through this resource's HTTP integration."""
        integration = self.get_integration(request.method)
        integration_type = integration['type']
        if integration_type != 'HTTP':
            raise NotImplementedError(
                "The {0} type has not been implemented".format(integration_type))
        requests_func = getattr(requests, integration['httpMethod'].lower())
        response = requests_func(integration['uri'])
        return response.status_code, response.text

    def add_method(self, method_type, authorization_type):
        new_method = Method(method_type=method_type,
                            authorization_type=authorization_type)
        self.resource_methods[method_type] = new_method
        return new_method

    def get_method(self, method_type):
        return self.resource_methods[method_type]

    def add_integration(self, method_type, integration_type, uri, request_templates=None):
        new_integration = Integration(
            integration_type, uri, method_type, request_templates=request_templates)
        self.resource_methods[method_type]['methodIntegration'] = new_integration
        return new_integration

    def get_integration(self, method_type):
        return self.resource_methods[method_type]['methodIntegration']

    def delete_integration(self, method_type):
        return self.resource_methods[method_type].pop('methodIntegration')
class Stage(BaseModel, dict):
    """Dict-backed model of an API Gateway stage.

    Supports JSON-patch style updates via ``apply_operations``, including
    method-setting paths such as ``'/*/*/logging/loglevel'``.
    """

    def __init__(self, name=None, deployment_id=None, variables=None,
                 description='', cacheClusterEnabled=False, cacheClusterSize=None):
        super(Stage, self).__init__()
        if variables is None:
            variables = {}
        self['stageName'] = name
        self['deploymentId'] = deployment_id
        self['methodSettings'] = {}
        self['variables'] = variables
        self['description'] = description
        self['cacheClusterEnabled'] = cacheClusterEnabled
        if self['cacheClusterEnabled']:
            # default cache size when enabled without an explicit size
            self['cacheClusterSize'] = str(0.5)
        if cacheClusterSize is not None:
            self['cacheClusterSize'] = str(cacheClusterSize)

    def apply_operations(self, patch_operations):
        """Apply JSON-patch style operations to this stage; returns self.

        Raises Exception for unsupported operations.
        """
        for op in patch_operations:
            if 'variables/' in op['path']:
                self._apply_operation_to_variables(op)
            elif '/cacheClusterEnabled' in op['path']:
                self['cacheClusterEnabled'] = self._str2bool(op['value'])
                if 'cacheClusterSize' not in self and self['cacheClusterEnabled']:
                    self['cacheClusterSize'] = str(0.5)
            elif '/cacheClusterSize' in op['path']:
                self['cacheClusterSize'] = str(float(op['value']))
            elif '/description' in op['path']:
                self['description'] = op['value']
            elif '/deploymentId' in op['path']:
                self['deploymentId'] = op['value']
            elif op['op'] == 'replace':
                # Method Settings drop into here
                # (e.g., path could be '/*/*/logging/loglevel')
                split_path = op['path'].split('/', 3)
                if len(split_path) != 4:
                    continue
                self._patch_method_setting(
                    '/'.join(split_path[1:3]), split_path[3], op['value'])
            else:
                raise Exception(
                    'Patch operation "%s" not implemented' % op['op'])
        return self

    def _patch_method_setting(self, resource_path_and_method, key, value):
        """Set one method setting, creating the defaults entry on first touch."""
        updated_key = self._method_settings_translations(key)
        if updated_key is not None:
            if resource_path_and_method not in self['methodSettings']:
                self['methodSettings'][
                    resource_path_and_method] = self._get_default_method_settings()
            self['methodSettings'][resource_path_and_method][
                updated_key] = self._convert_to_type(updated_key, value)

    def _get_default_method_settings(self):
        """Default values for a freshly created methodSettings entry."""
        return {
            "throttlingRateLimit": 1000.0,
            "dataTraceEnabled": False,
            "metricsEnabled": False,
            "unauthorizedCacheControlHeaderStrategy": "SUCCEED_WITH_RESPONSE_HEADER",
            "cacheTtlInSeconds": 300,
            "cacheDataEncrypted": True,
            "cachingEnabled": False,
            "throttlingBurstLimit": 2000,
            "requireAuthorizationForCacheControl": True
        }

    def _method_settings_translations(self, key):
        """Translate a patch-path suffix to its methodSettings field name.

        Returns None for unknown keys. Bug fix: the original ended with a
        bare ``None`` expression (a no-op statement) instead of an explicit
        return; behavior is unchanged but the fall-through is now intentional.
        """
        mappings = {
            'metrics/enabled': 'metricsEnabled',
            'logging/loglevel': 'loggingLevel',
            'logging/dataTrace': 'dataTraceEnabled',
            'throttling/burstLimit': 'throttlingBurstLimit',
            'throttling/rateLimit': 'throttlingRateLimit',
            'caching/enabled': 'cachingEnabled',
            'caching/ttlInSeconds': 'cacheTtlInSeconds',
            'caching/dataEncrypted': 'cacheDataEncrypted',
            'caching/requireAuthorizationForCacheControl': 'requireAuthorizationForCacheControl',
            'caching/unauthorizedCacheControlHeaderStrategy': 'unauthorizedCacheControlHeaderStrategy'
        }
        return mappings.get(key)

    def _str2bool(self, v):
        return v.lower() == "true"

    def _convert_to_type(self, key, val):
        """Coerce a patch value string to the type expected for *key*."""
        type_mappings = {
            'metricsEnabled': 'bool',
            'loggingLevel': 'str',
            'dataTraceEnabled': 'bool',
            'throttlingBurstLimit': 'int',
            'throttlingRateLimit': 'float',
            'cachingEnabled': 'bool',
            'cacheTtlInSeconds': 'int',
            'cacheDataEncrypted': 'bool',
            'requireAuthorizationForCacheControl': 'bool',
            'unauthorizedCacheControlHeaderStrategy': 'str'
        }
        if key in type_mappings:
            type_value = type_mappings[key]
            if type_value == 'bool':
                return self._str2bool(val)
            elif type_value == 'int':
                return int(val)
            elif type_value == 'float':
                return float(val)
            else:
                return str(val)
        else:
            # unknown keys fall back to their raw string form
            return str(val)

    def _apply_operation_to_variables(self, op):
        """Handle remove/replace operations on a 'variables/<key>' path."""
        key = op['path'][op['path'].rindex("variables/") + 10:]
        if op['op'] == 'remove':
            self['variables'].pop(key, None)
        elif op['op'] == 'replace':
            self['variables'][key] = op['value']
        else:
            raise Exception('Patch operation "%s" not implemented' % op['op'])
class ApiKey(BaseModel, dict):
    """Dict-backed model of an API key; generates a random 40-character
    value when none is supplied."""

    def __init__(self, name=None, description=None, enabled=True,
                 generateDistinctId=False, value=None, stageKeys=None, customerId=None):
        super(ApiKey, self).__init__()
        if not value:
            alphabet = string.ascii_letters + string.digits
            value = ''.join(random.sample(alphabet, 40))
        now = int(time.time())
        self['id'] = create_id()
        self['value'] = value
        self['name'] = name
        self['customerId'] = customerId
        self['description'] = description
        self['enabled'] = enabled
        self['createdDate'] = now
        self['lastUpdatedDate'] = now
        self['stageKeys'] = stageKeys
class UsagePlan(BaseModel, dict):
    """Dict-backed model of a usage plan.

    Fix: ``apiStages`` previously defaulted to a shared mutable list
    (``apiStages=[]``), so mutations of one plan's stages leaked into every
    other plan created without the argument; default to None and build a
    fresh list per instance instead.
    """

    def __init__(self, name=None, description=None, apiStages=None,
                 throttle=None, quota=None):
        super(UsagePlan, self).__init__()
        self['id'] = create_id()
        self['name'] = name
        self['description'] = description
        self['apiStages'] = apiStages if apiStages is not None else []
        self['throttle'] = throttle
        self['quota'] = quota
class UsagePlanKey(BaseModel, dict):
    """Dict-backed model of a key attached to a usage plan."""

    def __init__(self, id, type, name, value):
        super(UsagePlanKey, self).__init__()
        self.update({'id': id, 'name': name, 'type': type, 'value': value})
class RestAPI(BaseModel):
    """A REST API aggregate: resources, stages and deployments, plus the
    `responses` callbacks that make its stage URLs callable in tests."""

    def __init__(self, id, region_name, name, description):
        self.id = id
        self.region_name = region_name
        self.name = name
        self.description = description
        self.create_date = int(time.time())
        self.deployments = {}
        self.stages = {}
        self.resources = {}
        # Every API starts with a root resource at '/'.
        self.add_child('/')

    def __repr__(self):
        return str(self.id)

    def to_dict(self):
        return {
            "id": self.id,
            "name": self.name,
            "description": self.description,
            "createdDate": int(time.time()),
        }

    def add_child(self, path, parent_id=None):
        """Create and register a child Resource under *parent_id*."""
        resource_id = create_id()
        resource = Resource(id=resource_id, region_name=self.region_name,
                            api_id=self.id, path_part=path, parent_id=parent_id)
        self.resources[resource_id] = resource
        return resource

    def get_resource_for_path(self, path_after_stage_name):
        # TODO deal with no matching resource
        return next((resource for resource in self.resources.values()
                     if resource.get_path() == path_after_stage_name), None)

    def resource_callback(self, request):
        """`responses` callback: dispatch a mocked request to its resource."""
        requested_path = '/'.join(request.path_url.split("/")[2:]) or '/'
        resource = self.get_resource_for_path(requested_path)
        status_code, response = resource.get_response(request)
        return status_code, {}, response

    def update_integration_mocks(self, stage_name):
        # Register mock endpoints for both casings of the generated api id,
        # since clients may use either form in the URL.
        for api_id in (self.id.lower(), self.id.upper()):
            stage_url = STAGE_URL.format(api_id=api_id,
                                         region_name=self.region_name,
                                         stage_name=stage_name)
            responses.add_callback(responses.GET, stage_url,
                                   callback=self.resource_callback)

    def create_stage(self, name, deployment_id, variables=None, description='', cacheClusterEnabled=None, cacheClusterSize=None):
        """Create, register, and return a new Stage (and its URL mocks)."""
        new_stage = Stage(name=name, deployment_id=deployment_id,
                          variables={} if variables is None else variables,
                          description=description,
                          cacheClusterSize=cacheClusterSize,
                          cacheClusterEnabled=cacheClusterEnabled)
        self.stages[name] = new_stage
        self.update_integration_mocks(name)
        return new_stage

    def create_deployment(self, name, description="", stage_variables=None):
        """Create a Deployment plus its implicit Stage and URL mocks."""
        deployment_id = create_id()
        deployment = Deployment(deployment_id, name, description)
        self.deployments[deployment_id] = deployment
        self.stages[name] = Stage(name=name, deployment_id=deployment_id,
                                  variables={} if stage_variables is None else stage_variables)
        self.update_integration_mocks(name)
        return deployment

    def get_deployment(self, deployment_id):
        return self.deployments[deployment_id]

    def get_stages(self):
        return list(self.stages.values())

    def get_deployments(self):
        return list(self.deployments.values())

    def delete_deployment(self, deployment_id):
        return self.deployments.pop(deployment_id)
class APIGatewayBackend(BaseBackend):
    """In-memory mock of the API Gateway control plane for one region.

    State is held in plain dicts:
      - apis: rest-api id -> RestAPI
      - keys: api-key id -> ApiKey
      - usage_plans: usage-plan id -> UsagePlan
      - usage_plan_keys: usage-plan id -> {api-key id -> UsagePlanKey}
    Most methods are thin delegations to the owning RestAPI / Resource.
    """
    def __init__(self, region_name):
        super(APIGatewayBackend, self).__init__()
        self.apis = {}
        self.keys = {}
        self.usage_plans = {}
        self.usage_plan_keys = {}
        self.region_name = region_name
    def reset(self):
        """Wipe all state but keep the backend bound to the same region."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)
    def create_rest_api(self, name, description):
        """Create and register a RestAPI under a freshly generated id."""
        api_id = create_id()
        rest_api = RestAPI(api_id, self.region_name, name, description)
        self.apis[api_id] = rest_api
        return rest_api
    def get_rest_api(self, function_id):
        # Raises KeyError for unknown ids.
        rest_api = self.apis[function_id]
        return rest_api
    def list_apis(self):
        return self.apis.values()
    def delete_rest_api(self, function_id):
        """Remove and return the RestAPI with the given id."""
        rest_api = self.apis.pop(function_id)
        return rest_api
    def list_resources(self, function_id):
        api = self.get_rest_api(function_id)
        return api.resources.values()
    def get_resource(self, function_id, resource_id):
        api = self.get_rest_api(function_id)
        resource = api.resources[resource_id]
        return resource
    def create_resource(self, function_id, parent_resource_id, path_part):
        """Create a child resource under ``parent_resource_id``."""
        api = self.get_rest_api(function_id)
        child = api.add_child(
            path=path_part,
            parent_id=parent_resource_id,
        )
        return child
    def delete_resource(self, function_id, resource_id):
        api = self.get_rest_api(function_id)
        resource = api.resources.pop(resource_id)
        return resource
    def get_method(self, function_id, resource_id, method_type):
        resource = self.get_resource(function_id, resource_id)
        return resource.get_method(method_type)
    def create_method(self, function_id, resource_id, method_type, authorization_type):
        resource = self.get_resource(function_id, resource_id)
        method = resource.add_method(method_type, authorization_type)
        return method
    def get_stage(self, function_id, stage_name):
        """Return the named stage or raise StageNotFoundException."""
        api = self.get_rest_api(function_id)
        stage = api.stages.get(stage_name)
        if stage is None:
            raise StageNotFoundException()
        else:
            return stage
    def get_stages(self, function_id):
        api = self.get_rest_api(function_id)
        return api.get_stages()
    def create_stage(self, function_id, stage_name, deploymentId,
                     variables=None, description='', cacheClusterEnabled=None, cacheClusterSize=None):
        """Create a stage on the API and return it."""
        if variables is None:
            variables = {}
        api = self.get_rest_api(function_id)
        api.create_stage(stage_name, deploymentId, variables=variables,
                         description=description, cacheClusterEnabled=cacheClusterEnabled, cacheClusterSize=cacheClusterSize)
        return api.stages.get(stage_name)
    def update_stage(self, function_id, stage_name, patch_operations):
        """Apply JSON-patch style operations to an existing stage.

        NOTE(review): get_stage raises when the stage is missing, so the
        ``if not stage`` fallback below appears unreachable (and Stage()
        is called without its required arguments) — confirm intent.
        """
        stage = self.get_stage(function_id, stage_name)
        if not stage:
            api = self.get_rest_api(function_id)
            stage = api.stages[stage_name] = Stage()
        return stage.apply_operations(patch_operations)
    def get_method_response(self, function_id, resource_id, method_type, response_code):
        method = self.get_method(function_id, resource_id, method_type)
        method_response = method.get_response(response_code)
        return method_response
    def create_method_response(self, function_id, resource_id, method_type, response_code):
        method = self.get_method(function_id, resource_id, method_type)
        method_response = method.create_response(response_code)
        return method_response
    def delete_method_response(self, function_id, resource_id, method_type, response_code):
        method = self.get_method(function_id, resource_id, method_type)
        method_response = method.delete_response(response_code)
        return method_response
    def create_integration(self, function_id, resource_id, method_type, integration_type, uri,
                           request_templates=None):
        """Attach an integration to a method on the given resource."""
        resource = self.get_resource(function_id, resource_id)
        integration = resource.add_integration(method_type, integration_type, uri,
                                               request_templates=request_templates)
        return integration
    def get_integration(self, function_id, resource_id, method_type):
        resource = self.get_resource(function_id, resource_id)
        return resource.get_integration(method_type)
    def delete_integration(self, function_id, resource_id, method_type):
        resource = self.get_resource(function_id, resource_id)
        return resource.delete_integration(method_type)
    def create_integration_response(self, function_id, resource_id, method_type, status_code, selection_pattern):
        integration = self.get_integration(
            function_id, resource_id, method_type)
        integration_response = integration.create_integration_response(
            status_code, selection_pattern)
        return integration_response
    def get_integration_response(self, function_id, resource_id, method_type, status_code):
        integration = self.get_integration(
            function_id, resource_id, method_type)
        integration_response = integration.get_integration_response(
            status_code)
        return integration_response
    def delete_integration_response(self, function_id, resource_id, method_type, status_code):
        integration = self.get_integration(
            function_id, resource_id, method_type)
        integration_response = integration.delete_integration_response(
            status_code)
        return integration_response
    def create_deployment(self, function_id, name, description="", stage_variables=None):
        """Create a deployment (and its same-named stage) on the API."""
        if stage_variables is None:
            stage_variables = {}
        api = self.get_rest_api(function_id)
        deployment = api.create_deployment(name, description, stage_variables)
        return deployment
    def get_deployment(self, function_id, deployment_id):
        api = self.get_rest_api(function_id)
        return api.get_deployment(deployment_id)
    def get_deployments(self, function_id):
        api = self.get_rest_api(function_id)
        return api.get_deployments()
    def delete_deployment(self, function_id, deployment_id):
        api = self.get_rest_api(function_id)
        return api.delete_deployment(deployment_id)
    def create_apikey(self, payload):
        """Create an ApiKey from the raw request payload dict."""
        key = ApiKey(**payload)
        self.keys[key['id']] = key
        return key
    def get_apikeys(self):
        return list(self.keys.values())
    def get_apikey(self, api_key_id):
        return self.keys[api_key_id]
    def delete_apikey(self, api_key_id):
        # Returns an empty dict to mirror the service's empty response.
        self.keys.pop(api_key_id)
        return {}
    def create_usage_plan(self, payload):
        plan = UsagePlan(**payload)
        self.usage_plans[plan['id']] = plan
        return plan
    def get_usage_plans(self, api_key_id=None):
        """List usage plans; with ``api_key_id``, only plans that have
        that key attached."""
        plans = list(self.usage_plans.values())
        if api_key_id is not None:
            plans = [
                plan
                for plan in plans
                if self.usage_plan_keys.get(plan['id'], {}).get(api_key_id, False)
            ]
        return plans
    def get_usage_plan(self, usage_plan_id):
        return self.usage_plans[usage_plan_id]
    def delete_usage_plan(self, usage_plan_id):
        self.usage_plans.pop(usage_plan_id)
        return {}
    def create_usage_plan_key(self, usage_plan_id, payload):
        """Attach an existing API key to a usage plan.

        Raises ApiKeyNotFoundException when the referenced key is unknown.
        """
        if usage_plan_id not in self.usage_plan_keys:
            self.usage_plan_keys[usage_plan_id] = {}
        key_id = payload["keyId"]
        if key_id not in self.keys:
            raise ApiKeyNotFoundException()
        api_key = self.keys[key_id]
        usage_plan_key = UsagePlanKey(id=key_id, type=payload["keyType"], name=api_key["name"], value=api_key["value"])
        self.usage_plan_keys[usage_plan_id][usage_plan_key['id']] = usage_plan_key
        return usage_plan_key
    def get_usage_plan_keys(self, usage_plan_id):
        if usage_plan_id not in self.usage_plan_keys:
            return []
        return list(self.usage_plan_keys[usage_plan_id].values())
    def get_usage_plan_key(self, usage_plan_id, key_id):
        return self.usage_plan_keys[usage_plan_id][key_id]
    def delete_usage_plan_key(self, usage_plan_id, key_id):
        self.usage_plan_keys[usage_plan_id].pop(key_id)
        return {}
# One backend instance per region that offers API Gateway; the boto3
# Session enumerates the regions from botocore's endpoint data.
apigateway_backends = {}
for region_name in Session().get_available_regions('apigateway'):
    apigateway_backends[region_name] = APIGatewayBackend(region_name)
| |
import re
import json
import datetime
import traceback
import dns.name
import dns.reversename
from distutils.version import StrictVersion
from flask import Blueprint, render_template, make_response, url_for, current_app, request, redirect, abort, jsonify, g, session
from flask_login import login_required, current_user, login_manager
from ..lib.utils import pretty_domain_name
from ..lib.utils import pretty_json
from ..decorators import can_create_domain, operator_role_required, can_access_domain, can_configure_dnssec, can_remove_domain
from ..models.user import User, Anonymous
from ..models.account import Account
from ..models.setting import Setting
from ..models.history import History
from ..models.domain import Domain
from ..models.record import Record
from ..models.record_entry import RecordEntry
from ..models.domain_template import DomainTemplate
from ..models.domain_template_record import DomainTemplateRecord
from ..models.domain_setting import DomainSetting
from ..models.base import db
from ..models.domain_user import DomainUser
from ..models.account_user import AccountUser
from .admin import extract_changelogs_from_a_history_entry
from ..decorators import history_access_required
# Blueprint grouping all zone-management views under the /domain prefix.
domain_bp = Blueprint('domain',
                      __name__,
                      template_folder='templates',
                      url_prefix='/domain')
@domain_bp.before_request
def before_request():
    """Run before every /domain request: bind the current user, enforce
    maintenance mode for non-admins, and refresh the sliding session
    timeout."""
    # Check if user is anonymous
    g.user = current_user
    login_manager.anonymous_user = Anonymous
    # Check site is in maintenance mode
    maintenance = Setting().get('maintenance')
    if maintenance and current_user.is_authenticated and current_user.role.name not in [
            'Administrator', 'Operator'
    ]:
        return render_template('maintenance.html')
    # Manage session timeout
    session.permanent = True
    current_app.permanent_session_lifetime = datetime.timedelta(
        minutes=int(Setting().get('session_timeout')))
    session.modified = True
@domain_bp.route('/<path:domain_name>', methods=['GET'])
@login_required
@can_access_domain
def domain(domain_name):
    """Render the record table for one zone.

    Fetches the zone's rrsets from the PowerDNS API and flattens them
    into RecordEntry rows the datatable template can display.
    """
    # Validate the domain existing in the local DB
    domain = Domain.query.filter(Domain.name == domain_name).first()
    if not domain:
        abort(404)
    # Query domain's rrsets from PowerDNS API
    rrsets = Record().get_rrsets(domain.name)
    current_app.logger.debug("Fetched rrests: \n{}".format(pretty_json(rrsets)))
    # API server might be down, misconfigured
    if not rrsets and domain.type != 'Slave':
        abort(500)
    quick_edit = Setting().get('record_quick_edit')
    records_allow_to_edit = Setting().get_records_allow_to_edit()
    forward_records_allow_to_edit = Setting(
    ).get_forward_records_allow_to_edit()
    reverse_records_allow_to_edit = Setting(
    ).get_reverse_records_allow_to_edit()
    ttl_options = Setting().get_ttl_options()
    records = []
    # Render the "records" to display in HTML datatable
    #
    # BUG: If we have multiple records with the same name
    # and each record has its own comment, the display of
    # [record-comment] may not consistent because PDNS API
    # returns the rrsets (records, comments) has different
    # order than its database records.
    # TODO:
    # - Find a way to make it consistent, or
    # - Only allow one comment for that case
    if StrictVersion(Setting().get('pdns_version')) >= StrictVersion('4.0.0'):
        for r in rrsets:
            if r['type'] in records_allow_to_edit:
                r_name = r['name'].rstrip('.')
                # If it is reverse zone and pretty_ipv6_ptr setting
                # is enabled, we reformat the name for ipv6 records.
                if Setting().get('pretty_ipv6_ptr') and r[
                        'type'] == 'PTR' and 'ip6.arpa' in r_name and '*' not in r_name:
                    r_name = dns.reversename.to_address(
                        dns.name.from_text(r_name))
                # Create the list of records in format that
                # PDA jinja2 template can understand.
                index = 0
                for record in r['records']:
                    # Pair each record with its comment by position,
                    # defaulting to an empty comment (see BUG note above).
                    if (len(r['comments'])>index):
                        c=r['comments'][index]['content']
                    else:
                        c=''
                    record_entry = RecordEntry(
                        name=r_name,
                        type=r['type'],
                        status='Disabled' if record['disabled'] else 'Active',
                        ttl=r['ttl'],
                        data=record['content'],
                        comment=c,
                        is_allowed_edit=True)
                    index += 1
                    records.append(record_entry)
    else:
        # Unsupported version
        abort(500)
    # Reverse zones use their own editable-type list.
    if not re.search(r'ip6\.arpa|in-addr\.arpa$', domain_name):
        editable_records = forward_records_allow_to_edit
    else:
        editable_records = reverse_records_allow_to_edit
    return render_template('domain.html',
                           domain=domain,
                           records=records,
                           editable_records=editable_records,
                           quick_edit=quick_edit,
                           ttl_options=ttl_options,
                           current_user=current_user)
@domain_bp.route('/remove', methods=['GET', 'POST'])
@login_required
@can_remove_domain
def remove():
    """GET: list zones the user may remove.
    POST: delete the selected zone after an access check."""
    # domains is a list of all the domains a User may access
    # Admins may access all
    # Regular users only if they are associated with the domain
    if current_user.role.name in ['Administrator', 'Operator']:
        domains = Domain.query.order_by(Domain.name).all()
    else:
        # Get query for domain to which the user has access permission.
        # This includes direct domain permission AND permission through
        # account membership
        domains = db.session.query(Domain) \
            .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
            .outerjoin(Account, Domain.account_id == Account.id) \
            .outerjoin(AccountUser, Account.id == AccountUser.account_id) \
            .filter(
                db.or_(
                    DomainUser.user_id == current_user.id,
                    AccountUser.user_id == current_user.id
                )).order_by(Domain.name)
    if request.method == 'POST':
        # TODO Change name from 'domainid' to something else, its confusing
        domain_name = request.form['domainid']
        # Get domain from Database, might be None
        domain = Domain.query.filter(Domain.name == domain_name).first()
        # Check if the domain is in domains before removal
        if domain not in domains:
            abort(403)
        # Delete
        d = Domain()
        result = d.delete(domain_name)
        if result['status'] == 'error':
            abort(500)
        history = History(msg='Delete domain {0}'.format(
            pretty_domain_name(domain_name)),
                          created_by=current_user.username)
        history.add()
        return redirect(url_for('dashboard.dashboard'))
    else:
        # On GET return the domains we got earlier
        # NOTE(review): the template variable is named 'domainss' (sic);
        # presumably the template expects this exact name — verify before
        # renaming it.
        return render_template('domain_remove.html',
                               domainss=domains)
@domain_bp.route('/<path:domain_name>/changelog', methods=['GET'])
@login_required
@can_access_domain
@history_access_required
def changelog(domain_name):
    """Render the full change history for one zone."""
    g.user = current_user
    login_manager.anonymous_user = Anonymous
    domain = Domain.query.filter(Domain.name == domain_name).first()
    if not domain:
        abort(404)
    # Query domain's rrsets from PowerDNS API
    rrsets = Record().get_rrsets(domain.name)
    current_app.logger.debug("Fetched rrests: \n{}".format(pretty_json(rrsets)))
    # API server might be down, misconfigured
    if not rrsets and domain.type != 'Slave':
        abort(500)
    records_allow_to_edit = Setting().get_records_allow_to_edit()
    records = []
    # get all changelogs for this domain, in descending order
    if current_user.role.name in [ 'Administrator', 'Operator' ]:
        histories = History.query.filter(History.domain_id == domain.id).order_by(History.created_on.desc()).all()
    else:
        # if the user isn't an administrator or operator,
        # allow_user_view_history must be enabled to get here,
        # so include history for the domains for the user
        histories = db.session.query(History) \
            .join(Domain, History.domain_id == Domain.id) \
            .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
            .outerjoin(Account, Domain.account_id == Account.id) \
            .outerjoin(AccountUser, Account.id == AccountUser.account_id) \
            .order_by(History.created_on.desc()) \
            .filter(
                db.and_(db.or_(
                    DomainUser.user_id == current_user.id,
                    AccountUser.user_id == current_user.id
                ),
                    History.domain_id == domain.id
                )
            ).all()
    if StrictVersion(Setting().get('pdns_version')) >= StrictVersion('4.0.0'):
        for r in rrsets:
            if r['type'] in records_allow_to_edit:
                r_name = r['name'].rstrip('.')
                # If it is reverse zone and pretty_ipv6_ptr setting
                # is enabled, we reformat the name for ipv6 records.
                if Setting().get('pretty_ipv6_ptr') and r[
                        'type'] == 'PTR' and 'ip6.arpa' in r_name and '*' not in r_name:
                    r_name = dns.reversename.to_address(
                        dns.name.from_text(r_name))
                # Create the list of records in format that
                # PDA jinja2 template can understand.
                index = 0
                for record in r['records']:
                    # Pair each record with its comment by position,
                    # defaulting to an empty comment.
                    if (len(r['comments'])>index):
                        c=r['comments'][index]['content']
                    else:
                        c=''
                    record_entry = RecordEntry(
                        name=r_name,
                        type=r['type'],
                        status='Disabled' if record['disabled'] else 'Active',
                        ttl=r['ttl'],
                        data=record['content'],
                        comment=c,
                        is_allowed_edit=True)
                    index += 1
                    records.append(record_entry)
    else:
        # Unsupported version
        abort(500)
    # Group per-history-entry changes, dropping entries with no changes.
    changes_set = dict()
    for i in range(len(histories)):
        extract_changelogs_from_a_history_entry(changes_set, histories[i], i)
        if i in changes_set and len(changes_set[i]) == 0:  # if empty, then remove the key
            changes_set.pop(i)
    return render_template('domain_changelog.html', domain=domain, allHistoryChanges=changes_set)
"""
Returns a changelog for a specific pair of (record_name, record_type)
"""
@domain_bp.route('/<path:domain_name>/changelog/<path:record_name>-<path:record_type>', methods=['GET'])
@login_required
@can_access_domain
@history_access_required
def record_changelog(domain_name, record_name, record_type):
    """Render the change history filtered to one (record_name, record_type)."""
    g.user = current_user
    login_manager.anonymous_user = Anonymous
    domain = Domain.query.filter(Domain.name == domain_name).first()
    if not domain:
        abort(404)
    # Query domain's rrsets from PowerDNS API
    rrsets = Record().get_rrsets(domain.name)
    current_app.logger.debug("Fetched rrests: \n{}".format(pretty_json(rrsets)))
    # API server might be down, misconfigured
    if not rrsets and domain.type != 'Slave':
        abort(500)
    # get all changelogs for this domain, in descending order
    if current_user.role.name in [ 'Administrator', 'Operator' ]:
        histories = History.query.filter(History.domain_id == domain.id).order_by(History.created_on.desc()).all()
    else:
        # if the user isn't an administrator or operator,
        # allow_user_view_history must be enabled to get here,
        # so include history for the domains for the user
        histories = db.session.query(History) \
            .join(Domain, History.domain_id == Domain.id) \
            .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
            .outerjoin(Account, Domain.account_id == Account.id) \
            .outerjoin(AccountUser, Account.id == AccountUser.account_id) \
            .order_by(History.created_on.desc()) \
            .filter(
                db.and_(db.or_(
                    DomainUser.user_id == current_user.id,
                    AccountUser.user_id == current_user.id
                ),
                    History.domain_id == domain.id
                )
            ).all()
    changes_set_of_record = dict()
    for i in range(len(histories)):
        extract_changelogs_from_a_history_entry(changes_set_of_record, histories[i], i, record_name, record_type)
        if i in changes_set_of_record and len(changes_set_of_record[i]) == 0:  # if empty, then remove the key
            changes_set_of_record.pop(i)
    # Keep only history entries that touch the requested (name, type) pair.
    # Bug fix: the previous code removed items from the list while
    # iterating the same list, which skipped the element following each
    # removal, so some non-matching entries survived the filter.
    indexes_to_pop = []
    for change_num, changes_i in changes_set_of_record.items():
        kept = [
            hre for hre in changes_i
            if ('type' in hre.add_rrest
                and hre.add_rrest['name'] == record_name
                and hre.add_rrest['type'] == record_type)
            or ('type' in hre.del_rrest
                and hre.del_rrest['name'] == record_name
                and hre.del_rrest['type'] == record_type)
        ]
        changes_set_of_record[change_num] = kept
        if not kept:  # if empty, then remove the key afterwards
            indexes_to_pop.append(change_num)
    for i in indexes_to_pop:
        changes_set_of_record.pop(i)
    return render_template('domain_changelog.html', domain=domain, allHistoryChanges=changes_set_of_record,
                           record_name = record_name, record_type = record_type)
@domain_bp.route('/add', methods=['GET', 'POST'])
@login_required
@can_create_domain
def add():
    """GET: render the zone-creation form.
    POST: create the zone in PowerDNS, log history, grant the creator
    access, and optionally apply a record template."""
    templates = DomainTemplate.query.all()
    if request.method == 'POST':
        try:
            domain_name = request.form.getlist('domain_name')[0]
            domain_type = request.form.getlist('radio_type')[0]
            domain_template = request.form.getlist('domain_template')[0]
            soa_edit_api = request.form.getlist('radio_type_soa_edit_api')[0]
            account_id = request.form.getlist('accountid')[0]
            if ' ' in domain_name or not domain_name or not domain_type:
                return render_template(
                    'errors/400.html',
                    msg="Please enter a valid domain name"), 400
            # If User creates the domain, check some additional stuff
            if current_user.role.name not in ['Administrator', 'Operator']:
                # Get all the account_ids of the user
                user_accounts_ids = current_user.get_accounts()
                user_accounts_ids = [x.id for x in user_accounts_ids]
                # User may not create domains without Account
                if int(account_id) == 0 or int(account_id) not in user_accounts_ids:
                    return render_template(
                        'errors/400.html',
                        msg="Please use a valid Account"), 400
            #TODO: Validate ip addresses input
            # Encode domain name into punycode (IDN)
            try:
                domain_name = domain_name.encode('idna').decode()
            except:
                current_app.logger.error("Cannot encode the domain name {}".format(domain_name))
                current_app.logger.debug(traceback.format_exc())
                return render_template(
                    'errors/400.html',
                    msg="Please enter a valid domain name"), 400
            # Slave zones carry the master IPs they replicate from.
            # NOTE(review): a 'slave' POST without domain_master_address
            # leaves domain_master_ips unbound (NameError caught below as
            # HTTP 500) — confirm the form always sends that field.
            if domain_type == 'slave':
                if request.form.getlist('domain_master_address'):
                    domain_master_string = request.form.getlist(
                        'domain_master_address')[0]
                    domain_master_string = domain_master_string.replace(
                        ' ', '')
                    domain_master_ips = domain_master_string.split(',')
            else:
                domain_master_ips = []
            account_name = Account().get_name_by_id(account_id)
            d = Domain()
            result = d.add(domain_name=domain_name,
                           domain_type=domain_type,
                           soa_edit_api=soa_edit_api,
                           domain_master_ips=domain_master_ips,
                           account_name=account_name)
            if result['status'] == 'ok':
                domain_id = Domain().get_id_by_name(domain_name)
                history = History(msg='Add domain {0}'.format(
                    pretty_domain_name(domain_name)),
                                  detail=str({
                                      'domain_type': domain_type,
                                      'domain_master_ips': domain_master_ips,
                                      'account_id': account_id
                                  }),
                                  created_by=current_user.username,
                                  domain_id=domain_id)
                history.add()
                # grant user access to the domain
                Domain(name=domain_name).grant_privileges([current_user.id])
                # apply template if needed
                if domain_template != '0':
                    template = DomainTemplate.query.filter(
                        DomainTemplate.id == domain_template).first()
                    template_records = DomainTemplateRecord.query.filter(
                        DomainTemplateRecord.template_id ==
                        domain_template).all()
                    record_data = []
                    for template_record in template_records:
                        record_row = {
                            'record_data': template_record.data,
                            'record_name': template_record.name,
                            'record_status': 'Active' if template_record.status else 'Disabled',
                            'record_ttl': template_record.ttl,
                            'record_type': template_record.type,
                            'comment_data': [{'content': template_record.comment, 'account': ''}]
                        }
                        record_data.append(record_row)
                    r = Record()
                    result = r.apply(domain_name, record_data)
                    if result['status'] == 'ok':
                        history = History(
                            msg='Applying template {0} to {1} successfully.'.
                            format(template.name, domain_name),
                            detail=str(
                                json.dumps({
                                    "domain":
                                    domain_name,
                                    "template":
                                    template.name,
                                    "add_rrests":
                                    result['data'][0]['rrsets'],
                                    "del_rrests":
                                    result['data'][1]['rrsets']
                                })),
                            created_by=current_user.username,
                            domain_id=domain_id)
                        history.add()
                    else:
                        history = History(
                            msg=
                            'Failed to apply template {0} to {1}.'
                            .format(template.name, domain_name),
                            detail=str(result),
                            created_by=current_user.username)
                        history.add()
                return redirect(url_for('dashboard.dashboard'))
            else:
                return render_template('errors/400.html',
                                       msg=result['msg']), 400
        except Exception as e:
            current_app.logger.error('Cannot add domain. Error: {0}'.format(e))
            current_app.logger.debug(traceback.format_exc())
            abort(500)
    # Get
    else:
        # Admins and Operators can set to any account
        if current_user.role.name in ['Administrator', 'Operator']:
            accounts = Account.query.order_by(Account.name).all()
        else:
            accounts = current_user.get_accounts()
        return render_template('domain_add.html',
                               templates=templates,
                               accounts=accounts)
@domain_bp.route('/setting/<path:domain_name>/delete', methods=['POST'])
@login_required
@operator_role_required
def delete(domain_name):
    """Delete a zone (operator only), log the removal, go back home."""
    outcome = Domain().delete(domain_name)
    if outcome['status'] == 'error':
        abort(500)
    # Record who removed the zone before redirecting.
    History(msg='Delete domain {0}'.format(pretty_domain_name(domain_name)),
            created_by=current_user.username).add()
    return redirect(url_for('dashboard.dashboard'))
@domain_bp.route('/setting/<path:domain_name>/manage', methods=['GET', 'POST'])
@login_required
@operator_role_required
def setting(domain_name):
    """GET: render the zone's settings/access-control page.
    POST: replace the set of users granted direct access to the zone."""
    if request.method == 'GET':
        domain = Domain.query.filter(Domain.name == domain_name).first()
        if not domain:
            abort(404)
        users = User.query.all()
        accounts = Account.query.order_by(Account.name).all()
        # get list of user ids to initialize selection data
        d = Domain(name=domain_name)
        domain_user_ids = d.get_user()
        account = d.get_account()
        return render_template('domain_setting.html',
                               domain=domain,
                               users=users,
                               domain_user_ids=domain_user_ids,
                               accounts=accounts,
                               domain_account=account)
    if request.method == 'POST':
        # username in right column
        new_user_list = request.form.getlist('domain_multi_user[]')
        # Resolve the submitted usernames to ids; unknown names drop out.
        new_user_ids = [
            user.id for user in User.query.filter(
                User.username.in_(new_user_list)).all() if user
        ]
        # grant/revoke user privileges
        d = Domain(name=domain_name)
        d.grant_privileges(new_user_ids)
        history = History(
            msg='Change domain {0} access control'.format(
                pretty_domain_name(domain_name)),
            detail=str({'user_has_access': new_user_list}),
            created_by=current_user.username,
            domain_id=d.id)
        history.add()
        return redirect(url_for('domain.setting', domain_name=domain_name))
@domain_bp.route('/setting/<path:domain_name>/change_type',
                 methods=['POST'])
@login_required
@operator_role_required
def change_type(domain_name):
    """Change the PowerDNS zone kind; slave zones also get the master
    IPs they replicate from."""
    domain = Domain.query.filter(Domain.name == domain_name).first()
    if not domain:
        abort(404)
    domain_type = request.form.get('domain_type')
    if domain_type is None:
        abort(500)
    # '0' is the form's "no change" placeholder value.
    if domain_type == '0':
        return redirect(url_for('domain.setting', domain_name=domain_name))
    #TODO: Validate ip addresses input
    domain_master_ips = []
    if domain_type == 'slave' and request.form.getlist('domain_master_address'):
        domain_master_string = request.form.getlist(
            'domain_master_address')[0]
        domain_master_string = domain_master_string.replace(
            ' ', '')
        domain_master_ips = domain_master_string.split(',')
    d = Domain()
    status = d.update_kind(domain_name=domain_name,
                           kind=domain_type,
                           masters=domain_master_ips)
    if status['status'] == 'ok':
        history = History(msg='Update type for domain {0}'.format(
            pretty_domain_name(domain_name)),
                          detail=str({
                              "domain": domain_name,
                              "type": domain_type,
                              "masters": domain_master_ips
                          }),
                          created_by=current_user.username,
                          domain_id=Domain().get_id_by_name(domain_name))
        history.add()
        return redirect(url_for('domain.setting', domain_name = domain_name))
    else:
        abort(500)
@domain_bp.route('/setting/<path:domain_name>/change_soa_setting',
                 methods=['POST'])
@login_required
@operator_role_required
def change_soa_edit_api(domain_name):
    """Update the zone's SOA-EDIT-API setting in PowerDNS and log it."""
    domain = Domain.query.filter(Domain.name == domain_name).first()
    if not domain:
        abort(404)
    new_setting = request.form.get('soa_edit_api')
    if new_setting is None:
        abort(500)
    # '0' is the form's "no change" placeholder value.
    if new_setting == '0':
        return redirect(url_for('domain.setting', domain_name=domain_name))
    d = Domain()
    status = d.update_soa_setting(domain_name=domain_name,
                                  soa_edit_api=new_setting)
    if status['status'] == 'ok':
        history = History(
            msg='Update soa_edit_api for domain {0}'.format(
                pretty_domain_name(domain_name)),
            detail=str({
                "domain": domain_name,
                "soa_edit_api": new_setting
            }),
            created_by=current_user.username,
            domain_id=d.get_id_by_name(domain_name))
        history.add()
        return redirect(url_for('domain.setting', domain_name = domain_name))
    else:
        abort(500)
@domain_bp.route('/setting/<path:domain_name>/change_account',
                 methods=['POST'])
@login_required
@operator_role_required
def change_account(domain_name):
    """Associate the zone with the account picked in the settings form."""
    domain = Domain.query.filter(Domain.name == domain_name).first()
    if not domain:
        abort(404)
    new_account_id = request.form.get('accountid')
    outcome = Domain(name=domain.name).assoc_account(new_account_id)
    # Guard clause: bail out on failure, otherwise back to settings.
    if not outcome['status']:
        abort(500)
    return redirect(url_for('domain.setting', domain_name=domain.name))
@domain_bp.route('/<path:domain_name>/apply',
                 methods=['POST'],
                 strict_slashes=False)
@login_required
@can_access_domain
def record_apply(domain_name):
    """Apply record changes submitted from the zone edit page.

    The client echoes back the zone serial it last saw; a mismatch means
    another session changed the zone, so the request is rejected instead
    of clobbering the concurrent edit.
    """
    try:
        jdata = request.json
        submitted_serial = jdata['serial']
        submitted_record = jdata['record']
        domain = Domain.query.filter(Domain.name == domain_name).first()
        if domain:
            current_app.logger.debug('Current domain serial: {0}'.format(
                domain.serial))
            # Optimistic-concurrency check against the submitted serial.
            if int(submitted_serial) != domain.serial:
                return make_response(
                    jsonify({
                        'status':
                        'error',
                        'msg':
                        'The zone has been changed by another session or user. Please refresh this web page to load updated records.'
                    }), 500)
        else:
            return make_response(
                jsonify({
                    'status':
                    'error',
                    'msg':
                    'Domain name {0} does not exist'.format(pretty_domain_name(domain_name))
                }), 404)
        r = Record()
        result = r.apply(domain_name, submitted_record)
        if result['status'] == 'ok':
            # NOTE(review): the detail keys are spelled "add_rrests" /
            # "del_rrests" — presumably the changelog extractor parses
            # these exact keys, so do not rename without checking it.
            history = History(
                msg='Apply record changes to domain {0}'.format(pretty_domain_name(domain_name)),
                detail=str(
                    json.dumps({
                        "domain": domain_name,
                        "add_rrests": result['data'][0]['rrsets'],
                        "del_rrests": result['data'][1]['rrsets']
                    })),
                created_by=current_user.username,
                domain_id=domain.id)
            history.add()
            return make_response(jsonify(result), 200)
        else:
            history = History(
                msg='Failed to apply record changes to domain {0}'.format(
                    pretty_domain_name(domain_name)),
                detail=str(
                    json.dumps({
                        "domain": domain_name,
                        "msg": result['msg'],
                    })),
                created_by=current_user.username)
            history.add()
            return make_response(jsonify(result), 400)
    except Exception as e:
        current_app.logger.error(
            'Cannot apply record changes. Error: {0}'.format(e))
        current_app.logger.debug(traceback.format_exc())
        return make_response(
            jsonify({
                'status': 'error',
                'msg': 'Error when applying new changes'
            }), 500)
@domain_bp.route('/<path:domain_name>/update',
                 methods=['POST'],
                 strict_slashes=False)
@login_required
@can_access_domain
def record_update(domain_name):
    """
    This route is used for domain work as Slave Zone only
    Pulling the records update from its Master
    """
    try:
        payload = request.json
        # The zone to refresh comes from the request body, not the URL.
        outcome = Domain().update_from_master(payload['domain'])
        succeeded = outcome['status'] == 'ok'
        return make_response(
            jsonify({
                'status': 'ok' if succeeded else 'error',
                'msg': outcome['msg']
            }), 200 if succeeded else 500)
    except Exception as e:
        current_app.logger.error('Cannot update record. Error: {0}'.format(e))
        current_app.logger.debug(traceback.format_exc())
        return make_response(
            jsonify({
                'status': 'error',
                'msg': 'Error when applying new changes'
            }), 500)
@domain_bp.route('/<path:domain_name>/info', methods=['GET'])
@login_required
@can_access_domain
def info(domain_name):
    """Return the zone details fetched from PowerDNS as JSON."""
    zone_info = Domain().get_domain_info(domain_name)
    return make_response(jsonify(zone_info), 200)
@domain_bp.route('/<path:domain_name>/dnssec', methods=['GET'])
@login_required
@can_access_domain
def dnssec(domain_name):
    """Return the zone's DNSSEC key material as JSON."""
    key_data = Domain().get_domain_dnssec(domain_name)
    return make_response(jsonify(key_data), 200)
@domain_bp.route('/<path:domain_name>/dnssec/enable', methods=['POST'])
@login_required
@can_access_domain
@can_configure_dnssec
def dnssec_enable(domain_name):
    """Turn DNSSEC on for the zone and return the resulting state."""
    result = Domain().enable_domain_dnssec(domain_name)
    return make_response(jsonify(result), 200)
@domain_bp.route('/<path:domain_name>/dnssec/disable', methods=['POST'])
@login_required
@can_access_domain
@can_configure_dnssec
def dnssec_disable(domain_name):
    """Disable DNSSEC by deleting every key currently on the zone."""
    d = Domain()
    # Fetch the current key set, then delete each key individually.
    for key_entry in d.get_domain_dnssec(domain_name)['dnssec']:
        d.delete_dnssec_key(domain_name, key_entry['id'])
    return make_response(jsonify({'status': 'ok', 'msg': 'DNSSEC removed.'}))
@domain_bp.route('/<path:domain_name>/manage-setting', methods=['GET', 'POST'])
@login_required
@operator_role_required
def admin_setdomainsetting(domain_name):
    """Create or update a per-domain setting from a JSON POST."""
    # NOTE(review): GET is accepted by the route but falls through with
    # no return value — confirm whether GET should be listed at all.
    if request.method == 'POST':
        #
        # post data should in format
        # {'action': 'set_setting', 'setting': 'default_action, 'value': 'True'}
        #
        try:
            jdata = request.json
            data = jdata['data']
            if jdata['action'] == 'set_setting':
                new_setting = data['setting']
                new_value = str(data['value'])
                domain = Domain.query.filter(
                    Domain.name == domain_name).first()
                setting = DomainSetting.query.filter(
                    DomainSetting.domain == domain).filter(
                        DomainSetting.setting == new_setting).first()
                if setting:
                    # Existing setting: update its value in place.
                    if setting.set(new_value):
                        history = History(
                            msg='Setting {0} changed value to {1} for {2}'.
                            format(new_setting, new_value,
                                   pretty_domain_name(domain_name)),
                            created_by=current_user.username,
                            domain_id=domain.id)
                        history.add()
                        return make_response(
                            jsonify({
                                'status': 'ok',
                                'msg': 'Setting updated.'
                            }))
                    else:
                        return make_response(
                            jsonify({
                                'status': 'error',
                                'msg': 'Unable to set value of setting.'
                            }))
                else:
                    # New setting for this domain.
                    # NOTE(review): if the domain lookup above returned
                    # None this raises and lands in the except handler
                    # below (HTTP 400) — confirm that is intended.
                    if domain.add_setting(new_setting, new_value):
                        history = History(
                            msg=
                            'New setting {0} with value {1} for {2} has been created'
                            .format(new_setting, new_value, pretty_domain_name(domain_name)),
                            created_by=current_user.username,
                            domain_id=domain.id)
                        history.add()
                        return make_response(
                            jsonify({
                                'status': 'ok',
                                'msg': 'New setting created and updated.'
                            }))
                    else:
                        return make_response(
                            jsonify({
                                'status': 'error',
                                'msg': 'Unable to create new setting.'
                            }))
            else:
                return make_response(
                    jsonify({
                        'status': 'error',
                        'msg': 'Action not supported.'
                    }), 400)
        except Exception as e:
            current_app.logger.error(
                'Cannot change domain setting. Error: {0}'.format(e))
            current_app.logger.debug(traceback.format_exc())
            return make_response(
                jsonify({
                    'status':
                    'error',
                    'msg':
                    'There is something wrong, please contact Administrator.'
                }), 400)
| |
"""
A generic comment-moderation system which allows configuration of
moderation options on a per-model basis.
To use, do two things:
1. Create or import a subclass of ``CommentModerator`` defining the
options you want.
2. Import ``moderator`` from this module and register one or more
models, passing the models and the ``CommentModerator`` options
class you want to use.
Example
-------
First, we define a simple model class which might represent entries in
a Weblog::
from django.db import models
class Entry(models.Model):
title = models.CharField(maxlength=250)
body = models.TextField()
pub_date = models.DateField()
enable_comments = models.BooleanField()
Then we create a ``CommentModerator`` subclass specifying some
moderation options::
from tango_comments.moderation import CommentModerator, moderator
class EntryModerator(CommentModerator):
email_notification = True
enable_field = 'enable_comments'
And finally register it for moderation::
moderator.register(Entry, EntryModerator)
This sample class would apply two moderation steps to each new
comment submitted on an Entry:
* If the entry's ``enable_comments`` field is set to ``False``, the
comment will be rejected (immediately deleted).
* If the comment is successfully posted, an email notification of the
comment will be sent to site staff.
For a full list of built-in moderation options and other
configurability, see the documentation for the ``CommentModerator``
class.
"""
import datetime
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.db.models.base import ModelBase
from django.template import Context, loader
from django.utils import timezone
import tango_comments as comments
from tango_comments import signals
class AlreadyModerated(Exception):
    """
    Signals a double registration: the model handed to ``register`` is
    already associated with a moderation class.
    """
class NotModerated(Exception):
    """
    Signals that ``unregister`` was called for a model that has no
    moderation class registered.
    """
class CommentModerator(object):
    """
    Encapsulates comment-moderation options for a given model.

    This class is not designed to be used directly, since it doesn't
    enable any of the available moderation options. Instead, subclass
    it and override attributes to enable different options::

    ``auto_close_field``
        If this is set to the name of a ``DateField`` or
        ``DateTimeField`` on the model for which comments are
        being moderated, new comments for objects of that model
        will be disallowed (immediately deleted) when a certain
        number of days have passed after the date specified in
        that field. Must be used in conjunction with
        ``close_after``, which specifies the number of days past
        which comments should be disallowed. Default value is
        ``None``.

    ``auto_moderate_field``
        Like ``auto_close_field``, but instead of outright
        deleting new comments when the requisite number of days
        have elapsed, it will simply set the ``is_public`` field
        of new comments to ``False`` before saving them. Must be
        used in conjunction with ``moderate_after``, which
        specifies the number of days past which comments should be
        moderated. Default value is ``None``.

    ``close_after``
        If ``auto_close_field`` is used, this must specify the
        number of days past the value of the field specified by
        ``auto_close_field`` after which new comments for an
        object should be disallowed. Default value is ``None``.

    ``email_notification``
        If ``True``, any new comment on an object of this model
        which survives moderation will generate an email to site
        staff. Default value is ``False``.

    ``enable_field``
        If this is set to the name of a ``BooleanField`` on the
        model for which comments are being moderated, new comments
        on objects of that model will be disallowed (immediately
        deleted) whenever the value of that field is ``False`` on
        the object the comment would be attached to. Default value
        is ``None``.

    ``moderate_after``
        If ``auto_moderate_field`` is used, this must specify the number
        of days past the value of the field specified by
        ``auto_moderate_field`` after which new comments for an
        object should be marked non-public. Default value is
        ``None``.

    Most common moderation needs can be covered by changing these
    attributes, but further customization can be obtained by
    subclassing and overriding the following methods. Each method will
    be called with three arguments: ``comment``, which is the comment
    being submitted, ``content_object``, which is the object the
    comment will be attached to, and ``request``, which is the
    ``HttpRequest`` in which the comment is being submitted::

    ``allow``
        Should return ``True`` if the comment should be allowed to
        post on the content object, and ``False`` otherwise (in
        which case the comment will be immediately deleted).

    ``email``
        If email notification of the new comment should be sent to
        site staff or moderators, this method is responsible for
        sending the email.

    ``moderate``
        Should return ``True`` if the comment should be moderated
        (in which case its ``is_public`` field will be set to
        ``False`` before saving), and ``False`` otherwise (in
        which case the ``is_public`` field will not be changed).

    Subclasses which want to introspect the model for which comments
    are being moderated can do so through the attribute ``_model``,
    which will be the model class.
    """
    # Default option values; subclasses override these to enable the
    # behaviors documented above.
    auto_close_field = None
    auto_moderate_field = None
    close_after = None
    email_notification = False
    enable_field = None
    moderate_after = None
    def __init__(self, model):
        # The model class whose comments this moderator governs.
        self._model = model
    def _get_delta(self, now, then):
        """
        Internal helper which will return a ``datetime.timedelta``
        representing the time between ``now`` and ``then``. Assumes
        ``now`` is a ``datetime.date`` or ``datetime.datetime`` later
        than ``then``.

        If ``now`` and ``then`` are not of the same type due to one of
        them being a ``datetime.date`` and the other being a
        ``datetime.datetime``, both will be coerced to
        ``datetime.date`` before calculating the delta.
        """
        # date - datetime subtraction is not allowed, so coerce both to
        # plain dates when the types differ.
        if now.__class__ is not then.__class__:
            now = datetime.date(now.year, now.month, now.day)
            then = datetime.date(then.year, then.month, then.day)
        if now < then:
            raise ValueError("Cannot determine moderation rules because date field is set to a value in the future")
        return now - then
    def allow(self, comment, content_object, request):
        """
        Determine whether a given comment is allowed to be posted on
        a given object.

        Return ``True`` if the comment should be allowed, ``False``
        otherwise.
        """
        if self.enable_field:
            if not getattr(content_object, self.enable_field):
                return False
        # Auto-close: disallow once close_after days have elapsed since
        # the date stored in auto_close_field.
        if self.auto_close_field and self.close_after is not None:
            close_after_date = getattr(content_object, self.auto_close_field)
            if close_after_date is not None and self._get_delta(timezone.now(), close_after_date).days >= self.close_after:
                return False
        return True
    def moderate(self, comment, content_object, request):
        """
        Determine whether a given comment on a given object should be
        allowed to show up immediately, or should be marked non-public
        and await approval.

        Return ``True`` if the comment should be moderated (marked
        non-public), ``False`` otherwise.
        """
        if self.auto_moderate_field and self.moderate_after is not None:
            moderate_after_date = getattr(content_object, self.auto_moderate_field)
            if moderate_after_date is not None and self._get_delta(timezone.now(), moderate_after_date).days >= self.moderate_after:
                return True
        return False
    def email(self, comment, content_object, request):
        """
        Send email notification of a new comment to site staff when email
        notifications have been requested.
        """
        if not self.email_notification:
            return
        # settings.MANAGERS is a sequence of (name, address) tuples.
        recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]
        t = loader.get_template('comments/comment_notification_email.txt')
        # NOTE(review): wrapping the context in ``Context`` only works with
        # the pre-1.11 Django template API; newer ``Template.render`` expects
        # a plain dict -- confirm the targeted Django version.
        c = Context({ 'comment': comment,
                      'content_object': content_object })
        subject = '[%s] New comment posted on "%s"' % (get_current_site(request).name,
                                                          content_object)
        message = t.render(c)
        # fail_silently: a broken mail backend must not block comment posting.
        send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True)
class Moderator(object):
    """
    Handles moderation of a set of models.

    An instance of this class will maintain a list of one or more
    models registered for comment moderation, and their associated
    moderation classes, and apply moderation to all incoming comments.

    To register a model, obtain an instance of ``Moderator`` (this
    module exports one as ``moderator``), and call its ``register``
    method, passing the model class and a moderation class (which
    should be a subclass of ``CommentModerator``). Note that both of
    these should be the actual classes, not instances of the classes.

    To cease moderation for a model, call the ``unregister`` method,
    passing the model class.

    For convenience, both ``register`` and ``unregister`` can also
    accept a list of model classes in place of a single model; this
    allows easier registration of multiple models with the same
    ``CommentModerator`` class.

    The actual moderation is applied in two phases: one prior to
    saving a new comment, and the other immediately after saving. The
    pre-save moderation may mark a comment as non-public or mark it to
    be removed; the post-save moderation may delete a comment which
    was disallowed (there is currently no way to prevent the comment
    being saved once before removal) and, if the comment is still
    around, will send any notification emails the comment generated.
    """
    def __init__(self):
        # Maps model class -> CommentModerator instance.
        self._registry = {}
        self.connect()
    def connect(self):
        """
        Hook up the moderation methods to pre- and post-save signals
        from the comment models.
        """
        signals.comment_will_be_posted.connect(self.pre_save_moderation, sender=comments.get_model())
        signals.comment_was_posted.connect(self.post_save_moderation, sender=comments.get_model())
    def register(self, model_or_iterable, moderation_class):
        """
        Register a model or a list of models for comment moderation,
        using a particular moderation class.

        Raise ``AlreadyModerated`` if any of the models are already
        registered.
        """
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model in self._registry:
                raise AlreadyModerated(
                    "The model '%s' is already being moderated" % model._meta.verbose_name
                )
            self._registry[model] = moderation_class(model)
    def unregister(self, model_or_iterable):
        """
        Remove a model or a list of models from the list of models
        whose comments will be moderated.

        Raise ``NotModerated`` if any of the models are not currently
        registered for moderation.
        """
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model not in self._registry:
                # Use _meta.verbose_name, matching register() above.
                # The previous _meta.module_name attribute was removed in
                # Django 1.8 and raised AttributeError instead of the
                # intended NotModerated error.
                raise NotModerated("The model '%s' is not currently being moderated" % model._meta.verbose_name)
            del self._registry[model]
    def pre_save_moderation(self, sender, comment, request, **kwargs):
        """
        Apply any necessary pre-save moderation steps to new
        comments.
        """
        model = comment.content_type.model_class()
        if model not in self._registry:
            return
        content_object = comment.content_object
        moderation_class = self._registry[model]
        # Comment will be disallowed outright (HTTP 403 response)
        if not moderation_class.allow(comment, content_object, request):
            return False
        if moderation_class.moderate(comment, content_object, request):
            comment.is_public = False
    def post_save_moderation(self, sender, comment, request, **kwargs):
        """
        Apply any necessary post-save moderation steps to new
        comments.
        """
        model = comment.content_type.model_class()
        if model not in self._registry:
            return
        self._registry[model].email(comment, comment.content_object, request)
# Import this instance in your own code to use in registering
# your models for moderation. Instantiating it here also connects
# the comment signals (see Moderator.__init__/connect).
moderator = Moderator()
| |
#!/usr/bin/env python
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of targets to build artifacts."""
import os.path
import random
import string
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name,
                          dockerfile_dir,
                          shell_command,
                          environ=None,
                          flake_retries=0,
                          timeout_retries=0,
                          timeout_seconds=30 * 60,
                          extra_docker_args=None,
                          verbose_success=False):
    """Creates jobspec for a task running under docker.

    Args:
      name: artifact name, used for the job shortname and output dir.
      dockerfile_dir: directory containing the Dockerfile to use.
      shell_command: command to run inside the container (RUN_COMMAND).
      environ: optional dict of extra environment variables passed into
        the container via `-e` docker args.
      flake_retries: retries allowed for flaky failures.
      timeout_retries: retries allowed after timeouts.
      timeout_seconds: per-job timeout.
      extra_docker_args: extra arguments forwarded to `docker run`.
      verbose_success: if True, log output even for successful runs.

    Returns:
      A jobset.JobSpec invoking build_and_run_docker.sh.
    """
    # Use a None sentinel instead of a mutable `{}` default (shared across
    # calls); copy so the caller's dict is never mutated.
    environ = dict(environ) if environ else {}
    environ['RUN_COMMAND'] = shell_command
    environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
    docker_args = []
    for k, v in environ.items():
        docker_args += ['-e', '%s=%s' % (k, v)]
    docker_env = {
        'DOCKERFILE_DIR': dockerfile_dir,
        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
        'OUTPUT_DIR': 'artifacts'
    }
    if extra_docker_args is not None:
        docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
    jobspec = jobset.JobSpec(
        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
        docker_args,
        environ=docker_env,
        shortname='build_artifact.%s' % (name),
        timeout_seconds=timeout_seconds,
        flake_retries=flake_retries,
        timeout_retries=timeout_retries,
        verbose_success=verbose_success)
    return jobspec
def create_jobspec(name,
                   cmdline,
                   environ=None,
                   shell=False,
                   flake_retries=0,
                   timeout_retries=0,
                   timeout_seconds=30 * 60,
                   use_workspace=False,
                   cpu_cost=1.0,
                   verbose_success=False):
    """Creates jobspec.

    Args:
      name: artifact name, used for the job shortname and output dir.
      cmdline: command line to run (list of args).
      environ: optional dict of extra environment variables for the job.
      shell: whether to run the command through a shell.
      flake_retries: retries allowed for flaky failures.
      timeout_retries: retries allowed after timeouts.
      timeout_seconds: per-job timeout.
      use_workspace: if True, run inside a dedicated git workspace so
        concurrent jobs do not interfere with each other's checkout.
      cpu_cost: relative CPU weight used by the job scheduler.
      verbose_success: if True, log output even for successful runs.

    Returns:
      A jobset.JobSpec for the command.
    """
    # Use a None sentinel instead of a mutable `{}` default (shared across
    # calls); copy so the caller's dict is never mutated.
    environ = dict(environ) if environ else {}
    if use_workspace:
        environ['WORKSPACE_NAME'] = 'workspace_%s' % name
        environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
        cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
                  ] + cmdline
    else:
        environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
    jobspec = jobset.JobSpec(cmdline=cmdline,
                             environ=environ,
                             shortname='build_artifact.%s' % (name),
                             timeout_seconds=timeout_seconds,
                             flake_retries=flake_retries,
                             timeout_retries=timeout_retries,
                             shell=shell,
                             cpu_cost=cpu_cost,
                             verbose_success=verbose_success)
    return jobspec
# Minimum macOS deployment target used when compiling native artifacts.
_MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.10'
# Compiler flags selecting 32- vs 64-bit output per architecture label.
_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
class PythonArtifact:
    """Builds Python artifacts (wheels) for one platform/arch/interpreter."""
    def __init__(self, platform, arch, py_version):
        # e.g. 'python_manylinux2014_x64_cp39-cp39'
        self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
        self.platform = platform
        self.arch = arch
        self.labels = ['artifact', 'python', platform, arch, py_version]
        self.py_version = py_version
        if 'manylinux' in platform:
            self.labels.append('linux')
        if 'linux_extra' in platform:
            # linux_extra wheels used to be built by a separate kokoro job.
            # Their build is now much faster, so they can be included
            # in the regular artifact build.
            self.labels.append('linux')
    def pre_build_jobspecs(self):
        """No prerequisite jobs are needed before building Python wheels."""
        return []
    def build_jobspec(self):
        """Return the jobspec that builds this wheel, per platform."""
        environ = {}
        if self.platform == 'linux_extra':
            # Crosscompilation build for armv7 (e.g. Raspberry Pi)
            environ['PYTHON'] = '/opt/python/{}/bin/python3'.format(
                self.py_version)
            environ['PIP'] = '/opt/python/{}/bin/pip3'.format(self.py_version)
            environ['GRPC_SKIP_PIP_CYTHON_UPGRADE'] = 'TRUE'
            environ['GRPC_SKIP_TWINE_CHECK'] = 'TRUE'
            return create_docker_jobspec(
                self.name,
                'tools/dockerfile/grpc_artifact_python_linux_{}'.format(
                    self.arch),
                'tools/run_tests/artifacts/build_artifact_python.sh',
                environ=environ,
                timeout_seconds=60 * 60)
        elif 'manylinux' in self.platform:
            if self.arch == 'x86':
                # Force a 32-bit personality inside the container.
                environ['SETARCH_CMD'] = 'linux32'
            # Inside the manylinux container, the python installations are located in
            # special places...
            environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
                self.py_version)
            environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
            environ['GRPC_SKIP_PIP_CYTHON_UPGRADE'] = 'TRUE'
            if self.arch == 'aarch64':
                environ['GRPC_SKIP_TWINE_CHECK'] = 'TRUE'
            else:
                # only run auditwheel if we're not crosscompiling
                environ['GRPC_RUN_AUDITWHEEL_REPAIR'] = 'TRUE'
                # only build the packages that depend on grpcio-tools
                # if we're not crosscompiling.
                # - they require protoc to run on current architecture
                # - they only have sdist packages anyway, so it's useless to build them again
                environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
            return create_docker_jobspec(
                self.name,
                'tools/dockerfile/grpc_artifact_python_%s_%s' %
                (self.platform, self.arch),
                'tools/run_tests/artifacts/build_artifact_python.sh',
                environ=environ,
                timeout_seconds=60 * 60 * 2)
        elif self.platform == 'windows':
            if 'Python27' in self.py_version:
                environ['EXT_COMPILER'] = 'mingw32'
            else:
                environ['EXT_COMPILER'] = 'msvc'
            # For some reason, the batch script %random% always runs with the same
            # seed. We create a random temp-dir here
            # NOTE(review): 'dir' is computed but never used below (and it
            # shadows the builtin) -- confirm before removing.
            dir = ''.join(
                random.choice(string.ascii_uppercase) for _ in range(10))
            return create_jobspec(self.name, [
                'tools\\run_tests\\artifacts\\build_artifact_python.bat',
                self.py_version, '32' if self.arch == 'x86' else '64'
            ],
                                  environ=environ,
                                  timeout_seconds=45 * 60,
                                  use_workspace=True)
        else:
            # macOS (and any other non-docker platform): build directly.
            environ['PYTHON'] = self.py_version
            environ['SKIP_PIP_INSTALL'] = 'TRUE'
            return create_jobspec(
                self.name,
                ['tools/run_tests/artifacts/build_artifact_python.sh'],
                environ=environ,
                timeout_seconds=60 * 60 * 2,
                use_workspace=True)
    def __str__(self):
        return self.name
class RubyArtifact:
    """Builds ruby native gem."""

    def __init__(self, platform, arch):
        self.platform = platform
        self.arch = arch
        self.name = 'ruby_native_gem_{}_{}'.format(platform, arch)
        self.labels = ['artifact', 'ruby', platform, arch]

    def pre_build_jobspecs(self):
        """No prerequisite jobs are needed for the Ruby build."""
        return []

    def build_jobspec(self):
        """Return the jobspec building the native gem.

        The Ruby build drives docker itself and docker cannot be nested,
        so the build runs inside a dedicated workspace instead.
        """
        return create_jobspec(
            self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
            use_workspace=True,
            timeout_seconds=90 * 60)
class CSharpExtArtifact:
    """Builds C# native extension library"""

    def __init__(self, platform, arch, arch_abi=None):
        self.platform = platform
        self.arch = arch
        self.arch_abi = arch_abi
        self.name = 'csharp_ext_%s_%s' % (platform, arch)
        self.labels = ['artifact', 'csharp', platform, arch]
        if arch_abi:
            # ABI-specific builds carry the ABI in both name and labels.
            self.name = '%s_%s' % (self.name, arch_abi)
            self.labels.append(arch_abi)

    def pre_build_jobspecs(self):
        """No prerequisite jobs are required."""
        return []

    def build_jobspec(self):
        """Return the jobspec compiling the native extension."""
        if self.arch == 'android':
            return create_docker_jobspec(
                self.name,
                'tools/dockerfile/grpc_artifact_android_ndk',
                'tools/run_tests/artifacts/build_artifact_csharp_android.sh',
                environ={'ANDROID_ABI': self.arch_abi})
        if self.arch == 'ios':
            return create_jobspec(
                self.name,
                ['tools/run_tests/artifacts/build_artifact_csharp_ios.sh'],
                timeout_seconds=60 * 60,
                use_workspace=True)
        if self.platform == 'windows':
            return create_jobspec(self.name, [
                'tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
                self.arch
            ],
                                  timeout_seconds=45 * 60,
                                  use_workspace=True)
        if self.platform == 'linux':
            dockerfile_dir = 'tools/dockerfile/grpc_artifact_centos6_{}'.format(
                self.arch)
            if self.arch == 'aarch64':
                # for aarch64, use a dockcross manylinux image that will
                # give us both ready to use crosscompiler and sufficient backward compatibility
                dockerfile_dir = 'tools/dockerfile/grpc_artifact_python_manylinux2014_aarch64'
            return create_docker_jobspec(
                self.name, dockerfile_dir,
                'tools/run_tests/artifacts/build_artifact_csharp.sh')
        # Remaining case: non-linux, non-windows (macOS) direct build.
        return create_jobspec(
            self.name,
            ['tools/run_tests/artifacts/build_artifact_csharp.sh'],
            timeout_seconds=45 * 60,
            use_workspace=True)

    def __str__(self):
        return self.name
class PHPArtifact:
    """Builds PHP PECL package"""

    def __init__(self, platform, arch):
        self.platform = platform
        self.arch = arch
        self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
        self.labels = ['artifact', 'php', platform, arch]

    def pre_build_jobspecs(self):
        """No prerequisite jobs for the PHP build."""
        return []

    def build_jobspec(self):
        """Return the jobspec producing the PECL package."""
        if self.platform != 'linux':
            # Non-linux (macOS) build runs directly in a workspace.
            return create_jobspec(
                self.name, ['tools/run_tests/artifacts/build_artifact_php.sh'],
                use_workspace=True)
        return create_docker_jobspec(
            self.name,
            'tools/dockerfile/test/php73_zts_stretch_{}'.format(self.arch),
            'tools/run_tests/artifacts/build_artifact_php.sh')
class ProtocArtifact:
    """Builds protoc and protoc-plugin artifacts"""

    def __init__(self, platform, arch):
        self.platform = platform
        self.arch = arch
        self.name = 'protoc_%s_%s' % (platform, arch)
        self.labels = ['artifact', 'protoc', platform, arch]

    def pre_build_jobspecs(self):
        """No prerequisite jobs are required."""
        return []

    def build_jobspec(self):
        """Return the jobspec compiling protoc and its plugins."""
        if self.platform == 'windows':
            # 64-bit builds need the Win64 generator variant.
            generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
            return create_jobspec(
                self.name,
                ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
                environ={'generator': generator},
                use_workspace=True)
        environ = {'CXXFLAGS': '', 'LDFLAGS': ''}
        if self.platform == 'linux':
            dockerfile_dir = 'tools/dockerfile/grpc_artifact_centos6_{}'.format(
                self.arch)
            if self.arch == 'aarch64':
                # for aarch64, use a dockcross manylinux image that will
                # give us both ready to use crosscompiler and sufficient backward compatibility
                dockerfile_dir = 'tools/dockerfile/grpc_artifact_protoc_aarch64'
            environ['LDFLAGS'] += ' -static-libgcc -static-libstdc++ -s'
            return create_docker_jobspec(
                self.name,
                dockerfile_dir,
                'tools/run_tests/artifacts/build_artifact_protoc.sh',
                environ=environ)
        # macOS: build directly with the compatibility deployment target.
        environ['CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
        return create_jobspec(
            self.name,
            ['tools/run_tests/artifacts/build_artifact_protoc.sh'],
            environ=environ,
            timeout_seconds=60 * 60,
            use_workspace=True)

    def __str__(self):
        return self.name
def targets():
    """Gets list of supported targets.

    Order is not significant; each entry knows how to build itself via
    pre_build_jobspecs()/build_jobspec().
    """
    return [
        # protoc and plugin binaries
        ProtocArtifact('linux', 'x64'),
        ProtocArtifact('linux', 'x86'),
        ProtocArtifact('linux', 'aarch64'),
        ProtocArtifact('macos', 'x64'),
        ProtocArtifact('windows', 'x64'),
        ProtocArtifact('windows', 'x86'),
        # C# native extension libraries
        CSharpExtArtifact('linux', 'x64'),
        CSharpExtArtifact('linux', 'aarch64'),
        CSharpExtArtifact('macos', 'x64'),
        CSharpExtArtifact('windows', 'x64'),
        CSharpExtArtifact('windows', 'x86'),
        CSharpExtArtifact('linux', 'android', arch_abi='arm64-v8a'),
        CSharpExtArtifact('linux', 'android', arch_abi='armeabi-v7a'),
        CSharpExtArtifact('linux', 'android', arch_abi='x86'),
        CSharpExtArtifact('macos', 'ios'),
        # Python wheels per manylinux spec / arch / interpreter tag
        PythonArtifact('manylinux2014', 'x64', 'cp36-cp36m'),
        PythonArtifact('manylinux2014', 'x64', 'cp37-cp37m'),
        PythonArtifact('manylinux2014', 'x64', 'cp38-cp38'),
        PythonArtifact('manylinux2014', 'x64', 'cp39-cp39'),
        PythonArtifact('manylinux2014', 'x64', 'cp310-cp310'),
        PythonArtifact('manylinux2014', 'x86', 'cp36-cp36m'),
        PythonArtifact('manylinux2014', 'x86', 'cp37-cp37m'),
        PythonArtifact('manylinux2014', 'x86', 'cp38-cp38'),
        PythonArtifact('manylinux2014', 'x86', 'cp39-cp39'),
        PythonArtifact('manylinux2014', 'x86', 'cp310-cp310'),
        PythonArtifact('manylinux2010', 'x64', 'cp36-cp36m'),
        PythonArtifact('manylinux2010', 'x64', 'cp37-cp37m'),
        PythonArtifact('manylinux2010', 'x64', 'cp38-cp38'),
        PythonArtifact('manylinux2010', 'x64', 'cp39-cp39'),
        PythonArtifact('manylinux2010', 'x86', 'cp36-cp36m'),
        PythonArtifact('manylinux2010', 'x86', 'cp37-cp37m'),
        PythonArtifact('manylinux2010', 'x86', 'cp38-cp38'),
        PythonArtifact('manylinux2010', 'x86', 'cp39-cp39'),
        PythonArtifact('manylinux2014', 'aarch64', 'cp36-cp36m'),
        PythonArtifact('manylinux2014', 'aarch64', 'cp37-cp37m'),
        PythonArtifact('manylinux2014', 'aarch64', 'cp38-cp38'),
        PythonArtifact('manylinux2014', 'aarch64', 'cp39-cp39'),
        PythonArtifact('manylinux2014', 'aarch64', 'cp310-cp310'),
        PythonArtifact('linux_extra', 'armv7', 'cp36-cp36m'),
        PythonArtifact('linux_extra', 'armv7', 'cp37-cp37m'),
        PythonArtifact('linux_extra', 'armv7', 'cp38-cp38'),
        PythonArtifact('linux_extra', 'armv7', 'cp39-cp39'),
        PythonArtifact('linux_extra', 'armv7', 'cp310-cp310'),
        PythonArtifact('macos', 'x64', 'python3.6'),
        PythonArtifact('macos', 'x64', 'python3.7'),
        PythonArtifact('macos', 'x64', 'python3.8'),
        PythonArtifact('macos', 'x64', 'python3.9'),
        PythonArtifact('macos', 'x64', 'python3.10'),
        PythonArtifact('windows', 'x86', 'Python36_32bit'),
        PythonArtifact('windows', 'x86', 'Python37_32bit'),
        PythonArtifact('windows', 'x86', 'Python38_32bit'),
        PythonArtifact('windows', 'x86', 'Python39_32bit'),
        PythonArtifact('windows', 'x86', 'Python310_32bit'),
        PythonArtifact('windows', 'x64', 'Python36'),
        PythonArtifact('windows', 'x64', 'Python37'),
        PythonArtifact('windows', 'x64', 'Python38'),
        PythonArtifact('windows', 'x64', 'Python39'),
        PythonArtifact('windows', 'x64', 'Python310'),
        # Ruby native gems
        RubyArtifact('linux', 'x64'),
        RubyArtifact('macos', 'x64'),
        # PHP PECL packages
        PHPArtifact('linux', 'x64'),
        PHPArtifact('macos', 'x64')
    ]
| |
#
#
# Copyright (C) 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Remote API test utilities.
"""
import base64
import logging
import re
from cStringIO import StringIO
import pycurl
from ganeti import errors
from ganeti import opcodes
from ganeti import http
from ganeti import server
from ganeti import utils
from ganeti import compat
from ganeti import luxi
import ganeti.rpc.client as rpccl
from ganeti import rapi
import ganeti.http.server # pylint: disable=W0611
import ganeti.server.rapi # pylint: disable=W0611
from ganeti.rapi.auth import users_file
import ganeti.rapi.client # pylint: disable=W0611
_URI_RE = re.compile(r"https://(?P<host>.*):(?P<port>\d+)(?P<path>/.*)")
class VerificationError(Exception):
  """Error type raised by these test utilities.

  Wraps Ganeti-internal exceptions so external users of the utilities do
  not have to integrate Ganeti's exception hierarchy.

  """
def _GetOpById(op_id):
  """Looks up an opcode class by its C{OP_ID} string.

  @raise VerificationError: if the ID is not a known opcode

  """
  op_cls = opcodes.OP_MAPPING.get(op_id)
  if op_cls is None:
    raise VerificationError("Unknown opcode ID '%s'" % op_id)
  return op_cls
def _HideInternalErrors(fn):
  """Hides Ganeti-internal exceptions, see L{VerificationError}.

  Decorator: any GenericError or GanetiApiError raised by C{fn} is
  re-raised as L{VerificationError}.

  NOTE(review): the wrapper does not apply functools.wraps, so the
  decorated function loses its original name/docstring.

  """
  def wrapper(*args, **kwargs):
    try:
      return fn(*args, **kwargs)
    except (errors.GenericError, rapi.client.GanetiApiError), err:
      # Python 2 except syntax; translate to the test-facing error type.
      raise VerificationError("Unhandled Ganeti error: %s" % err)
  return wrapper
@_HideInternalErrors
def VerifyOpInput(op_id, data):
  """Verifies opcode parameters according to their definition.

  @type op_id: string
  @param op_id: Opcode ID (C{OP_ID} attribute), e.g. C{OP_CLUSTER_VERIFY}
  @type data: dict
  @param data: Opcode parameter values
  @raise VerificationError: Parameter verification failed

  """
  op_cls = _GetOpById(op_id)
  try:
    # Instantiating the opcode checks for unknown/duplicate parameters.
    op = op_cls(**data)
  except TypeError, err:
    raise VerificationError("Unable to create opcode instance: %s" % err)
  try:
    # False: don't set defaults, only validate what was supplied.
    op.Validate(False)
  except errors.OpPrereqError, err:
    raise VerificationError("Parameter validation for opcode '%s' failed: %s" %
                            (op_id, err))
@_HideInternalErrors
def VerifyOpResult(op_id, result):
  """Verifies opcode results used in tests (e.g. in a mock).

  @type op_id: string
  @param op_id: Opcode ID (C{OP_ID} attribute), e.g. C{OP_CLUSTER_VERIFY}
  @param result: Mocked opcode result
  @raise VerificationError: Return value verification failed

  """
  resultcheck_fn = _GetOpById(op_id).OP_RESULT
  if not resultcheck_fn:
    # Nothing to check against; just note it and accept the result.
    logging.warning("Opcode '%s' has no result type definition", op_id)
    return
  if resultcheck_fn(result):
    return
  raise VerificationError("Given result does not match result description"
                          " for opcode '%s': %s" % (op_id, resultcheck_fn))
def _GetPathFromUri(uri):
  """Extracts the path-plus-query portion of an HTTPS URI.

  Returns None when the URI does not match the expected
  C{https://host:port/path} shape.

  """
  match = _URI_RE.match(uri)
  if match is None:
    return None
  return match.groupdict()["path"]
def _FormatHeaders(headers):
  """Joins HTTP headers into a single newline-separated string.

  @type headers: sequence of strings
  @rtype: string

  """
  # Every entry must already be in "Name: value" form.
  assert compat.all(": " in hdr for hdr in headers)
  return "\n".join(headers)
class FakeCurl(object):
  """Fake cURL object.

  Mimics the subset of the pycurl.Curl interface the tests use: options
  are stored in a dict and C{perform} dispatches the request to the
  supplied handler instead of the network.

  """
  def __init__(self, handler):
    """Initialize this class

    @param handler: Request handler instance (must provide FetchResponse)

    """
    self._handler = handler
    # opt constant -> value, as set via setopt()
    self._opts = {}
    # info constant -> value, populated by perform()
    self._info = {}
  def setopt(self, opt, value):
    self._opts[opt] = value
  def getopt(self, opt):
    return self._opts.get(opt)
  def unsetopt(self, opt):
    self._opts.pop(opt, None)
  def getinfo(self, info):
    # Raises KeyError if perform() has not set the value, mirroring a
    # lookup on an unset info.
    return self._info[info]
  def perform(self):
    # CUSTOMREQUEST, URL, POSTFIELDS and WRITEFUNCTION must have been set
    # beforehand; a missing one raises KeyError.
    method = self._opts[pycurl.CUSTOMREQUEST]
    url = self._opts[pycurl.URL]
    request_body = self._opts[pycurl.POSTFIELDS]
    writefn = self._opts[pycurl.WRITEFUNCTION]
    if pycurl.HTTPHEADER in self._opts:
      baseheaders = _FormatHeaders(self._opts[pycurl.HTTPHEADER])
    else:
      baseheaders = ""
    headers = http.ParseHeaders(StringIO(baseheaders))
    if request_body:
      headers[http.HTTP_CONTENT_LENGTH] = str(len(request_body))
    # Synthesize a basic-auth header when the option was requested.
    if self._opts.get(pycurl.HTTPAUTH, 0) & pycurl.HTTPAUTH_BASIC:
      try:
        userpwd = self._opts[pycurl.USERPWD]
      except KeyError:
        raise errors.ProgrammerError("Basic authentication requires username"
                                     " and password")
      headers[http.HTTP_AUTHORIZATION] = \
        "%s %s" % (http.auth.HTTP_BASIC_AUTH, base64.b64encode(userpwd))
    path = _GetPathFromUri(url)
    (code, _, resp_body) = \
      self._handler.FetchResponse(path, method, headers, request_body)
    self._info[pycurl.RESPONSE_CODE] = code
    if resp_body is not None:
      # Deliver the body through the registered write callback, as
      # pycurl would.
      writefn(resp_body)
class _RapiMock(object):
  """Mocking out the RAPI server parts.

  Wraps a RemoteApiHandler so that requests can be fed to it directly
  (via L{FetchResponse}) without any sockets.

  """
  def __init__(self, user_fn, luxi_client, reqauth=False):
    """Initialize this class.

    @type user_fn: callable
    @param user_fn: Function to authentication username
    @param luxi_client: A LUXI client implementation
    @type reqauth: bool
    @param reqauth: whether the handler should require authentication

    """
    self.handler = \
      server.rapi.RemoteApiHandler(user_fn, reqauth, _client_cls=luxi_client)
  def FetchResponse(self, path, method, headers, request_body):
    """This is a callback method used to fetch a response.

    This method is called by the FakeCurl.perform method

    @type path: string
    @param path: Requested path
    @type method: string
    @param method: HTTP method
    @type request_body: string
    @param request_body: Request body
    @type headers: mimetools.Message
    @param headers: Request headers
    @return: Tuple containing status code, response headers and response body

    """
    req_msg = http.HttpMessage()
    req_msg.start_line = \
      http.HttpClientToServerStartLine(method, path, http.HTTP_1_0)
    req_msg.headers = headers
    req_msg.body = request_body
    # Minimal reader stub; presumably only its 'sock' attribute is
    # accessed by HttpResponder -- confirm against the http.server code.
    req_reader = type('TestReader', (object, ), {'sock': None})()
    (_, _, _, resp_msg) = \
      http.server.HttpResponder(self.handler)(lambda: (req_msg, req_reader))
    return (resp_msg.start_line.code, resp_msg.headers, resp_msg.body)
class _TestLuxiTransport(object):
"""Mocked LUXI transport.
Raises L{errors.RapiTestResult} for all method calls, no matter the
arguments.
"""
def __init__(self, record_fn, address, timeouts=None, # pylint: disable=W0613
allow_non_master=None): # pylint: disable=W0613
"""Initializes this class.
"""
self._record_fn = record_fn
def Close(self):
pass
def Call(self, data):
"""Calls LUXI method.
In this test class the method is not actually called, but added to a list
of called methods and then an exception (L{errors.RapiTestResult}) is
raised. There is no return value.
"""
(method, _, _) = rpccl.ParseRequest(data)
# Take a note of called method
self._record_fn(method)
# Everything went fine until here, so let's abort the test
raise errors.RapiTestResult
class _LuxiCallRecorder(object):
"""Records all called LUXI client methods.
"""
def __init__(self):
"""Initializes this class.
"""
self._called = set()
def Record(self, name):
"""Records a called function name.
"""
self._called.add(name)
def CalledNames(self):
"""Returns a list of called LUXI methods.
"""
return self._called
def __call__(self, address=None):
"""Creates an instrumented LUXI client.
The LUXI client will record all method calls (use L{CalledNames} to
retrieve them).
"""
return luxi.Client(transport=compat.partial(_TestLuxiTransport,
self.Record),
address=address)
def _TestWrapper(fn, *args, **kwargs):
"""Wrapper for ignoring L{errors.RapiTestResult}.
"""
try:
return fn(*args, **kwargs)
except errors.RapiTestResult:
# Everything was fine up to the point of sending a LUXI request
return NotImplemented
class InputTestClient(object):
  """Test version of RAPI client.

  Instances of this class can be used to test input arguments for RAPI client
  calls. See L{rapi.client.GanetiRapiClient} for available methods and their
  arguments. Functions can return C{NotImplemented} if all arguments are
  acceptable, but a LUXI request would be necessary to provide an actual return
  value. In case of an error, L{VerificationError} is raised.

  @see: An example on how to use this class can be found in
    C{doc/examples/rapi_testutils.py}

  """
  def __init__(self):
    """Initializes this class.

    """
    # Random credentials shared between the fake server (below) and the
    # client created at the end of this constructor
    username = utils.GenerateSecret()
    password = utils.GenerateSecret()

    # pylint: disable=W0232
    class SimpleAuthenticator(object):
      # pylint: disable=R0201
      def ValidateRequest(self, req, _handler_access, _realm):
        """Called to verify user credentials given in HTTP request.

        """
        # Closes over "username"/"password" from the enclosing constructor
        wanted, _ = http.auth.HttpServerRequestAuthentication \
          .ExtractUserPassword(req)
        assert username == wanted
        return users_file.PasswordFileUser(username, password,
                                           [rapi.RAPI_ACCESS_WRITE]).name

    # Records every LUXI method the client would have invoked
    self._lcr = _LuxiCallRecorder()

    # Create a mock RAPI server
    handler = _RapiMock(SimpleAuthenticator(), self._lcr)

    # All HTTP traffic goes through FakeCurl straight into the mock server
    self._client = \
      rapi.client.GanetiRapiClient("master.example.com",
                                   username=username, password=password,
                                   curl_factory=lambda: FakeCurl(handler))

  def _GetLuxiCalls(self):
    """Returns the names of all called LUXI client functions.

    """
    return self._lcr.CalledNames()

  def __getattr__(self, name):
    """Finds method by name.

    The method is wrapped using L{_TestWrapper} to produce the actual test
    result.

    """
    return _HideInternalErrors(compat.partial(_TestWrapper,
                                              getattr(self._client, name)))
| |
#!/usr/bin/python
"""
" @section DESCRIPTION
" Configurations for different RF models
"""
def _get_model_params(name='glm',
solver='linreg',
multilin=False,
rf_type='lin_rfs',
rf_truncated=False,
cf_type=None,
cf_mapping=None,
cf_alignment='edge',
cf_act_fun=None,
pos_sol=True,
reg_c_init=1e-0,
reg_type='smooth',
init=None,
n_rfs=1):
config = {'name': name,
'params': {'solver': solver,
'multilin': multilin,
'rf_type': rf_type,
'rf_truncated':rf_truncated,
'cf_type': cf_type,
'cf_mapping': cf_mapping,
'cf_alignment': cf_alignment,
'cf_act_fun': cf_act_fun,
'pos_sol': pos_sol,
'reg_c_init': reg_c_init,
'reg_type':reg_type,
'init': init,
'n_rfs': n_rfs
}
}
return config
# GLMs
# Plain generalized linear models; variants differ in the solver
# (objective) and in whether the RF uses a multilinear parametrization.
glm_linreg = \
    _get_model_params(name='glm',
                      rf_type='max_rfs',
                      multilin=False)
glm_linreg_multilin = \
    _get_model_params(name='glm',
                      rf_type='max_rfs',
                      multilin=True)
glm_logreg = \
    _get_model_params(name='glm',
                      rf_type='max_rfs',
                      solver='logreg',
                      multilin=False)
glm_logreg_multilin = \
    _get_model_params(name='glm',
                      rf_type='max_rfs',
                      solver='logreg',
                      multilin=True)
# Logistic-regression GLMs with 2-4 receptive fields
glm_logreg2 = \
    _get_model_params(name='glm',
                      n_rfs=2,
                      rf_type='max_rfs',
                      solver='logreg',
                      multilin=False)
glm_logreg3 = \
    _get_model_params(name='glm',
                      n_rfs=3,
                      rf_type='max_rfs',
                      solver='logreg',
                      multilin=False)
glm_logreg4 = \
    _get_model_params(name='glm',
                      n_rfs=4,
                      rf_type='max_rfs',
                      solver='logreg',
                      multilin=False)
glm_poireg = \
    _get_model_params(name='glm',
                      rf_type='max_rfs',
                      solver='poireg',
                      multilin=False)
glm_poireg_multilin = \
    _get_model_params(name='glm',
                      rf_type='max_rfs',
                      solver='poireg',
                      multilin=True)
# Context models, including subunit models
# Linear-regression context variants
glm_linreg_ctx_same = \
    _get_model_params(name='ctx',
                      cf_type='ctx',
                      multilin=False,
                      cf_mapping='same')
glm_linreg_ctx_same_ml = \
    _get_model_params(name='ctx',
                      cf_type='ctx',
                      multilin=True,
                      cf_mapping='same')
glm_linreg_ctx_nl_same_adaptive = \
    _get_model_params(name='ctx',
                      cf_type='ctx_nl',
                      solver='linreg',
                      multilin=False,
                      cf_mapping='same',
                      cf_act_fun='adaptive',
                      init='LinRegCtx_')
glm_linreg_subunit_same_adaptive = \
    _get_model_params(name='ctx',
                      cf_type='subunit',
                      solver='linreg',
                      multilin=False,
                      cf_mapping='same',
                      cf_act_fun='adaptive',
                      init='LinRegCtx_')
glm_linreg_subunit_t_same_adaptive = \
    _get_model_params(name='ctx',
                      rf_truncated=True,
                      cf_type='subunit',
                      solver='linreg',
                      multilin=False,
                      cf_mapping='same',
                      cf_act_fun='adaptive',
                      init='LinRegCtx_')
# Logistic-regression context variants
glm_logreg_ctx_same = \
    _get_model_params(name='ctx',
                      cf_type='ctx',
                      solver='logreg',
                      multilin=False,
                      cf_mapping='same')
glm_logreg_ctx_same_ml = \
    _get_model_params(name='ctx',
                      multilin=True,
                      cf_type='ctx',
                      solver='logreg',
                      cf_mapping='same',
                      init='LogRegCtx_')
glm_logreg_ctx_same_c = \
    _get_model_params(name='ctx',
                      cf_type='ctx',
                      solver='logreg',
                      multilin=False,
                      cf_mapping='same',
                      cf_alignment='center')
glm_logreg_ctx_nl_same_adaptive = \
    _get_model_params(name='ctx',
                      cf_type='ctx_nl',
                      solver='logreg',
                      multilin=False,
                      cf_mapping='same',
                      cf_act_fun='adaptive',
                      init='LogRegCtx_')
glm_logreg_ctx_nl_same_rectified = \
    _get_model_params(name='ctx',
                      cf_type='ctx_nl',
                      solver='logreg',
                      multilin=False,
                      cf_mapping='same',
                      cf_act_fun='rectified',
                      init='LogRegCtx_')
glm_logreg_subunit_same_adaptive = \
    _get_model_params(name='ctx',
                      cf_type='subunit',
                      solver='logreg',
                      multilin=False,
                      cf_mapping='same',
                      cf_act_fun='adaptive',
                      init='LogRegCtx_')
glm_logreg_subunit_t_same_adaptive = \
    _get_model_params(name='ctx',
                      rf_truncated=True,
                      cf_type='subunit',
                      solver='logreg',
                      multilin=False,
                      cf_mapping='same',
                      cf_act_fun='adaptive',
                      init='LogRegCtx_')
# Poisson-regression context variants
glm_poireg_ctx_same = \
    _get_model_params(name='ctx',
                      cf_type='ctx',
                      solver='poireg',
                      multilin=False,
                      cf_mapping='same',
                      init='PoiRegCtx_')
glm_poireg_ctx_same_ml = \
    _get_model_params(name='ctx',
                      cf_type='ctx',
                      solver='poireg',
                      multilin=True,
                      cf_mapping='same',
                      init='PoiRegCtx_')
glm_poireg_ctx_nl_same_adaptive = \
    _get_model_params(name='ctx',
                      cf_type='ctx_nl',
                      solver='poireg',
                      multilin=False,
                      cf_mapping='same',
                      cf_act_fun='adaptive',
                      init='PoiRegCtx_')
glm_poireg_subunit_same_adaptive = \
    _get_model_params(name='ctx',
                      cf_type='subunit',
                      solver='poireg',
                      multilin=False,
                      cf_mapping='same',
                      cf_act_fun='adaptive',
                      init='PoiRegCtx_')
glm_poireg_subunit_t_same_adaptive = \
    _get_model_params(name='ctx',
                      rf_truncated=True,
                      cf_type='subunit',
                      solver='poireg',
                      multilin=False,
                      cf_mapping='same',
                      cf_act_fun='adaptive',
                      init='PoiRegCtx_')
# Temporal context mappings
glm_poireg_ctx_temporal = \
    _get_model_params(name='ctx',
                      cf_type='ctx',
                      solver='poireg',
                      multilin=False,
                      cf_mapping='temporal')
glm_poireg_subunit_temporal_adaptive = \
    _get_model_params(name='ctx',
                      cf_type='subunit',
                      solver='poireg',
                      multilin=False,
                      cf_mapping='temporal',
                      cf_act_fun='adaptive',
                      init='PoiRegCtx_')
# Multi-filter LN models
# STC-based LN models with 1-6 filters
ln_stc1 = _get_model_params(name='ln',
                            n_rfs=1,
                            solver='stc')
ln_stc2 = _get_model_params(name='ln',
                            n_rfs=2,
                            solver='stc')
ln_stc3 = _get_model_params(name='ln',
                            n_rfs=3,
                            solver='stc')
ln_stc4 = _get_model_params(name='ln',
                            n_rfs=4,
                            solver='stc')
ln_stc6 = _get_model_params(name='ln',
                            n_rfs=6,
                            solver='stc')
# Whitened-STC variants
ln_stc1_w = _get_model_params(name='ln',
                              n_rfs=1,
                              solver='stc_w')
ln_stc2_w = _get_model_params(name='ln',
                              n_rfs=2,
                              solver='stc_w')
ln_stc3_w = _get_model_params(name='ln',
                              n_rfs=3,
                              solver='stc_w')
ln_stc4_w = _get_model_params(name='ln',
                              n_rfs=4,
                              solver='stc_w')
# iSTAC-based LN models
ln_istac1 = \
    _get_model_params(name='ln',
                      n_rfs=1,
                      solver='istac')
ln_istac2 = \
    _get_model_params(name='ln',
                      n_rfs=2,
                      solver='istac')
ln_istac3 = \
    _get_model_params(name='ln',
                      n_rfs=3,
                      solver='istac')
ln_istac4 = \
    _get_model_params(name='ln',
                      n_rfs=4,
                      solver='istac')
ln_istac6 = \
    _get_model_params(name='ln',
                      n_rfs=6,
                      solver='istac')
# Quadratic (MNE) models: qn_mne<n_rfs>_C_<k> uses reg_c_init = 10**-k
qn_mne1_C_0 = \
    _get_model_params(name='qn',
                      n_rfs=1,
                      solver='mne',
                      reg_c_init=1e-0,
                      init='MNE2_C1.0')
qn_mne2_C_0 = \
    _get_model_params(name='qn',
                      n_rfs=2,
                      solver='mne',
                      reg_c_init=1e-0,
                      init='MNE2_C1.0')
qn_mne3_C_0 = \
    _get_model_params(name='qn',
                      n_rfs=3,
                      solver='mne',
                      reg_c_init=1e-0,
                      init='MNE2_C1.0')
qn_mne4_C_0 = \
    _get_model_params(name='qn',
                      n_rfs=4,
                      solver='mne',
                      reg_c_init=1e-0,
                      init='MNE2_C1.0')
qn_mne1_C_1 = \
    _get_model_params(name='qn',
                      n_rfs=1,
                      solver='mne',
                      reg_c_init=1e-1,
                      init='MNE2_C0.1')
qn_mne2_C_1 = \
    _get_model_params(name='qn',
                      n_rfs=2,
                      solver='mne',
                      reg_c_init=1e-1,
                      init='MNE2_C0.1')
qn_mne3_C_1 = \
    _get_model_params(name='qn',
                      n_rfs=3,
                      solver='mne',
                      reg_c_init=1e-1,
                      init='MNE2_C0.1')
qn_mne4_C_1 = \
    _get_model_params(name='qn',
                      n_rfs=4,
                      solver='mne',
                      reg_c_init=1e-1,
                      init='MNE2_C0.1')
qn_mne1_C_2 = \
    _get_model_params(name='qn',
                      n_rfs=1,
                      solver='mne',
                      reg_c_init=1e-2,
                      init='MNE2_C0.01')
qn_mne2_C_2 = \
    _get_model_params(name='qn',
                      n_rfs=2,
                      solver='mne',
                      reg_c_init=1e-2,
                      init='MNE2_C0.01')
qn_mne3_C_2 = \
    _get_model_params(name='qn',
                      n_rfs=3,
                      solver='mne',
                      reg_c_init=1e-2,
                      init='MNE2_C0.01')
qn_mne4_C_2 = \
    _get_model_params(name='qn',
                      n_rfs=4,
                      solver='mne',
                      reg_c_init=1e-2,
                      init='MNE2_C0.01')
qn_mne1_C_3 = \
    _get_model_params(name='qn',
                      n_rfs=1,
                      solver='mne',
                      reg_c_init=1e-3,
                      init='MNE2_C0.001')
qn_mne2_C_3 = \
    _get_model_params(name='qn',
                      n_rfs=2,
                      solver='mne',
                      reg_c_init=1e-3,
                      init='MNE2_C0.001')
qn_mne3_C_3 = \
    _get_model_params(name='qn',
                      n_rfs=3,
                      solver='mne',
                      reg_c_init=1e-3,
                      init='MNE2_C0.001')
qn_mne4_C_3 = \
    _get_model_params(name='qn',
                      n_rfs=4,
                      solver='mne',
                      reg_c_init=1e-3,
                      init='MNE2_C0.001')
qn_mne1_C_4 = \
    _get_model_params(name='qn',
                      n_rfs=1,
                      solver='mne',
                      reg_c_init=1e-4,
                      init='MNE2_C0.0001')
qn_mne2_C_4 = \
    _get_model_params(name='qn',
                      n_rfs=2,
                      solver='mne',
                      reg_c_init=1e-4,
                      # NOTE(review): siblings all use 'MNE2_...' here;
                      # 'MNE4_...' looks like a copy-paste typo -- confirm
                      init='MNE4_C0.0001')
qn_mne3_C_4 = \
    _get_model_params(name='qn',
                      n_rfs=3,
                      solver='mne',
                      reg_c_init=1e-4,
                      init='MNE2_C0.0001')
qn_mne4_C_4 = \
    _get_model_params(name='qn',
                      n_rfs=4,
                      solver='mne',
                      reg_c_init=1e-4,
                      init='MNE2_C0.0001')
qn_mne4_C_5 = \
    _get_model_params(name='qn',
                      n_rfs=4,
                      solver='mne',
                      reg_c_init=1e-5,
                      # NOTE(review): init name says C0.0001 but reg_c_init
                      # is 1e-5 -- verify this mismatch is intended
                      init='MNE2_C0.0001')
# "best" presets: C chosen from the sweeps above
qn_mne2_best = \
    _get_model_params(name='qn',
                      n_rfs=2,
                      solver='mne',
                      reg_c_init=1e-1,
                      init='MNE2')
qn_mne4_best = \
    _get_model_params(name='qn',
                      n_rfs=4,
                      solver='mne',
                      reg_c_init=1e-1,
                      init='MNE4')
# MID models (Models that maximize the single spike information directly)
mid1 = \
    _get_model_params(name='mid')
mid1_ml = \
    _get_model_params(name='mid',
                      multilin=True)
# Multilinear MID with different initializations
mid1_ml_lin_init = \
    _get_model_params(name='mid',
                      multilin=True,
                      init='LinReg_')
mid1_ml_poi_init = \
    _get_model_params(name='mid',
                      multilin=True,
                      init='PoiReg_')
# Single-filter MID initialized from other fitted models
mid1_istac_init = \
    _get_model_params(name='mid',
                      init='iSTAC1_')
mid1_lin_init = \
    _get_model_params(name='mid',
                      init='LinReg_')
mid1_log_init = \
    _get_model_params(name='mid',
                      init='LogReg_')
mid1_poi_init = \
    _get_model_params(name='mid',
                      init='PoiReg_')
mid1_pos_inv = \
    _get_model_params(name='mid',
                      rf_type='pos_inv_rfs')
# MID with context mapping
mid1_ctx_same = \
    _get_model_params(name='mid',
                      cf_mapping='same')
mid1_ctx_same_linreg_init = \
    _get_model_params(name='mid',
                      cf_mapping='same',
                      init='LinRegCtx_')
mid1_ctx_same_logreg_init = \
    _get_model_params(name='mid',
                      cf_mapping='same',
                      init='LogRegCtx_')
mid1_ctx_same_poireg_init = \
    _get_model_params(name='mid',
                      cf_mapping='same',
                      init='PoiRegCtx_')
# Two-filter MID variants; note init is the string 'none', not None
mid2 = \
    _get_model_params(name='mid',
                      init='none',
                      n_rfs=2)
mid2_stc_init = \
    _get_model_params(name='mid',
                      init='STC2_',
                      n_rfs=2)
mid2_istac_init = \
    _get_model_params(name='mid',
                      init='iSTAC2_',
                      n_rfs=2)
mid2_mne_init = \
    _get_model_params(name='mid',
                      init='MNE2',
                      n_rfs=2)
mid2_mid_init = \
    _get_model_params(name='mid',
                      init='MID2_STC2__ori',
                      n_rfs=2)
| |
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import errno
import hashlib
import os
import os.path
import shutil
import tempfile
import jinja2
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import netutils
import six
from ironic.common import exception
from ironic.common import utils
from ironic.tests import base
# Shortcut to the global oslo.config configuration object
CONF = cfg.CONF
class BareMetalUtilsTestCase(base.TestCase):
    """Tests for the symlink helper in ironic.common.utils."""

    @mock.patch.object(os, "symlink", autospec=True)
    def test_create_link(self, mock_symlink):
        mock_symlink.return_value = None
        utils.create_link_without_raise("/fake/source", "/fake/link")
        mock_symlink.assert_called_once_with("/fake/source", "/fake/link")

    @mock.patch.object(os, "symlink", autospec=True)
    def test_create_link_EEXIST(self, mock_symlink):
        # An already-existing link must not raise
        mock_symlink.side_effect = OSError(errno.EEXIST)
        utils.create_link_without_raise("/fake/source", "/fake/link")
        mock_symlink.assert_called_once_with("/fake/source", "/fake/link")
class ExecuteTestCase(base.TestCase):
    # Tests for the utils.execute() wrapper around processutils.execute().

    def test_retry_on_failure(self):
        # The shell script below always exits non-zero and counts its
        # invocations in a scratch file; verifies that execute() retries
        # the requested number of times and passes stdin on every attempt.
        fd, tmpfilename = tempfile.mkstemp()
        _, tmpfilename2 = tempfile.mkstemp()
        try:
            with os.fdopen(fd, 'w+') as fp:
                fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
    echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
    exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
    runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
            os.chmod(tmpfilename, 0o755)
            try:
                self.assertRaises(processutils.ProcessExecutionError,
                                  utils.execute,
                                  tmpfilename, tmpfilename2, attempts=10,
                                  process_input=b'foo',
                                  delay_on_retry=False)
            except OSError as e:
                if e.errno == errno.EACCES:
                    self.skipTest("Permissions error detected. "
                                  "Are you running with a noexec /tmp?")
                else:
                    raise
            with open(tmpfilename2, 'r') as fp:
                runs = fp.read()
            self.assertNotEqual(runs.strip(), 'failure', 'stdin did not '
                                                         'always get passed '
                                                         'correctly')
            runs = int(runs.strip())
            self.assertEqual(10, runs,
                             'Ran %d times instead of 10.' % (runs,))
        finally:
            os.unlink(tmpfilename)
            os.unlink(tmpfilename2)

    def test_unknown_kwargs_raises_error(self):
        self.assertRaises(processutils.UnknownArgumentError,
                          utils.execute,
                          '/usr/bin/env', 'true',
                          this_is_not_a_valid_kwarg=True)

    def test_check_exit_code_boolean(self):
        # check_exit_code=False suppresses the failure; True raises
        utils.execute('/usr/bin/env', 'false', check_exit_code=False)
        self.assertRaises(processutils.ProcessExecutionError,
                          utils.execute,
                          '/usr/bin/env', 'false', check_exit_code=True)

    def test_no_retry_on_success(self):
        # The script fails if it is ever run a second time, so a successful
        # first attempt must not be retried.
        fd, tmpfilename = tempfile.mkstemp()
        _, tmpfilename2 = tempfile.mkstemp()
        try:
            with os.fdopen(fd, 'w+') as fp:
                fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
            os.chmod(tmpfilename, 0o755)
            try:
                utils.execute(tmpfilename,
                              tmpfilename2,
                              process_input=b'foo',
                              attempts=2)
            except OSError as e:
                if e.errno == errno.EACCES:
                    self.skipTest("Permissions error detected. "
                                  "Are you running with a noexec /tmp?")
                else:
                    raise
        finally:
            os.unlink(tmpfilename)
            os.unlink(tmpfilename2)

    @mock.patch.object(processutils, 'execute', autospec=True)
    @mock.patch.object(os.environ, 'copy', return_value={}, autospec=True)
    def test_execute_use_standard_locale_no_env_variables(self, env_mock,
                                                          execute_mock):
        # With no caller-supplied environment, only LC_ALL=C is injected
        utils.execute('foo', use_standard_locale=True)
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C'})

    @mock.patch.object(processutils, 'execute', autospec=True)
    def test_execute_use_standard_locale_with_env_variables(self,
                                                            execute_mock):
        # LC_ALL=C is merged into the caller-supplied environment
        utils.execute('foo', use_standard_locale=True,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C',
                                                            'foo': 'bar'})

    @mock.patch.object(processutils, 'execute', autospec=True)
    def test_execute_not_use_standard_locale(self, execute_mock):
        # Without use_standard_locale, the environment passes through as-is
        utils.execute('foo', use_standard_locale=False,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'foo': 'bar'})

    def test_execute_get_root_helper(self):
        # run_as_root=True must forward the configured root helper
        with mock.patch.object(
                processutils, 'execute', autospec=True) as execute_mock:
            helper = utils._get_root_helper()
            utils.execute('foo', run_as_root=True)
            execute_mock.assert_called_once_with('foo', run_as_root=True,
                                                 root_helper=helper)

    def test_execute_without_root_helper(self):
        with mock.patch.object(
                processutils, 'execute', autospec=True) as execute_mock:
            utils.execute('foo', run_as_root=False)
            execute_mock.assert_called_once_with('foo', run_as_root=False)
class GenericUtilsTestCase(base.TestCase):
    # Tests for assorted helpers in ironic.common.utils: hashing,
    # name/MAC validation, string utilities and deprecation warnings.

    @mock.patch.object(utils, 'hashlib', autospec=True)
    def test__get_hash_object(self, hashlib_mock):
        algorithms_available = ('md5', 'sha1', 'sha224',
                                'sha256', 'sha384', 'sha512')
        hashlib_mock.algorithms_guaranteed = algorithms_available
        hashlib_mock.algorithms = algorithms_available
        # | WHEN |
        utils._get_hash_object('md5')
        utils._get_hash_object('sha1')
        utils._get_hash_object('sha224')
        utils._get_hash_object('sha256')
        utils._get_hash_object('sha384')
        utils._get_hash_object('sha512')
        # | THEN |
        calls = [mock.call.md5(), mock.call.sha1(), mock.call.sha224(),
                 mock.call.sha256(), mock.call.sha384(), mock.call.sha512()]
        hashlib_mock.assert_has_calls(calls)

    def test__get_hash_object_throws_for_invalid_or_unsupported_hash_name(
            self):
        # | WHEN | & | THEN |
        self.assertRaises(exception.InvalidParameterValue,
                          utils._get_hash_object,
                          'hickory-dickory-dock')

    def test_hash_file_for_md5(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        expected = hashlib.md5(data).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object)  # using default, 'md5'
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_for_sha1(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        expected = hashlib.sha1(data).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object, 'sha1')
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_for_sha512(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        expected = hashlib.sha512(data).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object, 'sha512')
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_throws_for_invalid_or_unsupported_hash(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        # | WHEN | & | THEN |
        self.assertRaises(exception.InvalidParameterValue, utils.hash_file,
                          file_like_object, 'hickory-dickory-dock')

    def test_file_has_content_equal(self):
        data = b'Mary had a little lamb, its fleece as white as snow'
        ref = data
        with mock.patch('ironic.common.utils.open',
                        mock.mock_open(read_data=data)) as mopen:
            self.assertTrue(utils.file_has_content('foo', ref))
            # The file must be opened in binary mode for the comparison
            mopen.assert_called_once_with('foo', 'rb')

    def test_file_has_content_differ(self):
        data = b'Mary had a little lamb, its fleece as white as snow'
        ref = data + b'!'
        with mock.patch('ironic.common.utils.open',
                        mock.mock_open(read_data=data)) as mopen:
            self.assertFalse(utils.file_has_content('foo', ref))
            mopen.assert_called_once_with('foo', 'rb')

    def test_is_valid_datapath_id(self):
        # Datapath IDs are exactly 16 hex digits, case-insensitive
        self.assertTrue(utils.is_valid_datapath_id("525400cf2d319fdf"))
        self.assertTrue(utils.is_valid_datapath_id("525400CF2D319FDF"))
        self.assertFalse(utils.is_valid_datapath_id("52"))
        self.assertFalse(utils.is_valid_datapath_id("52:54:00:cf:2d:31"))
        self.assertFalse(utils.is_valid_datapath_id("notadatapathid00"))
        self.assertFalse(utils.is_valid_datapath_id("5525400CF2D319FDF"))

    def test_is_hostname_safe(self):
        # RFC 952/1123-style hostnames: lowercase alphanumerics, dashes
        # inside labels, dots between labels, max 63 chars per label
        self.assertTrue(utils.is_hostname_safe('spam'))
        self.assertFalse(utils.is_hostname_safe('spAm'))
        self.assertFalse(utils.is_hostname_safe('SPAM'))
        self.assertFalse(utils.is_hostname_safe('-spam'))
        self.assertFalse(utils.is_hostname_safe('spam-'))
        self.assertTrue(utils.is_hostname_safe('spam-eggs'))
        self.assertFalse(utils.is_hostname_safe('spam_eggs'))
        self.assertFalse(utils.is_hostname_safe('spam eggs'))
        self.assertTrue(utils.is_hostname_safe('spam.eggs'))
        self.assertTrue(utils.is_hostname_safe('9spam'))
        self.assertTrue(utils.is_hostname_safe('spam7'))
        self.assertTrue(utils.is_hostname_safe('br34kf4st'))
        self.assertFalse(utils.is_hostname_safe('$pam'))
        self.assertFalse(utils.is_hostname_safe('egg$'))
        self.assertFalse(utils.is_hostname_safe('spam#eggs'))
        self.assertFalse(utils.is_hostname_safe(' eggs'))
        self.assertFalse(utils.is_hostname_safe('spam '))
        self.assertTrue(utils.is_hostname_safe('s'))
        self.assertTrue(utils.is_hostname_safe('s' * 63))
        self.assertFalse(utils.is_hostname_safe('s' * 64))
        self.assertFalse(utils.is_hostname_safe(''))
        self.assertFalse(utils.is_hostname_safe(None))
        # Need to ensure a binary response for success or fail
        self.assertIsNotNone(utils.is_hostname_safe('spam'))
        self.assertIsNotNone(utils.is_hostname_safe('-spam'))
        self.assertTrue(utils.is_hostname_safe('www.rackspace.com'))
        self.assertTrue(utils.is_hostname_safe('www.rackspace.com.'))
        self.assertTrue(utils.is_hostname_safe('http._sctp.www.example.com'))
        self.assertTrue(utils.is_hostname_safe('mail.pets_r_us.net'))
        self.assertTrue(utils.is_hostname_safe('mail-server-15.my_host.org'))
        self.assertFalse(utils.is_hostname_safe('www.nothere.com_'))
        self.assertFalse(utils.is_hostname_safe('www.nothere_.com'))
        self.assertFalse(utils.is_hostname_safe('www..nothere.com'))
        long_str = 'a' * 63 + '.' + 'b' * 63 + '.' + 'c' * 63 + '.' + 'd' * 63
        self.assertTrue(utils.is_hostname_safe(long_str))
        self.assertFalse(utils.is_hostname_safe(long_str + '.'))
        self.assertFalse(utils.is_hostname_safe('a' * 255))

    def test_is_valid_logical_name(self):
        # Logical names are more permissive than hostnames (underscores,
        # tildes, leading dots allowed)
        valid = (
            'spam', 'spAm', 'SPAM', 'spam-eggs', 'spam.eggs', 'spam_eggs',
            'spam~eggs', '9spam', 'spam7', '~spam', '.spam', '.~-_', '~',
            'br34kf4st', 's', 's' * 63, 's' * 255)
        invalid = (
            ' ', 'spam eggs', '$pam', 'egg$', 'spam#eggs',
            ' eggs', 'spam ', '', None, 'spam%20')

        for hostname in valid:
            result = utils.is_valid_logical_name(hostname)
            # Need to ensure a binary response for success. assertTrue
            # is too generous, and would pass this test if, for
            # instance, a regex Match object were returned.
            self.assertIs(result, True,
                          "%s is unexpectedly invalid" % hostname)

        for hostname in invalid:
            result = utils.is_valid_logical_name(hostname)
            # Need to ensure a binary response for
            # success. assertFalse is too generous and would pass this
            # test if None were returned.
            self.assertIs(result, False,
                          "%s is unexpectedly valid" % hostname)

    def test_validate_and_normalize_mac(self):
        mac = 'AA:BB:CC:DD:EE:FF'
        with mock.patch.object(netutils, 'is_valid_mac',
                               autospec=True) as m_mock:
            m_mock.return_value = True
            # Valid MACs are normalized to lowercase
            self.assertEqual(mac.lower(),
                             utils.validate_and_normalize_mac(mac))

    def test_validate_and_normalize_datapath_id(self):
        datapath_id = 'AA:BB:CC:DD:EE:FF'
        with mock.patch.object(utils, 'is_valid_datapath_id',
                               autospec=True) as m_mock:
            m_mock.return_value = True
            self.assertEqual(datapath_id.lower(),
                             utils.validate_and_normalize_datapath_id(
                                 datapath_id))

    def test_validate_and_normalize_mac_invalid_format(self):
        with mock.patch.object(netutils, 'is_valid_mac',
                               autospec=True) as m_mock:
            m_mock.return_value = False
            self.assertRaises(exception.InvalidMAC,
                              utils.validate_and_normalize_mac, 'invalid-mac')

    def test_safe_rstrip(self):
        value = '/test/'
        rstripped_value = '/test'
        not_rstripped = '/'

        self.assertEqual(rstripped_value, utils.safe_rstrip(value, '/'))
        self.assertEqual(not_rstripped, utils.safe_rstrip(not_rstripped, '/'))

    def test_safe_rstrip_not_raises_exceptions(self):
        # Supplying an integer should normally raise an exception because it
        # does not have the rstrip() method.
        value = 10

        # In the case of raising an exception safe_rstrip() should return the
        # original value.
        self.assertEqual(value, utils.safe_rstrip(value))

    @mock.patch.object(os.path, 'getmtime', return_value=1439465889.4964755,
                       autospec=True)
    def test_unix_file_modification_datetime(self, mtime_mock):
        # The float mtime is converted to a naive datetime with microseconds
        expected = datetime.datetime(2015, 8, 13, 11, 38, 9, 496475)
        self.assertEqual(expected,
                         utils.unix_file_modification_datetime('foo'))
        mtime_mock.assert_called_once_with('foo')

    def test_is_valid_no_proxy(self):
        # Valid values for 'no_proxy'
        valid_no_proxy = [
            ('a' * 63 + '.' + '0' * 63 + '.c.' + 'd' * 61 + '.' + 'e' * 61),
            ('A' * 63 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.' + 'E' * 61),
            ('.' + 'a' * 62 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.' +
             'e' * 61),
            ',,example.com:3128,',
            '192.168.1.1',  # IP should be valid
        ]
        # Test each one individually, so if failure easier to determine which
        # one failed.
        for no_proxy in valid_no_proxy:
            self.assertTrue(
                utils.is_valid_no_proxy(no_proxy),
                msg="'no_proxy' value should be valid: {}".format(no_proxy))
        # Test valid when joined together
        self.assertTrue(utils.is_valid_no_proxy(','.join(valid_no_proxy)))
        # Test valid when joined together with whitespace
        self.assertTrue(utils.is_valid_no_proxy(' , '.join(valid_no_proxy)))
        # empty string should also be valid
        self.assertTrue(utils.is_valid_no_proxy(''))

        # Invalid values for 'no_proxy'
        invalid_no_proxy = [
            ('A' * 64 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.' +
             'E' * 61),  # too long (> 253)
            ('a' * 100),
            'a..com',
            ('.' + 'a' * 63 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.' +
             'e' * 61),  # too long (> 251 after deleting .)
            ('*.' + 'a' * 60 + '.' + '0' * 60 + '.c.' + 'd' * 61 + '.' +
             'e' * 61),  # starts with *.
            'c.-a.com',
            'c.a-.com',
        ]
        for no_proxy in invalid_no_proxy:
            self.assertFalse(
                utils.is_valid_no_proxy(no_proxy),
                msg="'no_proxy' value should be invalid: {}".format(no_proxy))

    @mock.patch.object(utils, 'LOG', autospec=True)
    def test_warn_about_deprecated_extra_vif_port_id(self, mock_log):
        # Set variable to default value
        utils.warn_deprecated_extra_vif_port_id = False
        # A second call must not log a second warning
        utils.warn_about_deprecated_extra_vif_port_id()
        utils.warn_about_deprecated_extra_vif_port_id()
        self.assertEqual(1, mock_log.warning.call_count)
        self.assertIn("extra['vif_port_id'] is deprecated and will not",
                      mock_log.warning.call_args[0][0])
class TempFilesTestCase(base.TestCase):
def test_tempdir(self):
dirname = None
with utils.tempdir() as tempdir:
self.assertTrue(os.path.isdir(tempdir))
dirname = tempdir
self.assertFalse(os.path.exists(dirname))
@mock.patch.object(shutil, 'rmtree', autospec=True)
@mock.patch.object(tempfile, 'mkdtemp', autospec=True)
def test_tempdir_mocked(self, mkdtemp_mock, rmtree_mock):
self.config(tempdir='abc')
mkdtemp_mock.return_value = 'temp-dir'
kwargs = {'dir': 'b'}
with utils.tempdir(**kwargs) as tempdir:
self.assertEqual('temp-dir', tempdir)
tempdir_created = tempdir
mkdtemp_mock.assert_called_once_with(**kwargs)
rmtree_mock.assert_called_once_with(tempdir_created)
@mock.patch.object(utils, 'LOG', autospec=True)
@mock.patch.object(shutil, 'rmtree', autospec=True)
@mock.patch.object(tempfile, 'mkdtemp', autospec=True)
def test_tempdir_mocked_error_on_rmtree(self, mkdtemp_mock, rmtree_mock,
log_mock):
self.config(tempdir='abc')
mkdtemp_mock.return_value = 'temp-dir'
rmtree_mock.side_effect = OSError
with utils.tempdir() as tempdir:
self.assertEqual('temp-dir', tempdir)
tempdir_created = tempdir
rmtree_mock.assert_called_once_with(tempdir_created)
self.assertTrue(log_mock.error.called)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(utils, '_check_dir_writable', autospec=True)
@mock.patch.object(utils, '_check_dir_free_space', autospec=True)
def test_check_dir_with_pass_in(self, mock_free_space, mock_dir_writable,
mock_exists):
mock_exists.return_value = True
# test passing in a directory and size
utils.check_dir(directory_to_check='/fake/path', required_space=5)
mock_exists.assert_called_once_with('/fake/path')
mock_dir_writable.assert_called_once_with('/fake/path')
mock_free_space.assert_called_once_with('/fake/path', 5)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(utils, '_check_dir_writable', autospec=True)
@mock.patch.object(utils, '_check_dir_free_space', autospec=True)
def test_check_dir_no_dir(self, mock_free_space, mock_dir_writable,
mock_exists):
mock_exists.return_value = False
self.config(tempdir='/fake/path')
self.assertRaises(exception.PathNotFound, utils.check_dir)
mock_exists.assert_called_once_with(CONF.tempdir)
self.assertFalse(mock_free_space.called)
self.assertFalse(mock_dir_writable.called)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(utils, '_check_dir_writable', autospec=True)
@mock.patch.object(utils, '_check_dir_free_space', autospec=True)
def test_check_dir_ok(self, mock_free_space, mock_dir_writable,
mock_exists):
mock_exists.return_value = True
self.config(tempdir='/fake/path')
utils.check_dir()
mock_exists.assert_called_once_with(CONF.tempdir)
mock_dir_writable.assert_called_once_with(CONF.tempdir)
mock_free_space.assert_called_once_with(CONF.tempdir, 1)
@mock.patch.object(os, 'access', autospec=True)
def test__check_dir_writable_ok(self, mock_access):
mock_access.return_value = True
self.assertIsNone(utils._check_dir_writable("/fake/path"))
mock_access.assert_called_once_with("/fake/path", os.W_OK)
@mock.patch.object(os, 'access', autospec=True)
def test__check_dir_writable_not_writable(self, mock_access):
mock_access.return_value = False
self.assertRaises(exception.DirectoryNotWritable,
utils._check_dir_writable, "/fake/path")
mock_access.assert_called_once_with("/fake/path", os.W_OK)
@mock.patch.object(os, 'statvfs', autospec=True)
def test__check_dir_free_space_ok(self, mock_stat):
    """Ample available blocks let _check_dir_free_space() return quietly."""
    # Build the fake statvfs result from a field dict; only f_bsize and
    # f_bavail carry non-zero values here.
    fields = {'f_bsize': 5, 'f_frsize': 0, 'f_blocks': 0, 'f_bfree': 0,
              'f_bavail': 1024 * 1024, 'f_files': 0, 'f_ffree': 0,
              'f_favail': 0, 'f_flag': 0, 'f_namemax': 0}
    mock_stat.return_value = mock.MagicMock(**fields)
    utils._check_dir_free_space("/fake/path")
    mock_stat.assert_called_once_with("/fake/path")
@mock.patch.object(os, 'statvfs', autospec=True)
def test_check_dir_free_space_raises(self, mock_stat):
    """Too little available space raises InsufficientDiskSpace."""
    fields = {'f_bsize': 1, 'f_frsize': 0, 'f_blocks': 0, 'f_bfree': 0,
              'f_bavail': 1024, 'f_files': 0, 'f_ffree': 0,
              'f_favail': 0, 'f_flag': 0, 'f_namemax': 0}
    mock_stat.return_value = mock.MagicMock(**fields)
    self.assertRaises(exception.InsufficientDiskSpace,
                      utils._check_dir_free_space,
                      "/fake/path")
    mock_stat.assert_called_once_with("/fake/path")
class GetUpdatedCapabilitiesTestCase(base.TestCase):
    """Tests for utils.get_updated_capabilities and validate_network_port."""

    def test_get_updated_capabilities(self):
        """A single-entry dict becomes one 'key:value' token."""
        returned = utils.get_updated_capabilities(
            None, {'ilo_firmware_version': 'xyz'})
        self.assertEqual('ilo_firmware_version:xyz', returned)
        self.assertIsInstance(returned, str)

    def test_get_updated_capabilities_multiple_keys(self):
        """Several entries are comma-joined; token order is not guaranteed."""
        capabilities = {'ilo_firmware_version': 'xyz',
                        'foo': 'bar', 'somekey': 'value'}
        returned = utils.get_updated_capabilities(None, capabilities)
        # Compare as sets so the comparison is order-insensitive.
        expected = 'ilo_firmware_version:xyz,foo:bar,somekey:value'
        self.assertEqual(set(expected.split(',')), set(returned.split(',')))
        self.assertIsInstance(returned, str)

    def test_get_updated_capabilities_invalid_capabilities(self):
        """A malformed existing-capabilities string raises ValueError."""
        self.assertRaises(ValueError,
                          utils.get_updated_capabilities,
                          'ilo_firmware_version', {})

    def test_get_updated_capabilities_capabilities_not_dict(self):
        """Non-dict new capabilities raise ValueError."""
        self.assertRaises(ValueError,
                          utils.get_updated_capabilities,
                          None, ['ilo_firmware_version:xyz', 'foo:bar'])

    def test_get_updated_capabilities_add_to_existing_capabilities(self):
        """New keys are merged into an existing capabilities string."""
        returned = utils.get_updated_capabilities('foo:bar',
                                                  {'BootMode': 'uefi'})
        self.assertEqual(set('BootMode:uefi,foo:bar'.split(',')),
                         set(returned.split(',')))
        self.assertIsInstance(returned, str)

    def test_get_updated_capabilities_replace_to_existing_capabilities(self):
        """An existing key's value is overwritten by the new one."""
        returned = utils.get_updated_capabilities('BootMode:uefi',
                                                  {'BootMode': 'bios'})
        self.assertEqual(set('BootMode:bios'.split(',')),
                         set(returned.split(',')))
        self.assertIsInstance(returned, str)

    def test_validate_network_port(self):
        """In-range port strings are converted to integers."""
        self.assertEqual(1, utils.validate_network_port('1', 'message'))
        self.assertEqual(65535, utils.validate_network_port('65535'))

    def test_validate_network_port_fail(self):
        """Out-of-range and non-numeric ports are rejected."""
        self.assertRaisesRegex(exception.InvalidParameterValue,
                               'Port "65536" is out of range.',
                               utils.validate_network_port,
                               '65536')
        self.assertRaisesRegex(exception.InvalidParameterValue,
                               'fake_port "-1" is out of range.',
                               utils.validate_network_port,
                               '-1',
                               'fake_port')
        self.assertRaisesRegex(exception.InvalidParameterValue,
                               'Port "invalid" is not a valid integer.',
                               utils.validate_network_port,
                               'invalid')
class JinjaTemplatingTestCase(base.TestCase):
    """Tests for utils.render_template with string and file templates."""

    def setUp(self):
        super(JinjaTemplatingTestCase, self).setUp()
        # Shared fixture: template text, substitutions, expected rendering.
        self.template = '{{ foo }} {{ bar }}'
        self.params = {'foo': 'spam', 'bar': 'ham'}
        self.expected = 'spam ham'

    def test_render_string(self):
        """is_file=False renders the template argument directly."""
        rendered = utils.render_template(self.template, self.params,
                                         is_file=False)
        self.assertEqual(self.expected, rendered)

    @mock.patch('ironic.common.utils.jinja2.FileSystemLoader')
    def test_render_file(self, jinja_fsl_mock):
        """A path argument loads the named template from its directory."""
        # Substitute an in-memory loader so no real file is needed.
        jinja_fsl_mock.return_value = jinja2.DictLoader(
            {'template.j2': self.template})
        rendered = utils.render_template('/path/to/template.j2', self.params)
        self.assertEqual(self.expected, rendered)
        jinja_fsl_mock.assert_called_once_with('/path/to')
| |
# politician/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
import re
from django.db import models
from django.db.models import Q
import wevote_functions.admin
from exception.models import handle_exception, handle_record_found_more_than_one_exception
from tag.models import Tag
from wevote_functions.functions import convert_to_political_party_constant, \
display_full_name_with_correct_capitalization, \
extract_first_name_from_full_name, extract_middle_name_from_full_name, \
extract_last_name_from_full_name, extract_twitter_handle_from_text_string, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_id_politician_integer, fetch_site_unique_id_prefix
# Single-character gender codes stored in Politician.gender.
FEMALE = 'F'
GENDER_NEUTRAL = 'N'
MALE = 'M'
UNKNOWN = 'U'
# Choices tuple for the Politician.gender CharField.
GENDER_CHOICES = (
    (FEMALE, 'Female'),
    (GENDER_NEUTRAL, 'Gender Neutral'),
    (MALE, 'Male'),
    (UNKNOWN, 'Unknown'),
)
logger = wevote_functions.admin.get_logger(__name__)
# When merging candidates, these are the fields we check for figure_out_candidate_conflict_values
# NOTE(review): keep this list in sync with the field names declared on the
# Politician model below.
POLITICIAN_UNIQUE_IDENTIFIERS = [
    'ballotpedia_id',
    'bioguide_id',
    'birth_date',
    'cspan_id',
    'ctcl_uuid',
    'first_name',
    'gender',
    'govtrack_id',
    'house_history_id',
    'icpsr_id',
    'last_name',
    'lis_id',
    'maplight_id',
    'middle_name',
    'opensecrets_id',
    'political_party',
    'politician_email_address',
    'politician_facebook_id',
    'politician_googleplus_id',
    'politician_name',
    'politician_phone_number',
    'politician_twitter_handle',
    'politician_url',
    'politician_youtube_id',
    'state_code',
    'thomas_id',
    'vote_smart_id',
    'vote_usa_politician_id',
    'washington_post_id',
    'we_vote_hosted_profile_image_url_large',
    'we_vote_hosted_profile_image_url_medium',
    'we_vote_hosted_profile_image_url_tiny',
    'wikipedia_id',
]
class Politician(models.Model):
    """A person who holds, or has run for, public office.

    We are relying on the built-in Python id field for the primary key.
    The we_vote_id identifier is unique across all We Vote sites, and allows
    us to share our data with other organizations. It starts with "wv", then
    a database specific identifier like "3v"
    (WeVoteSetting.site_unique_id_prefix), then the string "pol", and then a
    sequential integer like "123". We keep the last value in
    WeVoteSetting.we_vote_id_last_politician_integer.
    """
    we_vote_id = models.CharField(
        verbose_name="we vote permanent id of this politician", max_length=255, default=None, null=True,
        blank=True, unique=True)
    # See this url for properties: https://docs.python.org/2/library/functions.html#property
    first_name = models.CharField(verbose_name="first name",
                                  max_length=255, default=None, null=True, blank=True)
    middle_name = models.CharField(verbose_name="middle name",
                                   max_length=255, default=None, null=True, blank=True)
    last_name = models.CharField(verbose_name="last name",
                                 max_length=255, default=None, null=True, blank=True)
    politician_name = models.CharField(verbose_name="official full name",
                                       max_length=255, default=None, null=True, blank=True)
    # This is the politician's name from GoogleCivicCandidateCampaign
    google_civic_candidate_name = models.CharField(verbose_name="full name from google civic",
                                                   max_length=255, default=None, null=True, blank=True)
    google_civic_candidate_name2 = models.CharField(max_length=255, null=True)
    google_civic_candidate_name3 = models.CharField(max_length=255, null=True)
    # This is the politician's name assembled from TheUnitedStatesIo first_name + last_name for quick search
    full_name_assembled = models.CharField(verbose_name="full name assembled from first_name + last_name",
                                           max_length=255, default=None, null=True, blank=True)
    gender = models.CharField("gender", max_length=1, choices=GENDER_CHOICES, default=UNKNOWN)
    birth_date = models.DateField("birth date", default=None, null=True, blank=True)
    # race = enum?
    # official_image_id = ??
    bioguide_id = models.CharField(verbose_name="bioguide unique identifier",
                                   max_length=200, null=True, unique=True)
    thomas_id = models.CharField(verbose_name="thomas unique identifier",
                                 max_length=200, null=True, unique=True)
    lis_id = models.CharField(verbose_name="lis unique identifier",
                              max_length=200, null=True, blank=True, unique=False)
    govtrack_id = models.CharField(verbose_name="govtrack unique identifier",
                                   max_length=200, null=True, unique=True)
    opensecrets_id = models.CharField(verbose_name="opensecrets unique identifier",
                                      max_length=200, null=True, unique=False)
    vote_smart_id = models.CharField(verbose_name="votesmart unique identifier",
                                     max_length=200, null=True, unique=False)
    fec_id = models.CharField(verbose_name="fec unique identifier",
                              max_length=200, null=True, unique=True, blank=True)
    cspan_id = models.CharField(verbose_name="cspan unique identifier",
                                max_length=200, null=True, blank=True, unique=False)
    wikipedia_id = models.CharField(verbose_name="wikipedia url",
                                    max_length=500, default=None, null=True, blank=True)
    ballotpedia_id = models.CharField(verbose_name="ballotpedia url",
                                      max_length=500, default=None, null=True, blank=True)
    house_history_id = models.CharField(verbose_name="house history unique identifier",
                                        max_length=200, null=True, blank=True)
    maplight_id = models.CharField(verbose_name="maplight unique identifier",
                                   max_length=200, null=True, unique=True, blank=True)
    washington_post_id = models.CharField(verbose_name="washington post unique identifier",
                                          max_length=200, null=True, unique=False)
    icpsr_id = models.CharField(verbose_name="icpsr unique identifier",
                                max_length=200, null=True, unique=False)
    tag_link = models.ManyToManyField(Tag, through='PoliticianTagLink')
    # The full name of the party the official belongs to.
    political_party = models.CharField(verbose_name="politician political party", max_length=255, null=True)
    state_code = models.CharField(verbose_name="politician home state", max_length=2, null=True)
    politician_url = models.URLField(
        verbose_name='latest website url of politician', max_length=255, blank=True, null=True)
    politician_twitter_handle = models.CharField(
        verbose_name='politician twitter screen_name', max_length=255, null=True, unique=False)
    vote_usa_politician_id = models.CharField(
        verbose_name="Vote USA permanent id for this candidate", max_length=64, default=None, null=True, blank=True)
    we_vote_hosted_profile_image_url_large = models.URLField(verbose_name='we vote hosted large image url',
                                                             blank=True, null=True)
    we_vote_hosted_profile_image_url_medium = models.URLField(verbose_name='we vote hosted medium image url',
                                                              blank=True, null=True)
    we_vote_hosted_profile_image_url_tiny = models.URLField(verbose_name='we vote hosted tiny image url',
                                                            blank=True, null=True)
    # ctcl politician fields
    ctcl_uuid = models.CharField(verbose_name="ctcl uuid", max_length=36, null=True, blank=True)
    politician_facebook_id = models.CharField(verbose_name='politician facebook user name', max_length=255, null=True,
                                              unique=False)
    politician_phone_number = models.CharField(verbose_name='politician phone number', max_length=255, null=True,
                                               unique=False)
    politician_googleplus_id = models.CharField(verbose_name='politician googleplus profile name', max_length=255,
                                                null=True, unique=False)
    politician_youtube_id = models.CharField(verbose_name='politician youtube profile name', max_length=255, null=True,
                                             unique=False)
    politician_email_address = models.CharField(verbose_name='politician email address', max_length=80, null=True,
                                                unique=False)
    date_last_updated = models.DateTimeField(null=True, auto_now=True)

    # We override the save function so we can auto-generate we_vote_id
    def save(self, *args, **kwargs):
        """Normalize we_vote_id (generating one if missing), then save."""
        # Even if this data came from another source we still need a unique we_vote_id
        if self.we_vote_id:
            self.we_vote_id = self.we_vote_id.strip().lower()
        if self.we_vote_id == "" or self.we_vote_id is None:  # If there isn't a value...
            # ...generate a new id
            site_unique_id_prefix = fetch_site_unique_id_prefix()
            next_local_integer = fetch_next_we_vote_id_politician_integer()
            # "wv" = We Vote
            # site_unique_id_prefix = a generated (or assigned) unique id for one server running We Vote
            # "pol" = tells us this is a unique id for a Politician
            # next_integer = a unique, sequential integer for this server - not necessarily tied to database id
            self.we_vote_id = "wv{site_unique_id_prefix}pol{next_integer}".format(
                site_unique_id_prefix=site_unique_id_prefix,
                next_integer=next_local_integer,
            )
        if self.maplight_id == "":  # We want this to be unique IF there is a value, and otherwise "None"
            self.maplight_id = None
        super(Politician, self).save(*args, **kwargs)

    def __unicode__(self):
        # NOTE(review): Python 2 style; last_name may be None — confirm
        # whether a __str__ returning a guaranteed string should be added.
        return self.last_name

    class Meta:
        ordering = ('last_name',)

    def display_full_name(self):
        """Return the best available human-readable name for this politician."""
        if self.politician_name:
            return self.politician_name
        elif self.first_name and self.last_name:
            return self.first_name + " " + self.last_name
        elif self.google_civic_candidate_name:
            return self.google_civic_candidate_name
        else:
            # Bug fix: this fallback used to repeat the unconditional
            # first_name + " " + last_name concatenation, which raised
            # TypeError whenever either part was None (the only way to reach
            # this branch). Join just the parts that exist instead; this may
            # return an empty string when no name data is present.
            return " ".join(part for part in (self.first_name, self.last_name) if part)

    def politician_photo_url(self):
        """
        fetch URL of politician's photo from TheUnitedStatesIo repo
        """
        if self.bioguide_id:
            url_str = 'https://theunitedstates.io/images/congress/225x275/{bioguide_id}.jpg'.format(
                bioguide_id=self.bioguide_id)
            return url_str
        else:
            return ""

    def is_female(self):
        return self.gender in [FEMALE]

    def is_gender_neutral(self):
        return self.gender in [GENDER_NEUTRAL]

    def is_male(self):
        return self.gender in [MALE]

    def is_gender_specified(self):
        # True for any gender other than UNKNOWN.
        return self.gender in [FEMALE, GENDER_NEUTRAL, MALE]
class PoliticiansAreNotDuplicates(models.Model):
    """
    When checking for duplicates, there are times when we want to explicitly mark two politicians as NOT duplicates
    """
    politician1_we_vote_id = models.CharField(
        verbose_name="first politician we are tracking", max_length=255, null=True, unique=False)
    politician2_we_vote_id = models.CharField(
        verbose_name="second politician we are tracking", max_length=255, null=True, unique=False)

    def fetch_other_politician_we_vote_id(self, one_we_vote_id):
        """Given one we_vote_id of the pair, return its partner.

        If the we_vote_id passed in isn't part of this pair, return ""
        rather than another we_vote_id.
        """
        partner_of = {
            self.politician1_we_vote_id: self.politician2_we_vote_id,
            self.politician2_we_vote_id: self.politician1_we_vote_id,
        }
        return partner_of.get(one_we_vote_id, "")
class PoliticianManager(models.Manager):
def __init__(self):
    """Initialize the manager via Django's models.Manager machinery.

    Bug fix: this override used to be a bare ``pass``, which skipped
    ``models.Manager.__init__`` entirely and left the manager without the
    state Django normally sets up there (e.g. its creation counter used for
    manager ordering). Delegate to the parent instead; the no-argument
    signature callers use is unchanged.
    """
    super(PoliticianManager, self).__init__()
def politician_photo_url(self, politician_id):
    """Return the photo URL for the politician with this id, or ''."""
    # Use this manager directly rather than constructing another one.
    lookup = self.retrieve_politician(politician_id)
    if not lookup['success']:
        return ""
    one_politician = lookup['politician']
    return one_politician.politician_photo_url()
def retrieve_politician(self, politician_id=0, we_vote_id=None, read_only=False):
    """Retrieve a single Politician by database id or by we_vote_id.

    :param politician_id: local database id to look up (takes precedence)
    :param we_vote_id: We Vote permanent id to look up
    :param read_only: when truthy, query the 'readonly' database
    :return: results dict with 'politician', 'politician_found',
        'politician_id', 'politician_we_vote_id', 'success', 'status' and
        exception flags.
    """
    error_result = False
    exception_does_not_exist = False
    exception_multiple_object_returned = False
    politician = None
    politician_found = False
    # Bug fix: the found-id used to be tracked by reassigning the
    # ``politician_id`` parameter to 0 before the lookup ran, so retrieval
    # by id could never succeed. Track the values found in separate
    # variables instead.
    politician_id_found = 0
    politician_we_vote_id = ""
    success = True
    status = ''
    try:
        if positive_value_exists(politician_id):
            if positive_value_exists(read_only):
                politician = Politician.objects.using('readonly').get(id=politician_id)
            else:
                politician = Politician.objects.get(id=politician_id)
            politician_id_found = politician.id
            politician_we_vote_id = politician.we_vote_id
            politician_found = True
        elif positive_value_exists(we_vote_id):
            if positive_value_exists(read_only):
                politician = Politician.objects.using('readonly').get(we_vote_id__iexact=we_vote_id)
            else:
                politician = Politician.objects.get(we_vote_id__iexact=we_vote_id)
            politician_id_found = politician.id
            politician_we_vote_id = politician.we_vote_id
            politician_found = True
    except Politician.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
        error_result = True
        exception_multiple_object_returned = True
        success = False
        status += "MULTIPLE_POLITICIANS_FOUND "
    except Politician.DoesNotExist:
        # Not finding a politician is not treated as a failure here:
        # success stays True and politician_found stays False.
        error_result = True
        exception_does_not_exist = True
        status += "NO_POLITICIAN_FOUND "
    except Exception as e:
        success = False
        status += "PROBLEM_WITH_RETRIEVE_POLITICIAN: " + str(e) + ' '
    results = {
        'success': success,
        'status': status,
        'politician': politician,
        'politician_found': politician_found,
        'politician_id': politician_id_found,
        'politician_we_vote_id': politician_we_vote_id,
        'error_result': error_result,
        'DoesNotExist': exception_does_not_exist,
        'MultipleObjectsReturned': exception_multiple_object_returned,
    }
    return results
def retrieve_politician_from_we_vote_id(self, politician_we_vote_id):
    """Convenience wrapper: retrieve one politician by we_vote_id only."""
    return self.retrieve_politician(politician_id=0, we_vote_id=politician_we_vote_id)
def retrieve_all_politicians_that_might_match_candidate(
        self,
        candidate_name='',
        candidate_twitter_handle='',
        google_civic_candidate_name='',
        google_civic_candidate_name2='',
        google_civic_candidate_name3='',
        maplight_id='',
        state_code='',
        vote_smart_id='',
        vote_usa_politician_id='',
        ):
    """Find Politician rows matching any of a candidate's identifiers.

    Identifier matches (vote smart / vote usa / maplight / twitter) apply on
    their own; name matches only apply when a state_code is also provided.
    All criteria are OR'd together. When exactly one politician matches it
    is returned in 'politician'; multiple matches come back as
    'politician_list'.
    """
    politician_list = []
    politician_list_found = False
    politician = Politician()
    politician_found = False
    status = ''
    try:
        politician_queryset = Politician.objects.all()
        q_fragments = []
        # Identifier-based criteria stand on their own.
        if positive_value_exists(vote_smart_id):
            q_fragments.append(Q(vote_smart_id__iexact=vote_smart_id))
        if positive_value_exists(vote_usa_politician_id):
            q_fragments.append(Q(vote_usa_politician_id__iexact=vote_usa_politician_id))
        if positive_value_exists(maplight_id):
            q_fragments.append(Q(maplight_id__iexact=maplight_id))
        if positive_value_exists(candidate_twitter_handle):
            q_fragments.append(Q(politician_twitter_handle__iexact=candidate_twitter_handle))
        # Name variants only count when we can also pin the state.
        if positive_value_exists(state_code):
            for one_name in (candidate_name,
                             google_civic_candidate_name,
                             google_civic_candidate_name2,
                             google_civic_candidate_name3):
                if positive_value_exists(one_name):
                    q_fragments.append(Q(politician_name__iexact=one_name,
                                         state_code__iexact=state_code))
        if q_fragments:
            # OR all of the criteria together and apply them.
            combined_q = q_fragments.pop()
            for one_q in q_fragments:
                combined_q |= one_q
            politician_queryset = politician_queryset.filter(combined_q)
            politician_list = politician_queryset
        else:
            politician_list = []
        if len(politician_list) == 1:
            politician_found = True
            politician_list_found = False
            politician = politician_list[0]
            status += 'ONE_POLITICIAN_RETRIEVED '
        elif len(politician_list):
            politician_found = False
            politician_list_found = True
            status += 'POLITICIAN_LIST_RETRIEVED '
        else:
            status += 'NO_POLITICIANS_RETRIEVED '
        success = True
    except Exception as e:
        status = 'FAILED retrieve_all_politicians_for_office ' \
                 '{error} [type: {error_type}]'.format(error=e, error_type=type(e))
        success = False
    # TODO DALE If nothing found, look for a national entry for this candidate -- i.e. Presidential candidates
    if not politician_found and not politician_list_found:
        pass
    results = {
        'success': success,
        'status': status,
        'politician_list_found': politician_list_found,
        'politician_list': politician_list,
        'politician_found': politician_found,
        'politician': politician,
    }
    return results
def reset_politician_image_details_from_candidate(self, candidate, twitter_profile_image_url_https,
                                                  twitter_profile_background_image_url_https,
                                                  twitter_profile_banner_url_https):
    """
    Reset a Politician entry's We Vote hosted image urls to empty strings.
    :param candidate: candidate whose politician_we_vote_id locates the politician
    :param twitter_profile_image_url_https:
    :param twitter_profile_background_image_url_https:
    :param twitter_profile_banner_url_https:
    :return: results dict with 'success', 'status' and 'politician'

    NOTE(review): the three twitter_* parameters are currently unused; they
    are kept so the signature stays compatible with existing callers.
    """
    politician_details = self.retrieve_politician(0, candidate.politician_we_vote_id)
    politician = politician_details['politician']
    # Bug fix: retrieve_politician reports success=True even when no row was
    # found (politician is None), which previously caused an AttributeError
    # below. Require an actual politician object as well, matching the
    # pattern used in update_politician_details_from_candidate().
    if politician_details['success'] and politician:
        politician.we_vote_hosted_profile_image_url_medium = ''
        politician.we_vote_hosted_profile_image_url_large = ''
        politician.we_vote_hosted_profile_image_url_tiny = ''
        politician.save()
        success = True
        status = "RESET_POLITICIAN_IMAGE_DETAILS"
    else:
        success = False
        status = "POLITICIAN_NOT_FOUND_IN_RESET_IMAGE_DETAILS"
    results = {
        'success': success,
        'status': status,
        'politician': politician
    }
    return results
def search_politicians(self, name_search_terms=None):
    """Return politicians whose name or twitter handle contains every word.

    Each whitespace-separated word in name_search_terms must match either
    politician_name or politician_twitter_handle (case-insensitive).
    """
    status = ""
    success = True
    politician_search_results_list = []
    try:
        queryset = Politician.objects.all()
        search_words = name_search_terms.split() if name_search_terms is not None else []
        for one_word in search_words:
            # Each word narrows the queryset; within a word the two fields
            # are OR'd together.
            queryset = queryset.filter(
                Q(politician_name__icontains=one_word)
                | Q(politician_twitter_handle__icontains=one_word))
        politician_search_results_list = list(queryset)
    except Exception as e:
        success = False
        status += "ERROR_SEARCHING_POLITICIANS: " + str(e) + " "
    results = {
        'status': status,
        'success': success,
        'politician_search_results_list': politician_search_results_list,
    }
    return results
def update_politician_details_from_candidate(self, candidate):
    """
    Update a politician entry with details retrieved from candidate
    :param candidate:
    :return:
    """
    values_changed = False
    politician_details = self.retrieve_politician(0, candidate.politician_we_vote_id)
    politician = politician_details['politician']
    if politician_details['success'] and politician:
        # Politician found so update politician details with candidate details
        first_name = extract_first_name_from_full_name(candidate.candidate_name)
        middle_name = extract_middle_name_from_full_name(candidate.candidate_name)
        last_name = extract_last_name_from_full_name(candidate.candidate_name)
        party_constant = None
        if positive_value_exists(candidate.party):
            party_constant = convert_to_political_party_constant(candidate.party)
        # (guard value, replacement value, politician attribute) triples:
        # the attribute is replaced only when the guard is set and the
        # replacement differs from the politician's current value.
        possible_updates = (
            (first_name, first_name, 'first_name'),
            (last_name, last_name, 'last_name'),
            (middle_name, middle_name, 'middle_name'),
            (candidate.party, party_constant, 'political_party'),
            (candidate.vote_smart_id, candidate.vote_smart_id, 'vote_smart_id'),
            (candidate.maplight_id, candidate.maplight_id, 'maplight_id'),
            (candidate.candidate_name, candidate.candidate_name, 'politician_name'),
            (candidate.google_civic_candidate_name, candidate.google_civic_candidate_name,
             'google_civic_candidate_name'),
            (candidate.state_code, candidate.state_code, 'state_code'),
            (candidate.candidate_twitter_handle, candidate.candidate_twitter_handle,
             'politician_twitter_handle'),
            (candidate.we_vote_hosted_profile_image_url_large,
             candidate.we_vote_hosted_profile_image_url_large,
             'we_vote_hosted_profile_image_url_large'),
            (candidate.we_vote_hosted_profile_image_url_medium,
             candidate.we_vote_hosted_profile_image_url_medium,
             'we_vote_hosted_profile_image_url_medium'),
            (candidate.we_vote_hosted_profile_image_url_tiny,
             candidate.we_vote_hosted_profile_image_url_tiny,
             'we_vote_hosted_profile_image_url_tiny'),
        )
        for guard_value, new_value, attribute in possible_updates:
            if positive_value_exists(guard_value) and new_value != getattr(politician, attribute):
                setattr(politician, attribute, new_value)
                values_changed = True
        if values_changed:
            politician.save()
            success = True
            status = "SAVED_POLITICIAN_DETAILS"
        else:
            success = True
            status = "NO_CHANGES_SAVED_TO_POLITICIAN_DETAILS"
    else:
        success = False
        status = "POLITICIAN_NOT_FOUND"
    results = {
        'success': success,
        'status': status,
        'politician': politician
    }
    return results
def update_or_create_politician_from_candidate(self, candidate):
    """
    Take a We Vote candidate object, and map it to update_or_create_politician
    :param candidate:
    :return:
    """
    # Name parts are derived from the candidate's full name.
    name_parts = {
        'first_name': extract_first_name_from_full_name(candidate.candidate_name),
        'middle_name': extract_middle_name_from_full_name(candidate.candidate_name),
        'last_name': extract_last_name_from_full_name(candidate.candidate_name),
    }
    # TODO Add all other identifiers from other systems
    updated_politician_values = {
        'vote_smart_id': candidate.vote_smart_id,
        'vote_usa_politician_id': candidate.vote_usa_politician_id,
        'maplight_id': candidate.maplight_id,
        'politician_name': candidate.candidate_name,
        'google_civic_candidate_name': candidate.google_civic_candidate_name,
        'state_code': candidate.state_code,
        'politician_twitter_handle': candidate.candidate_twitter_handle,
        'we_vote_hosted_profile_image_url_large': candidate.we_vote_hosted_profile_image_url_large,
        'we_vote_hosted_profile_image_url_medium': candidate.we_vote_hosted_profile_image_url_medium,
        'we_vote_hosted_profile_image_url_tiny': candidate.we_vote_hosted_profile_image_url_tiny,
        'political_party': convert_to_political_party_constant(candidate.party),
    }
    updated_politician_values.update(name_parts)
    return self.update_or_create_politician(
        updated_politician_values=updated_politician_values,
        politician_we_vote_id=candidate.politician_we_vote_id,
        vote_usa_politician_id=candidate.vote_usa_politician_id,
        candidate_twitter_handle=candidate.candidate_twitter_handle,
        candidate_name=candidate.candidate_name,
        state_code=candidate.state_code)
def update_or_create_politician(
        self,
        updated_politician_values=None,
        politician_we_vote_id='',
        vote_smart_id=0,
        vote_usa_politician_id='',
        maplight_id="",
        candidate_twitter_handle="",
        candidate_name="",
        state_code="",
        first_name="",
        middle_name="",
        last_name=""):
    """
    Either update or create a politician entry. The individual variables passed in are for the purpose of finding
    a politician to update, and the updated_politician_values variable contains the values we want to update to.
    The first matching identifier wins, checked in this order: we_vote_id,
    vote_smart_id, vote_usa_politician_id, twitter handle, name+state,
    first+last name+state.
    """
    # Bug fix: the default for updated_politician_values used to be a shared
    # mutable dict ({}) — the classic Python mutable-default pitfall. Use
    # None and build a fresh dict per call; existing callers that pass a
    # dict are unaffected.
    if updated_politician_values is None:
        updated_politician_values = {}
    new_politician_created = False
    politician_found = False
    politician = Politician()
    status = ''
    try:
        # Note: When we decide to start updating candidate_name elsewhere within We Vote, we should stop
        # updating candidate_name via subsequent Google Civic imports
        # If coming from a record that has already been in We Vote
        if positive_value_exists(politician_we_vote_id):
            # NOTE(review): an '__iexact' lookup inside update_or_create
            # becomes an invalid field name if the row must be created —
            # confirm that creates are expected through this branch.
            politician, new_politician_created = \
                Politician.objects.update_or_create(
                    we_vote_id__iexact=politician_we_vote_id,
                    defaults=updated_politician_values)
            politician_found = True
        elif positive_value_exists(vote_smart_id):
            politician, new_politician_created = \
                Politician.objects.update_or_create(
                    vote_smart_id=vote_smart_id,
                    defaults=updated_politician_values)
            politician_found = True
        elif positive_value_exists(vote_usa_politician_id):
            politician, new_politician_created = \
                Politician.objects.update_or_create(
                    vote_usa_politician_id=vote_usa_politician_id,
                    defaults=updated_politician_values)
            politician_found = True
        elif positive_value_exists(candidate_twitter_handle):
            politician, new_politician_created = \
                Politician.objects.update_or_create(
                    politician_twitter_handle__iexact=candidate_twitter_handle,
                    defaults=updated_politician_values)
            politician_found = True
        elif positive_value_exists(candidate_name) and positive_value_exists(state_code):
            state_code = state_code.lower()
            politician, new_politician_created = \
                Politician.objects.update_or_create(
                    politician_name=candidate_name,
                    state_code=state_code,
                    defaults=updated_politician_values)
            politician_found = True
        elif positive_value_exists(first_name) and positive_value_exists(last_name) \
                and positive_value_exists(state_code):
            state_code = state_code.lower()
            politician, new_politician_created = \
                Politician.objects.update_or_create(
                    first_name=first_name,
                    last_name=last_name,
                    state_code=state_code,
                    defaults=updated_politician_values)
            politician_found = True
        else:
            # If here we have exhausted our set of unique identifiers
            politician_found = False
        success = True
        if politician_found:
            status += 'POLITICIAN_SAVED '
        else:
            status += 'POLITICIAN_NOT_SAVED '
    except Exception as e:
        success = False
        status = 'UNABLE_TO_UPDATE_OR_CREATE_POLITICIAN: ' + str(e) + ' '
    results = {
        'success': success,
        'status': status,
        'politician_created': new_politician_created,
        'politician_found': politician_found,
        'politician': politician,
    }
    return results
def fetch_politician_id_from_we_vote_id(self, we_vote_id):
    """Return the local database id for a we_vote_id, or 0 when not found."""
    # Retrieve through this manager rather than constructing a new one.
    results = self.retrieve_politician(0, we_vote_id)
    return results['politician_id'] if results['success'] else 0
def fetch_politician_we_vote_id_from_id(self, politician_id):
    """Return the we_vote_id for a local database id, or '' when not found."""
    results = self.retrieve_politician(politician_id, '')
    return results['politician_we_vote_id'] if results['success'] else ''
def fetch_politicians_are_not_duplicates_list_we_vote_ids(self, politician_we_vote_id):
    """Return the we_vote_ids explicitly marked as not duplicates of this one."""
    not_duplicates = self.retrieve_politicians_are_not_duplicates_list(politician_we_vote_id)
    return not_duplicates['politicians_are_not_duplicates_list_we_vote_ids']
def create_politician_row_entry(self, politician_name, politician_first_name, politician_middle_name,
                                politician_last_name, ctcl_uuid, political_party, politician_email_address,
                                politician_phone_number, politician_twitter_handle, politician_facebook_id,
                                politician_googleplus_id, politician_youtube_id, politician_website_url):
    """
    Create Politician table entry with Politician details
    :param politician_name:
    :param politician_first_name:
    :param politician_middle_name:
    :param politician_last_name:
    :param ctcl_uuid:
    :param political_party:
    :param politician_email_address:
    :param politician_phone_number:
    :param politician_twitter_handle:
    :param politician_facebook_id:
    :param politician_googleplus_id:
    :param politician_youtube_id:
    :param politician_website_url:
    :return:
    """
    success = False
    status = ""
    politician_updated = False
    new_politician_created = False
    new_politician = ''
    # Map the incoming values onto the model's field names.
    field_values = {
        'politician_name': politician_name,
        'first_name': politician_first_name,
        'middle_name': politician_middle_name,
        'last_name': politician_last_name,
        'political_party': political_party,
        'politician_email_address': politician_email_address,
        'politician_phone_number': politician_phone_number,
        'politician_twitter_handle': politician_twitter_handle,
        'politician_facebook_id': politician_facebook_id,
        'politician_googleplus_id': politician_googleplus_id,
        'politician_youtube_id': politician_youtube_id,
        'politician_url': politician_website_url,
        'ctcl_uuid': ctcl_uuid,
    }
    try:
        new_politician = Politician.objects.create(**field_values)
        if new_politician:
            success = True
            status += "POLITICIAN_CREATED "
            new_politician_created = True
        else:
            success = False
            status += "POLITICIAN_CREATE_FAILED "
    except Exception as e:
        success = False
        new_politician_created = False
        status += "POLITICIAN_RETRIEVE_ERROR "
        handle_exception(e, logger=logger, exception_message=status)
    results = {
        'success': success,
        'status': status,
        'new_politician_created': new_politician_created,
        'politician_updated': politician_updated,
        'new_politician': new_politician,
    }
    return results
def update_politician_row_entry(self, politician_name, politician_first_name, politician_middle_name,
politician_last_name, ctcl_uuid, political_party, politician_email_address,
politician_twitter_handle, politician_phone_number, politician_facebook_id,
politician_googleplus_id, politician_youtube_id, politician_website_url,
politician_we_vote_id):
"""
Update Politician table entry with matching we_vote_id
:param politician_name:
:param politician_first_name:
:param politician_middle_name:
:param politician_last_name:
:param ctcl_uuid:
:param political_party:
:param politician_email_address:
:param politician_twitter_handle:
:param politician_phone_number:
:param politician_facebook_id:
:param politician_googleplus_id:
:param politician_youtube_id:
:param politician_website_url:
:param politician_we_vote_id:
:return:
"""
success = False
status = ""
politician_updated = False
# new_politician_created = False
# new_politician = ''
existing_politician_entry = ''
try:
existing_politician_entry = Politician.objects.get(we_vote_id__iexact=politician_we_vote_id)
if existing_politician_entry:
# found the existing entry, update the values
existing_politician_entry.politician_name = politician_name
existing_politician_entry.first_name = politician_first_name
existing_politician_entry.middle_name = politician_middle_name
existing_politician_entry.last_name = politician_last_name
existing_politician_entry.party_name = political_party
existing_politician_entry.ctcl_uuid = ctcl_uuid
existing_politician_entry.politician_phone_number = politician_phone_number
existing_politician_entry.twitter_handle = politician_twitter_handle
existing_politician_entry.politician_facebook_id = politician_facebook_id
existing_politician_entry.politician_googleplus_id = politician_googleplus_id
existing_politician_entry.politician_youtube_id = politician_youtube_id
existing_politician_entry.politician_url = politician_website_url
existing_politician_entry.politician_email_address = politician_email_address
# now go ahead and save this entry (update)
existing_politician_entry.save()
politician_updated = True
success = True
status = "POLITICIAN_UPDATED"
except Exception as e:
success = False
politician_updated = False
status = "POLITICIAN_RETRIEVE_ERROR"
handle_exception(e, logger=logger, exception_message=status)
results = {
'success': success,
'status': status,
'politician_updated': politician_updated,
'updated_politician': existing_politician_entry,
}
return results
# def delete_all_politician_data():
# with open(LEGISLATORS_CURRENT_FILE, 'rU') as politicians_current_data:
# politicians_current_data.readline() # Skip the header
# reader = csv.reader(politicians_current_data) # Create a regular tuple reader
# for index, politician_row in enumerate(reader):
# if index > 3:
# break
# politician_entry = Politician.objects.order_by('last_name')[0]
# politician_entry.delete()
def retrieve_politicians(
self,
limit_to_this_state_code="",
read_only=False,
):
"""
:param limit_to_this_state_code:
:param read_only:
:return:
"""
status = ""
politician_list = []
politician_list_found = False
try:
if positive_value_exists(read_only):
politician_query = Politician.objects.using('readonly').all()
else:
politician_query = Politician.objects.all()
if positive_value_exists(limit_to_this_state_code):
politician_query = politician_query.filter(state_code__iexact=limit_to_this_state_code)
politician_list = list(politician_query)
if len(politician_list):
politician_list_found = True
status += 'POLITICIANS_RETRIEVED '
success = True
else:
status += 'NO_POLITICIANS_RETRIEVED '
success = True
except Politician.DoesNotExist:
# No politicians found. Not a problem.
status += 'NO_POLITICIANS_FOUND_DoesNotExist '
politician_list_objects = []
success = True
except Exception as e:
handle_exception(e, logger=logger)
status += 'FAILED-retrieve_politicians_for_specific_elections: ' + str(e) + ' '
success = False
results = {
'success': success,
'status': status,
'politician_list_found': politician_list_found,
'politician_list': politician_list,
}
return results
def retrieve_politicians_from_non_unique_identifiers(
self,
state_code='',
politician_twitter_handle='',
politician_name='',
ignore_politician_id_list=[],
read_only=False):
"""
:param state_code:
:param politician_twitter_handle:
:param politician_name:
:param ignore_politician_id_list:
:param read_only:
:return:
"""
keep_looking_for_duplicates = True
politician = None
politician_found = False
politician_list = []
politician_list_found = False
politician_twitter_handle = extract_twitter_handle_from_text_string(politician_twitter_handle)
multiple_entries_found = False
success = True
status = ""
if keep_looking_for_duplicates and positive_value_exists(politician_twitter_handle):
try:
if positive_value_exists(read_only):
politician_query = Politician.objects.using('readonly').all()
else:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(politician_twitter_handle__iexact=politician_twitter_handle)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_list = list(politician_query)
if len(politician_list):
# At least one entry exists
status += 'RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_LIST_RETRIEVED '
# if a single entry matches, update that entry
if len(politician_list) == 1:
multiple_entries_found = False
politician = politician_list[0]
politician_found = True
keep_looking_for_duplicates = False
success = True
status += "POLITICIAN_FOUND_BY_TWITTER "
else:
# more than one entry found
politician_list_found = True
multiple_entries_found = True
keep_looking_for_duplicates = False # Deal with multiple Twitter duplicates manually
status += "MULTIPLE_TWITTER_MATCHES "
except Politician.DoesNotExist:
success = True
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_NOT_FOUND "
except Exception as e:
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_QUERY_FAILED1 " + str(e) + " "
success = False
keep_looking_for_duplicates = False
# twitter handle does not exist, next look up against other data that might match
if keep_looking_for_duplicates and positive_value_exists(politician_name):
# Search by Candidate name exact match
try:
if positive_value_exists(read_only):
politician_query = Politician.objects.using('readonly').all()
else:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(
Q(politician_name__iexact=politician_name) |
Q(google_civic_candidate_name__iexact=politician_name) |
Q(google_civic_candidate_name2__iexact=politician_name) |
Q(google_civic_candidate_name3__iexact=politician_name)
)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_list = list(politician_query)
if len(politician_list):
# entry exists
status += 'POLITICIAN_ENTRY_EXISTS1 '
success = True
# if a single entry matches, update that entry
if len(politician_list) == 1:
politician = politician_list[0]
politician_found = True
status += politician.we_vote_id + " "
keep_looking_for_duplicates = False
else:
# more than one entry found with a match in Politician
politician_list_found = True
keep_looking_for_duplicates = False
multiple_entries_found = True
else:
success = True
status += 'POLITICIAN_ENTRY_NOT_FOUND-EXACT '
except Politician.DoesNotExist:
success = True
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_NOT_FOUND-EXACT_MATCH "
except Exception as e:
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_QUERY_FAILED2: " + str(e) + " "
success = False
if keep_looking_for_duplicates and positive_value_exists(politician_name):
# Search for Candidate(s) that contains the same first and last names
first_name = extract_first_name_from_full_name(politician_name)
last_name = extract_last_name_from_full_name(politician_name)
if positive_value_exists(first_name) and positive_value_exists(last_name):
try:
if positive_value_exists(read_only):
politician_query = Politician.objects.using('readonly').all()
else:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(
(Q(politician_name__icontains=first_name) & Q(politician_name__icontains=last_name)) |
(Q(google_civic_candidate_name__icontains=first_name) &
Q(google_civic_candidate_name__icontains=last_name)) |
(Q(google_civic_candidate_name2__icontains=first_name) &
Q(google_civic_candidate_name2__icontains=last_name)) |
(Q(google_civic_candidate_name3__icontains=first_name) &
Q(google_civic_candidate_name3__icontains=last_name))
)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_list = list(politician_query)
if len(politician_list):
# entry exists
status += 'POLITICIAN_ENTRY_EXISTS2 '
success = True
# if a single entry matches, update that entry
if len(politician_list) == 1:
politician = politician_list[0]
politician_found = True
status += politician.we_vote_id + " "
keep_looking_for_duplicates = False
else:
# more than one entry found with a match in Politician
politician_list_found = True
keep_looking_for_duplicates = False
multiple_entries_found = True
else:
status += 'POLITICIAN_ENTRY_NOT_FOUND-FIRST_OR_LAST '
success = True
except Politician.DoesNotExist:
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_NOT_FOUND-FIRST_OR_LAST_NAME "
success = True
except Exception as e:
status += "RETRIEVE_POLITICIANS_FROM_NON_UNIQUE-POLITICIAN_QUERY_FAILED3: " + str(e) + " "
success = False
results = {
'success': success,
'status': status,
'politician_found': politician_found,
'politician': politician,
'politician_list_found': politician_list_found,
'politician_list': politician_list,
'multiple_entries_found': multiple_entries_found,
}
return results
def fetch_politicians_from_non_unique_identifiers_count(
self,
state_code='',
politician_twitter_handle='',
politician_name='',
ignore_politician_id_list=[]):
keep_looking_for_duplicates = True
politician_twitter_handle = extract_twitter_handle_from_text_string(politician_twitter_handle)
status = ""
if keep_looking_for_duplicates and positive_value_exists(politician_twitter_handle):
try:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(politician_twitter_handle__iexact=politician_twitter_handle)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_count = politician_query.count()
if positive_value_exists(politician_count):
return politician_count
except Politician.DoesNotExist:
status += "FETCH_POLITICIANS_FROM_NON_UNIQUE_IDENTIFIERS_COUNT1 "
# twitter handle does not exist, next look up against other data that might match
if keep_looking_for_duplicates and positive_value_exists(politician_name):
# Search by Candidate name exact match
try:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(
Q(politician_name__iexact=politician_name) |
Q(google_civic_candidate_name__iexact=politician_name) |
Q(google_civic_candidate_name2__iexact=politician_name) |
Q(google_civic_candidate_name3__iexact=politician_name)
)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_count = politician_query.count()
if positive_value_exists(politician_count):
return politician_count
except Politician.DoesNotExist:
status += "FETCH_POLITICIANS_FROM_NON_UNIQUE_IDENTIFIERS_COUNT2 "
if keep_looking_for_duplicates and positive_value_exists(politician_name):
# Search for Candidate(s) that contains the same first and last names
first_name = extract_first_name_from_full_name(politician_name)
last_name = extract_last_name_from_full_name(politician_name)
if positive_value_exists(first_name) and positive_value_exists(last_name):
try:
politician_query = Politician.objects.all()
politician_query = politician_query.filter(
(Q(politician_name__icontains=first_name) & Q(politician_name__icontains=last_name)) |
(Q(google_civic_candidate_name__icontains=first_name) &
Q(google_civic_candidate_name__icontains=last_name)) |
(Q(google_civic_candidate_name2__icontains=first_name) &
Q(google_civic_candidate_name2__icontains=last_name)) |
(Q(google_civic_candidate_name3__icontains=first_name) &
Q(google_civic_candidate_name3__icontains=last_name))
)
if positive_value_exists(state_code):
politician_query = politician_query.filter(state_code__iexact=state_code)
if positive_value_exists(ignore_politician_id_list):
politician_query = politician_query.exclude(we_vote_id__in=ignore_politician_id_list)
politician_count = politician_query.count()
if positive_value_exists(politician_count):
return politician_count
except Politician.DoesNotExist:
status += "FETCH_POLITICIANS_FROM_NON_UNIQUE_IDENTIFIERS_COUNT3 "
return 0
    def retrieve_politicians_are_not_duplicates_list(self, politician_we_vote_id, read_only=True):
        """
        Get a list of other politician_we_vote_id's that are not duplicates
        of the given politician. Queries the PoliticiansAreNotDuplicates link
        table in both directions and merges the results.
        :param politician_we_vote_id: the politician to find non-duplicate links for
        :param read_only: when True, query the 'readonly' database connection
        :return: results dict with the link objects and the we_vote_ids on the
                 other side of each link
        """
        # Note that the direction of the linkage does not matter
        politicians_are_not_duplicates_list1 = []
        politicians_are_not_duplicates_list2 = []
        status = ""
        # First direction: entries where this politician is politician1
        try:
            if positive_value_exists(read_only):
                politicians_are_not_duplicates_list_query = \
                    PoliticiansAreNotDuplicates.objects.using('readonly').filter(
                        politician1_we_vote_id__iexact=politician_we_vote_id,
                    )
            else:
                politicians_are_not_duplicates_list_query = PoliticiansAreNotDuplicates.objects.filter(
                    politician1_we_vote_id__iexact=politician_we_vote_id,
                )
            politicians_are_not_duplicates_list1 = list(politicians_are_not_duplicates_list_query)
            success = True
            status += "POLITICIANS_NOT_DUPLICATES_LIST_UPDATED_OR_CREATED1 "
        except PoliticiansAreNotDuplicates.DoesNotExist:
            # No data found. Try again below
            success = True
            status += 'NO_POLITICIANS_NOT_DUPLICATES_LIST_RETRIEVED_DoesNotExist1 '
        except Exception as e:
            success = False
            status += "POLITICIANS_NOT_DUPLICATES_LIST_NOT_UPDATED_OR_CREATED1: " + str(e) + ' '
        # Second direction: entries where this politician is politician2
        # (only attempted when the first query did not hard-fail)
        if success:
            try:
                if positive_value_exists(read_only):
                    politicians_are_not_duplicates_list_query = \
                        PoliticiansAreNotDuplicates.objects.using('readonly').filter(
                            politician2_we_vote_id__iexact=politician_we_vote_id,
                        )
                else:
                    politicians_are_not_duplicates_list_query = \
                        PoliticiansAreNotDuplicates.objects.filter(
                            politician2_we_vote_id__iexact=politician_we_vote_id,
                        )
                politicians_are_not_duplicates_list2 = list(politicians_are_not_duplicates_list_query)
                success = True
                status += "POLITICIANS_NOT_DUPLICATES_LIST_UPDATED_OR_CREATED2 "
            except PoliticiansAreNotDuplicates.DoesNotExist:
                success = True
                status += 'NO_POLITICIANS_NOT_DUPLICATES_LIST_RETRIEVED2_DoesNotExist2 '
            except Exception as e:
                success = False
                status += "POLITICIANS_NOT_DUPLICATES_LIST_NOT_UPDATED_OR_CREATED2: " + str(e) + ' '
        politicians_are_not_duplicates_list = \
            politicians_are_not_duplicates_list1 + politicians_are_not_duplicates_list2
        politicians_are_not_duplicates_list_found = positive_value_exists(len(politicians_are_not_duplicates_list))
        # Collect the we_vote_id from the *other* side of each link
        politicians_are_not_duplicates_list_we_vote_ids = []
        for one_entry in politicians_are_not_duplicates_list:
            if one_entry.politician1_we_vote_id != politician_we_vote_id:
                politicians_are_not_duplicates_list_we_vote_ids.append(one_entry.politician1_we_vote_id)
            elif one_entry.politician2_we_vote_id != politician_we_vote_id:
                politicians_are_not_duplicates_list_we_vote_ids.append(one_entry.politician2_we_vote_id)
        results = {
            'success':                                          success,
            'status':                                           status,
            'politicians_are_not_duplicates_list_found':        politicians_are_not_duplicates_list_found,
            'politicians_are_not_duplicates_list':              politicians_are_not_duplicates_list,
            'politicians_are_not_duplicates_list_we_vote_ids':  politicians_are_not_duplicates_list_we_vote_ids,
        }
        return results
    def retrieve_politicians_with_misformatted_names(self, start=0, count=15):
        """
        Get the first 15 records that have 3 capitalized letters in a row, as long as those letters
        are not 'III' i.e. King Henry III. Also exclude the names where the word "WITHDRAWN" has been appended when
        the politician withdrew from the race
        SELECT * FROM public.politician_politician WHERE politician_name ~ '.*?[A-Z][A-Z][A-Z].*?' and
        politician_name !~ '.*?III.*?'
        :param start: offset of the first row to return (for paging)
        :param count: maximum number of rows to return per page (default 15)
        :return: (results_list, number_of_rows) where number_of_rows is the total
                 matching count before paging
        """
        politician_query = Politician.objects.all()
        # Get all politicians that have three capital letters in a row in their name, but exclude III (King Henry III)
        # NOTE(review): the trailing (?<!III) lookbehind only constrains the end of what
        # ".*?" matched, so it may not actually exclude every name containing "III" --
        # confirm against the database regex engine and intended behavior.
        politician_query = politician_query.filter(politician_name__regex=r'.*?[A-Z][A-Z][A-Z].*?(?<!III)').\
            order_by('politician_name')
        number_of_rows = politician_query.count()
        # Page the results after counting the full match set
        politician_query = politician_query[start:(start+count)]
        politician_list_objects = list(politician_query)
        results_list = []
        # out = ''
        # out = 'KING HENRY III => ' + display_full_name_with_correct_capitalization('KING HENRY III') + ", "
        for x in politician_list_objects:
            name = x.politician_name
            # NOTE(review): this skips WITHDRAWN names only when the name is NOT all
            # capital letters (e.g. contains spaces) -- verify this matches the intent
            # described in the docstring.
            if name.endswith('WITHDRAWN') and not bool(re.match('^[A-Z]+$', name)):
                continue
            # Attach display helpers consumed by the admin template
            x.person_name_normalized = display_full_name_with_correct_capitalization(name)
            x.party = x.political_party
            results_list.append(x)
            # out += name + ' = > ' + x.person_name_normalized + ', '
        return results_list, number_of_rows
def update_or_create_politicians_are_not_duplicates(self, politician1_we_vote_id, politician2_we_vote_id):
"""
Either update or create a politician entry.
"""
exception_multiple_object_returned = False
success = False
new_politicians_are_not_duplicates_created = False
politicians_are_not_duplicates = None
status = ""
if positive_value_exists(politician1_we_vote_id) and positive_value_exists(politician2_we_vote_id):
try:
updated_values = {
'politician1_we_vote_id': politician1_we_vote_id,
'politician2_we_vote_id': politician2_we_vote_id,
}
politicians_are_not_duplicates, new_politicians_are_not_duplicates_created = \
PoliticiansAreNotDuplicates.objects.update_or_create(
politician1_we_vote_id__exact=politician1_we_vote_id,
politician2_we_vote_id__iexact=politician2_we_vote_id,
defaults=updated_values)
success = True
status += "POLITICIANS_ARE_NOT_DUPLICATES_UPDATED_OR_CREATED "
except PoliticiansAreNotDuplicates.MultipleObjectsReturned as e:
success = False
status += 'MULTIPLE_MATCHING_POLITICIANS_ARE_NOT_DUPLICATES_FOUND_BY_POLITICIAN_WE_VOTE_ID '
exception_multiple_object_returned = True
except Exception as e:
status += 'EXCEPTION_UPDATE_OR_CREATE_POLITICIANS_ARE_NOT_DUPLICATES ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e))
success = False
results = {
'success': success,
'status': status,
'MultipleObjectsReturned': exception_multiple_object_returned,
'new_politicians_are_not_duplicates_created': new_politicians_are_not_duplicates_created,
'politicians_are_not_duplicates': politicians_are_not_duplicates,
}
return results
class PoliticianTagLink(models.Model):
    """
    A confirmed (undisputed) link between tag & item of interest.
    """
    # The tag being applied; DO_NOTHING so deleting a Tag never cascades here
    tag = models.ForeignKey(Tag, null=False, blank=False, verbose_name='tag unique identifier',
                            on_delete=models.deletion.DO_NOTHING)
    # The politician the tag applies to; DO_NOTHING for the same reason
    politician = models.ForeignKey(Politician, null=False, blank=False, verbose_name='politician unique identifier',
                                   on_delete=models.deletion.DO_NOTHING)
    # Placeholders for other taggable item types not yet implemented:
    # measure_id
    # office_id
    # issue_id
class PoliticianTagLinkDisputed(models.Model):
    """
    This is a highly disputed link between tag & item of interest. Generated from 'tag_added', and tag results
    are only shown to people within the cloud of the voter who posted
    We split off how things are tagged to avoid conflict wars between liberals & conservatives
    (Deal with some tags visible in some networks, and not in others - ex/ #ObamaSucks)
    """
    # Same shape as PoliticianTagLink; kept in a separate table so disputed
    # tags can be filtered by network visibility rules
    tag = models.ForeignKey(Tag, null=False, blank=False, verbose_name='tag unique identifier',
                            on_delete=models.deletion.DO_NOTHING)
    politician = models.ForeignKey(Politician, null=False, blank=False, verbose_name='politician unique identifier',
                                   on_delete=models.deletion.DO_NOTHING)
    # Placeholders for other taggable item types not yet implemented:
    # measure_id
    # office_id
    # issue_id
| |
from django.views import generic
from django.http import Http404, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.contrib.auth import login, mixins, get_user
from django.contrib.auth.forms import AuthenticationForm
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from .utils import upload_file
from .models import AccessLog, Document, Revision, Space
from .forms import DocumentForm, RevisionInlineFormset, UploadFileForm
from .mixins import (
DeletePermissionRequiredMixin,
EditPermissionRequiredMixin,
ViewPermissionRequiredMixin)
class GenericDocView(generic.DetailView):
    """ Generic view for document details.

    Subclasses may set self.rev (see RevisionView) to display a specific
    revision instead of the document's latest one.
    """
    rev = None
    model = Document
    template_name = 'spaces/document/view.html'
    def get_object(self):
        # Resolve the document from the URL path; returns None when it does
        # not exist (subclasses needing a hard failure override and raise 404)
        try:
            document = Document.objects.get_by_path(self.kwargs["path"])
        except ObjectDoesNotExist:
            document = None
        return document
    def get_context_data(self, **kwargs):
        """ Add revision, general space list, and breadcrumb path to context. """
        document = self.get_object()
        context = super(GenericDocView, self).get_context_data(**kwargs)
        # Document version
        if self.rev:
            # NOTE(review): assumes document is not None whenever self.rev is
            # set (true for RevisionView, which derives the document from the
            # revision) -- confirm for any new subclass setting self.rev.
            context["revision"] = self.rev
            context["is_latest"] = (self.rev.id == document.latest.id)
        elif document:
            context["revision"] = document.latest
            context["is_latest"] = True
        # General space list
        context["general_spaces"] = Space.objects.exclude(
            name__in=[Space.ROOT_SPACE_NAME, Space.USER_SPACE_NAME])
        # Breadcrumbs
        context["path_documents"] = []
        if document:
            parent = document.parent
            # The root space's root document is omitted from the breadcrumb trail
            if not (document.is_space_root and document.space.name == Space.ROOT_SPACE_NAME):
                context["path_documents"].insert(0, document)
            # Walk up the parent chain, building the path from the root down
            while parent is not None:
                context["path_documents"].insert(0, parent)
                parent = parent.parent
        return context
class DocDetailView(ViewPermissionRequiredMixin, GenericDocView):
    """ Display the document. """
    def get(self, request, *args, **kwargs):
        """ Record an access-log entry for this view, then render. """
        doc = self.get_object()
        viewer = get_user(request)
        if viewer.is_anonymous():
            # Anonymous hits are logged with no user attached
            viewer = None
        if doc:
            AccessLog.objects.create(document=doc, user=viewer)
        return super(DocDetailView, self).get(request, *args, **kwargs)
class DocInfoView(ViewPermissionRequiredMixin, GenericDocView):
    """ Show info and revisions for this document """
    # Reuses GenericDocView's object lookup and context; only the template differs
    template_name = 'spaces/document/info.html'
class DocCreateView(EditPermissionRequiredMixin, generic.edit.UpdateView):
    """ Create a new document.

    Built on UpdateView so the form/formset handling can be shared with
    DocUpdateView; "creating" is editing a not-yet-saved Document.
    """
    form_class = DocumentForm
    template_name = 'spaces/document/edit.html'
    def get_object(self):
        # Build an unsaved Document for the requested URL path
        path = self.kwargs["path"]
        doc = Document(path=path)
        return doc
    def get_form_kwargs(self):
        """ Add user to the kwargs sent to DocumentForm """
        # self.user is assigned by _setup_forms() before the form is built
        kwargs = super(DocCreateView, self).get_form_kwargs()
        kwargs["user"] = self.user
        return kwargs
    def _setup_forms(self, request, post=None):
        """ Build the document form and its inline revision formset.

        :param post: request.POST for bound (submitted) forms, None for GET
        :return: (form, revision_form) tuple
        """
        self.user = request.user
        self.object = self.get_object()
        rev_qs = self.object.revision_set.all()
        if rev_qs.count():
            # Restrict the inline formset to a single existing revision row.
            # NOTE(review): relies on revision_set's default ordering to pick
            # the relevant revision first -- confirm against the Revision model.
            rev_qs = rev_qs.filter(pk=rev_qs[0].pk)
        form = self.get_form(self.get_form_class())
        revision_form = RevisionInlineFormset(
            post,
            instance=self.object,
            queryset=rev_qs,
            user=self.user)
        return (form, revision_form, )
    def get(self, request, *args, **kwargs):
        """ Handle GET requests. """
        form, revision_form = self._setup_forms(request)
        return self.render_to_response(
            self.get_context_data(
                base_path=self.get_object().full_path(False),
                form=form,
                revision_form=revision_form))
    def post(self, request, *args, **kwargs):
        """ Handle POST requests. """
        form, revision_form = self._setup_forms(request, request.POST)
        self.object.parent = None  # Parent is defined by path
        if (form.is_valid() and revision_form.is_valid()):
            return self.form_valid(form, revision_form)
        return self.form_invalid(form, revision_form)
    def form_valid(self, form, revision_form):
        """ All good. Finish up and save. """
        self.object = form.save()
        # Re-point the formset at the saved document before saving revisions
        revision_form.instance = self.object
        revision_form.save()
        return HttpResponseRedirect(self.get_success_url())
    def form_invalid(self, form, revision_form):
        """
        Called if a form is invalid. Re-renders the context data with the
        data-filled forms and errors.
        """
        return self.render_to_response(
            self.get_context_data(
                base_path=self.get_object().full_path(False),
                form=form,
                revision_form=revision_form))
class DocUpdateView(DocCreateView):
    """ Edit an existing document; 404 when the path does not resolve. """
    def get_object(self):
        try:
            return Document.objects.get_by_path(self.kwargs["path"])
        except ObjectDoesNotExist:
            raise Http404
class DocDeleteView(DeletePermissionRequiredMixin, generic.edit.DeleteView):
    """ Delete a document, then redirect to its parent document. """
    model = Document
    def post(self, request, *args, **kwargs):
        # Renamed from "object", which shadowed the builtin
        document = self.get_object()
        # Redirect to the parent page
        self.success_url = reverse(
            'spaces:document',
            kwargs={"path": document.parent.full_path()})
        return super(DocDeleteView, self).post(request, *args, **kwargs)
class RevisionView(GenericDocView):
    """ View one specific revision of a document (selected by pk). """
    def get_object(self):
        revision_pk = self.kwargs["pk"]
        try:
            # Stash the revision so GenericDocView renders it instead of latest
            self.rev = Revision.objects.get(pk=revision_pk)
        except ObjectDoesNotExist:
            raise Http404
        return self.rev.doc
class LoginView(generic.edit.FormView):
    """ Present and process the login form. """
    form_class = AuthenticationForm
    template_name = 'spaces/login.html'
    def form_valid(self, form):
        # Log the user in, then send them to the wiki root document
        login(self.request, form.get_user())
        self.success_url = reverse('spaces:document', kwargs={"path": ""})
        return super(LoginView, self).form_valid(form)
@csrf_exempt
def file_upload_view(request):
    """ Accept a file upload and return its stored path as JSON.

    POST with a valid form stores the file and responds with {'link': path};
    GET (or an invalid POST) renders the upload form page, with any form
    errors bound for redisplay.

    NOTE(review): @csrf_exempt disables CSRF protection on this upload
    endpoint -- confirm this is intentional and that permissions are
    enforced elsewhere.
    """
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            path = upload_file(request.FILES['file'])
            return JsonResponse({'link': path})
    else:
        form = UploadFileForm()
    return render(request, 'spaces/file_upload.html', {'form': form})
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""In-Graph Beam Search Implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest # pylint: disable=E0611
class BeamSearchState(
    namedtuple("BeamSearchState", ["log_probs", "finished", "lengths"])):
  """State for a single step of beam search. Immutable (a namedtuple).

  Args:
    log_probs: The current log probabilities of all beams
    finished: A boolean vector that specifies which beams are finished
    lengths: Lengths of all beams
  """
  pass
class BeamSearchStepOutput(
    namedtuple("BeamSearchStepOutput",
               ["scores", "predicted_ids", "beam_parent_ids"])):
  """Outputs for a single step of beam search. Immutable (a namedtuple).

  Args:
    scores: Score for each beam, a float32 vector
    predicted_ids: predictions for this step, an int32 vector
    beam_parent_ids: an int32 vector containing the beam indices of the
      continued beams from the previous step
  """
  pass
class BeamSearchConfig(
    namedtuple("BeamSearchConfig", [
        "beam_width", "vocab_size", "eos_token", "length_penalty_weight",
        "choose_successors_fn"
    ])):
  """Configuration object for beam search. Immutable (a namedtuple).

  Args:
    beam_width: Number of beams to use, an integer
    vocab_size: Output vocabulary size
    eos_token: The id of the EOS token, used to mark beams as "done"
    length_penalty_weight: Weight for the length penalty factor. 0.0 disables
      the penalty.
    choose_successors_fn: A function used to choose beam successors based
      on their scores. Maps from (scores, config) => (chosen scores, chosen_ids)
  """
  pass
def gather_tree_py(values, parents):
  """Reconstructs the full beams by following parent pointers backwards
  from the final time step. Pure-numpy helper wrapped by `gather_tree`.

  `values` and `parents` both have shape [time, beam]; the result has the
  same shape and dtype as `values`.
  """
  max_time, beam_width = values.shape[0], values.shape[1]
  out = np.zeros_like(values)
  # The last time step is kept as-is; earlier steps are back-traced.
  out[-1, :] = values[-1, :]
  for beam in range(beam_width):
    parent_idx = parents[-1][beam]
    for step in reversed(range(max_time - 1)):
      out[step, beam] = values[step][parent_idx]
      parent_idx = parents[step][parent_idx]
  return np.array(out).astype(values.dtype)
def gather_tree(values, parents):
  """Tensor version of gather_tree_py.

  Wraps the numpy implementation in a `tf.py_func` so it can run inside the
  graph. The static shape is restored explicitly because `py_func` loses it.
  """
  res = tf.py_func(
      func=gather_tree_py, inp=[values, parents], Tout=values.dtype)
  res.set_shape(values.get_shape().as_list())
  return res
def create_initial_beam_state(config):
  """Creates an instance of `BeamState` that can be used on the first
  call to `beam_step`.

  All beams start with log probability 0, unfinished, and length 0.

  Args:
    config: A BeamSearchConfig

  Returns:
    An instance of `BeamState`.
  """
  return BeamSearchState(
      log_probs=tf.zeros([config.beam_width]),
      finished=tf.zeros(
          [config.beam_width], dtype=tf.bool),
      lengths=tf.zeros(
          [config.beam_width], dtype=tf.int32))
def length_penalty(sequence_lengths, penalty_factor):
  """Calculates the length penalty according to
  https://arxiv.org/abs/1609.08144 (eqn. 14): ((5 + len) / 6) ** alpha.

  Args:
    sequence_lengths: The sequence length of all hypotheses, a tensor
      of shape [beam_size, vocab_size].
    penalty_factor: A scalar that weights the length penalty.

  Returns:
    The length penalty factor, a tensor of shape [beam_size].
  """
  return tf.div((5. + tf.to_float(sequence_lengths))**penalty_factor, (5. + 1.)
                **penalty_factor)
def hyp_score(log_probs, sequence_lengths, config):
  """Calculates scores for beam search hypotheses.

  The score of a hypothesis is its log probability normalized by the
  length penalty (https://arxiv.org/abs/1609.08144).
  """
  penalty = length_penalty(
      sequence_lengths=sequence_lengths,
      penalty_factor=config.length_penalty_weight)
  return log_probs / penalty
def choose_top_k(scores_flat, config):
  """Chooses the top-k beams as successors.

  Args:
    scores_flat: A 1-D float32 tensor of candidate scores
    config: A BeamSearchConfig; config.beam_width successors are chosen

  Returns:
    A (next_beam_scores, word_indices) tuple, each of length beam_width.
    word_indices index into the flattened scores.
  """
  next_beam_scores, word_indices = tf.nn.top_k(scores_flat, k=config.beam_width)
  return next_beam_scores, word_indices
def nest_map(inputs, map_fn, name=None):
  """Applies a function to a (possibly nested) tuple of tensors.

  When `name` is given, the result is wrapped in `tf.identity` with that name.
  """
  if not nest.is_sequence(inputs):
    outputs = map_fn(inputs)
  else:
    flat_inputs = nest.flatten(inputs)
    mapped = [map_fn(tensor) for tensor in flat_inputs]
    outputs = nest.pack_sequence_as(inputs, mapped)
  if name:
    outputs = tf.identity(outputs, name=name)
  return outputs
def mask_probs(probs, eos_token, finished):
  """Masks log probabilities such that finished beams
  allocate all probability mass to eos. Unfinished beams remain unchanged.

  Args:
    probs: Log probabilities of shape `[beam_width, vocab_size]`
    eos_token: An int32 id corresponding to the EOS token to allocate
      probability to
    finished: A boolean tensor of shape `[beam_width]` that specifies which
      elements in the beam are finished already.

  Returns:
    A tensor of shape `[beam_width, vocab_size]`, where unfinished beams
    stay unchanged and finished beams are replaced with a tensor that has all
    probability on the EOS token.
  """
  vocab_size = tf.shape(probs)[1]
  # 1.0 for unfinished beams, 0.0 for finished ones, shape [beam_width, 1]
  finished_mask = tf.expand_dims(tf.to_float(1. - tf.to_float(finished)), 1)
  # These examples are not finished and we leave them
  non_finished_examples = finished_mask * probs
  # All finished examples are replaced with a vector that has all
  # probability on EOS (log prob 0 at EOS, effectively -inf elsewhere)
  finished_row = tf.one_hot(
      eos_token,
      vocab_size,
      dtype=tf.float32,
      on_value=0.,
      off_value=tf.float32.min)
  finished_examples = (1. - finished_mask) * finished_row
  return finished_examples + non_finished_examples
def beam_search_step(time_, logits, beam_state, config):
  """Performs a single step of Beam Search Decoding.
  Args:
    time_: Beam search time step, should start at 0. At time 0 we assume
      that all beams are equal and consider only the first beam for
      continuations.
    logits: Logits at the current time step. A tensor of shape `[B, vocab_size]`
    beam_state: Current state of the beam search. An instance of `BeamState`
    config: An instance of `BeamSearchConfig`
  Returns:
    A `(BeamSearchStepOutput, BeamSearchState)` tuple: the chosen
    continuations for this step and the updated beam state.
  """
  # Calculate the current lengths of the predictions
  prediction_lengths = beam_state.lengths
  previously_finished = beam_state.finished
  # Calculate the total log probs for the new hypotheses
  # Final Shape: [beam_width, vocab_size]
  probs = tf.nn.log_softmax(logits)
  # Finished beams put all probability mass on EOS so they cannot be extended
  probs = mask_probs(probs, config.eos_token, previously_finished)
  total_probs = tf.expand_dims(beam_state.log_probs, 1) + probs
  # Calculate the continuation lengths
  # We add 1 to all continuations that are not EOS and were not
  # finished previously
  lengths_to_add = tf.one_hot([config.eos_token] * config.beam_width,
                              config.vocab_size, 0, 1)
  add_mask = (1 - tf.to_int32(previously_finished))
  lengths_to_add = tf.expand_dims(add_mask, 1) * lengths_to_add
  new_prediction_lengths = tf.expand_dims(prediction_lengths,
                                          1) + lengths_to_add
  # Calculate the scores for each beam
  scores = hyp_score(
      log_probs=total_probs,
      sequence_lengths=new_prediction_lengths,
      config=config)
  scores_flat = tf.reshape(scores, [-1])
  # During the first time step we only consider the initial beam
  scores_flat = tf.cond(
      tf.convert_to_tensor(time_) > 0, lambda: scores_flat, lambda: scores[0])
  # Pick the next beams according to the specified successors function
  next_beam_scores, word_indices = config.choose_successors_fn(scores_flat,
                                                               config)
  next_beam_scores.set_shape([config.beam_width])
  word_indices.set_shape([config.beam_width])
  # Pick out the probs, beam_ids, and states according to the chosen predictions
  total_probs_flat = tf.reshape(total_probs, [-1], name="total_probs_flat")
  next_beam_probs = tf.gather(total_probs_flat, word_indices)
  next_beam_probs.set_shape([config.beam_width])
  # Flat indices encode (beam, word); recover each component
  next_word_ids = tf.mod(word_indices, config.vocab_size)
  next_beam_ids = tf.div(word_indices, config.vocab_size)
  # A beam is finished if its parent was finished or it just emitted EOS
  next_finished = tf.logical_or(
      tf.gather(beam_state.finished, next_beam_ids),
      tf.equal(next_word_ids, config.eos_token))
  # Calculate the length of the next predictions.
  # 1. Finished beams remain unchanged
  # 2. Beams that are now finished (EOS predicted) remain unchanged
  # 3. Beams that are not yet finished have their length increased by 1
  lengths_to_add = tf.to_int32(tf.not_equal(next_word_ids, config.eos_token))
  lengths_to_add = (1 - tf.to_int32(next_finished)) * lengths_to_add
  next_prediction_len = tf.gather(beam_state.lengths, next_beam_ids)
  next_prediction_len += lengths_to_add
  next_state = BeamSearchState(
      log_probs=next_beam_probs,
      lengths=next_prediction_len,
      finished=next_finished)
  output = BeamSearchStepOutput(
      scores=next_beam_scores,
      predicted_ids=next_word_ids,
      beam_parent_ids=next_beam_ids)
  return output, next_state
| |
from __future__ import annotations
from typing import Callable, Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """
    Double Linked List Node built specifically for LFU Cache
    >>> node = DoubleLinkedListNode(1,1)
    >>> node
    Node: key: 1, val: 1, freq: 0, has next: False, has prev: False
    """

    def __init__(self, key: T | None, val: U | None):
        # Cached payload
        self.key = key
        self.val = val
        # Access frequency; bumped every time the node is (re)added to the list
        self.freq: int = 0
        # Neighboring nodes in the doubly linked list
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, freq: {self.freq}, "
            f"has next: {self.next is not None}, has prev: {self.prev is not None}"
        )
class DoubleLinkedList(Generic[T, U]):
    """
    Double Linked List built specifically for LFU Cache
    >>> dll: DoubleLinkedList = DoubleLinkedList()
    >>> dll
    DoubleLinkedList,
     Node: key: None, val: None, freq: 0, has next: True, has prev: False,
     Node: key: None, val: None, freq: 0, has next: False, has prev: True
    >>> first_node = DoubleLinkedListNode(1,10)
    >>> first_node
    Node: key: 1, val: 10, freq: 0, has next: False, has prev: False
    >>> dll.add(first_node)
    >>> dll
    DoubleLinkedList,
     Node: key: None, val: None, freq: 0, has next: True, has prev: False,
     Node: key: 1, val: 10, freq: 1, has next: True, has prev: True,
     Node: key: None, val: None, freq: 0, has next: False, has prev: True
    >>> # node is mutated
    >>> first_node
    Node: key: 1, val: 10, freq: 1, has next: True, has prev: True
    >>> second_node = DoubleLinkedListNode(2,20)
    >>> second_node
    Node: key: 2, val: 20, freq: 0, has next: False, has prev: False
    >>> dll.add(second_node)
    >>> dll
    DoubleLinkedList,
     Node: key: None, val: None, freq: 0, has next: True, has prev: False,
     Node: key: 1, val: 10, freq: 1, has next: True, has prev: True,
     Node: key: 2, val: 20, freq: 1, has next: True, has prev: True,
     Node: key: None, val: None, freq: 0, has next: False, has prev: True
    >>> removed_node = dll.remove(first_node)
    >>> assert removed_node == first_node
    >>> dll
    DoubleLinkedList,
     Node: key: None, val: None, freq: 0, has next: True, has prev: False,
     Node: key: 2, val: 20, freq: 1, has next: True, has prev: True,
     Node: key: None, val: None, freq: 0, has next: False, has prev: True
    >>> # Attempt to remove node not on list
    >>> removed_node = dll.remove(first_node)
    >>> removed_node is None
    True
    >>> # Attempt to remove head or rear
    >>> dll.head
    Node: key: None, val: None, freq: 0, has next: True, has prev: False
    >>> dll.remove(dll.head) is None
    True
    >>> # Attempt to remove head or rear
    >>> dll.rear
    Node: key: None, val: None, freq: 0, has next: False, has prev: True
    >>> dll.remove(dll.rear) is None
    True
    """

    def __init__(self) -> None:
        # Sentinel head/rear nodes avoid None checks in add/remove
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """
        Adds the given node at the tail of the list and shifting it to proper position
        """
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
        node.freq += 1
        self._position_node(node)

    def _position_node(self, node: DoubleLinkedListNode[T, U]) -> None:
        """
        Moves node backward (toward head) to maintain the invariant that the
        list is sorted by ascending freq value.
        """
        while node.prev is not None and node.prev.freq > node.freq:
            # Swap node with its predecessor, relinking all four affected
            # neighbors.  BUG FIX: the previous implementation set
            # `previous_node.next = node.prev` (pointing backwards) and never
            # updated the outer neighbors' links, corrupting the list into a
            # cycle whenever this loop actually ran.
            previous_node = node.prev
            outer_prev = previous_node.prev
            outer_next = node.next
            # Relink: outer_prev <-> node <-> previous_node <-> outer_next
            if outer_prev is not None:
                outer_prev.next = node
            node.prev = outer_prev
            node.next = previous_node
            previous_node.prev = node
            previous_node.next = outer_next
            if outer_next is not None:
                outer_next.prev = previous_node

    def remove(
        self, node: DoubleLinkedListNode[T, U]
    ) -> DoubleLinkedListNode[T, U] | None:
        """
        Removes and returns the given node from the list
        Returns None if node.prev or node.next is None
        """
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LFUCache(Generic[T, U]):
    """
    LFU Cache to store a given capacity of data. Can be used as a stand-alone object
    or as a function decorator.
    >>> cache = LFUCache(2)
    >>> cache.set(1, 1)
    >>> cache.set(2, 2)
    >>> cache.get(1)
    1
    >>> cache.set(3, 3)
    >>> cache.get(2) is None
    True
    >>> cache.set(4, 4)
    >>> cache.get(1) is None
    True
    >>> cache.get(3)
    3
    >>> cache.get(4)
    4
    >>> cache
    CacheInfo(hits=3, misses=2, capacity=2, current_size=2)
    >>> @LFUCache.decorator(100)
    ... def fib(num):
    ...     if num in (1, 2):
    ...         return 1
    ...     return fib(num - 1) + fib(num - 2)
    >>> for i in range(1, 101):
    ...     res = fib(i)
    >>> fib.cache_info()
    CacheInfo(hits=196, misses=100, capacity=100, current_size=100)
    """

    # class variable to map the decorator functions to their respective instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LFUCache[T, U]] = {}

    def __init__(self, capacity: int):
        # Frequency-ordered list (ascending), so head.next is the LFU entry
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        # Maps keys to their list nodes for O(1) lookup
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        """
        Return the details for the cache instance
        [hits, misses, capacity, current_size]
        """
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current_size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        """
        >>> cache = LFUCache(1)
        >>> 1 in cache
        False
        >>> cache.set(1, 1)
        >>> 1 in cache
        True
        """
        return key in self.cache

    def get(self, key: T) -> U | None:
        """
        Returns the value for the input key and updates the Double Linked List. Returns
        Returns None if key is not present in cache
        """
        if key not in self.cache:
            self.miss += 1
            return None
        self.hits += 1
        # Re-add the node so its frequency and list position are updated
        node = self.list.remove(self.cache[key])
        # node is guaranteed not None because it is in self.cache
        assert node is not None
        self.list.add(node)
        return node.val

    def set(self, key: T, value: U) -> None:
        """
        Sets the value for the input key and updates the Double Linked List
        """
        if key in self.cache:
            # Existing key: refresh value and bump frequency
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
            return
        if self.num_keys >= self.capacity:
            # Over capacity: evict head.next, the least frequently used node
            first_node = self.list.head.next
            # guaranteed to have a non-None first node when num_keys > 0
            # explain to type checker via assertions
            assert first_node is not None
            assert first_node.key is not None
            assert self.list.remove(first_node) is not None
            # first_node guaranteed to be in list
            del self.cache[first_node.key]
            self.num_keys -= 1
        new_node = DoubleLinkedListNode(key, value)
        self.cache[key] = new_node
        self.list.add(new_node)
        self.num_keys += 1

    @classmethod
    def decorator(
        cls: type[LFUCache[T, U]], size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """
        Decorator version of LFU Cache
        Decorated function must be function of T -> U
        """

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                # One shared cache instance per decorated function
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LFUCache(size)
                instance = cls.decorator_function_to_instance_map[func]
                result = instance.get(args[0])
                if result is None:
                    result = func(*args)
                    instance.set(args[0], result)
                return result

            def cache_info() -> LFUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Create the UCSC chain file which is needed to lift over from one coordinate
system to another.
File format:
<http://genome.ucsc.edu/goldenPath/help/chain.html>
chain 4900 chrY 58368225 + 25985403 25985638 chr5 151006098 - 43257292 43257528 1
9 1 0
10 0 5
48
Header Line:
chain score tName tSize tStrand tStart tEnd qName qSize qStrand qStart qEnd id
Alignment Data Lines
size dt dq
NOTE: The last line of the alignment section contains only one number: the ungapped
alignment size of the last block.
"""
import os.path as op
import sys
import logging
from jcvi.formats.base import BaseFile, read_block
from jcvi.apps.base import OptionParser, ActionDispatcher, sh, need_update, which
class ChainLine(object):
    """
    One chain record: the `chain` header line plus its alignment data lines.

    Each data line holds up to three integers (size, dt, dq); the last line of
    a block carries only the ungapped size and is padded with zeros.
    Aggregates `ungapped`, `dt` and `dq` totals across all blocks.
    """

    def __init__(self, chain, lines):
        self.chain = chain
        self.blocks = []
        for line in lines:
            atoms = line.split()
            # Skip blank lines BEFORE the single-number padding check; the
            # original checked emptiness after padding, which was dead code.
            if not atoms:
                continue
            if len(atoms) == 1:
                # Final line of a block: only the ungapped size is present
                atoms += [0, 0]
            self.blocks.append([int(x) for x in atoms])
        if self.blocks:
            self.ungapped, self.dt, self.dq = zip(*self.blocks)
            self.ungapped = sum(self.ungapped)
            self.dt = sum(self.dt)
            self.dq = sum(self.dq)
        else:
            # Robustness: zip(*[]) previously raised ValueError on input with
            # no data lines; treat it as an empty alignment instead.
            self.ungapped = self.dt = self.dq = 0
class Chain(BaseFile):
    """Parsed UCSC chain file; aggregates ungapped/dt/dq sizes over all chains."""

    def __init__(self, filename):
        super(Chain, self).__init__(filename)
        self.chains = list(self.iter_chain())
        # Totals across every chain record in the file
        self.ungapped = sum(x.ungapped for x in self.chains)
        self.dt = sum(x.dt for x in self.chains)
        self.dq = sum(x.dq for x in self.chains)

    def __len__(self):
        # Number of chain records parsed from the file
        return len(self.chains)

    def iter_chain(self):
        # NOTE(review): the handle is never closed explicitly; it is only
        # released when garbage-collected.
        fp = open(self.filename)
        # Skip leading '#' comment lines.  NOTE(review): this loop also
        # consumes the first non-comment line before read_block sees the
        # stream — presumably read_block tolerates this; confirm.
        for row in fp:
            if row[0] != "#":
                break
        for chain, lines in read_block(fp, "chain"):
            lines = list(lines)
            yield ChainLine(chain, lines)
def main():
    """Dispatch the chosen chain-file action based on command-line args."""
    actions = (
        ("blat", "generate PSL file using BLAT"),
        ("frompsl", "generate chain file from PSL format"),
        ("fromagp", "generate chain file from AGP format"),
        ("summary", "provide stats of the chain file"),
    )
    dispatcher = ActionDispatcher(actions)
    dispatcher.dispatch(globals())
def summary(args):
    """
    %prog summary old.new.chain old.fasta new.fasta
    Provide stats of the chain file.
    """
    from jcvi.formats.fasta import summary as fsummary
    from jcvi.utils.cbook import percentage, human_size

    p = OptionParser(summary.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    chainfile, oldfasta, newfasta = args
    chain = Chain(chainfile)
    ungapped, dt, dq = chain.ungapped, chain.dt, chain.dq
    # Report chain counts and aggregate alignment sizes on stderr
    print(
        f"File `{chainfile}` contains {len(chain)} chains.",
        file=sys.stderr,
    )
    print(
        f"ungapped={human_size(ungapped)} dt={human_size(dt)} dq={human_size(dq)}",
        file=sys.stderr,
    )

    # Coverage of the old and new assemblies by the ungapped alignments
    oldreal, oldnn, oldlen = fsummary([oldfasta, "--outfile=/dev/null"])
    print(
        f"Old fasta (`{oldfasta}`) mapped: {percentage(ungapped, oldreal)}",
        file=sys.stderr,
    )
    newreal, newnn, newlen = fsummary([newfasta, "--outfile=/dev/null"])
    print(
        f"New fasta (`{newfasta}`) mapped: {percentage(ungapped, newreal)}",
        file=sys.stderr,
    )
def fromagp(args):
    """
    %prog fromagp agpfile componentfasta objectfasta
    Generate chain file from AGP format. The components represent the old
    genome (target) and the objects represent new genome (query).
    """
    from jcvi.formats.agp import AGP
    from jcvi.formats.sizes import Sizes

    p = OptionParser(fromagp.__doc__)
    p.add_option(
        "--novalidate", default=False, action="store_true", help="Do not validate AGP"
    )
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    agpfile, componentfasta, objectfasta = args
    chainfile = agpfile.rsplit(".", 1)[0] + ".chain"
    agp = AGP(agpfile, validate=(not opts.novalidate))
    componentsizes = Sizes(componentfasta).mapping
    objectsizes = Sizes(objectfasta).mapping
    chain = "chain"
    score = 1000
    tStrand = "+"
    chain_id = 0  # renamed from `id` to avoid shadowing the builtin
    # Context manager guarantees the chain file is closed even on error
    # (the original leaked the handle if an exception was raised mid-loop).
    with open(chainfile, "w") as fw:
        for a in agp:
            if a.is_gap:
                continue

            tName = a.component_id
            tSize = componentsizes[tName]
            # Chain coordinates are 0-based half-open; AGP is 1-based inclusive
            tStart = a.component_beg - 1
            tEnd = a.component_end

            qName = a.object
            qSize = objectsizes[qName]
            qStrand = "-" if a.orientation == "-" else "+"
            qStart = a.object_beg
            qEnd = a.object_end
            if qStrand == "-":
                # Flip query coordinates onto the reverse strand
                qStart, qEnd = qSize - qEnd + 1, qSize - qStart + 1
            qStart -= 1

            chain_id += 1
            size = a.object_span
            headerline = "\t".join(
                str(x)
                for x in (
                    chain,
                    score,
                    tName,
                    tSize,
                    tStrand,
                    tStart,
                    tEnd,
                    qName,
                    qSize,
                    qStrand,
                    qStart,
                    qEnd,
                    chain_id,
                )
            )
            # One ungapped block per AGP component, followed by a blank line
            print(headerline, file=fw)
            print(size, file=fw)
            print(file=fw)

    logging.debug("File written to `%s`.", chainfile)
def faToTwoBit(fastafile):
    """Convert `fastafile` to UCSC 2bit format; skipped when already current."""
    twobitfile = fastafile.rsplit(".", 1)[0] + ".2bit"
    # Only regenerate when the fasta is newer than the existing 2bit file
    if need_update(fastafile, twobitfile):
        sh("faToTwoBit {0} {1}".format(fastafile, twobitfile))
    return twobitfile
def blat(args):
    """
    %prog blat old.fasta new.fasta
    Generate psl file using blat.
    """
    p = OptionParser(blat.__doc__)
    p.add_option(
        "--minscore",
        default=100,
        type="int",
        help="Matches minus mismatches gap penalty",
    )
    p.add_option(
        "--minid",
        default=98,
        type="int",
        help="Minimum sequence identity",
    )
    p.set_cpus()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    oldfasta, newfasta = args
    # Convert both inputs to 2bit (regeneration is skipped when up to date)
    oldtwobit, newtwobit = [faToTwoBit(fastafile) for fastafile in args]

    # Prefer the multi-threaded pblat when it is on PATH
    cmd = "pblat -threads={0}".format(opts.cpus) if which("pblat") else "blat"
    # NOTE(review): the query is passed as fasta (newfasta), not the 2bit
    # file computed above — presumably intentional since blat accepts both;
    # confirm before relying on newtwobit.
    cmd += " {0} {1}".format(oldtwobit, newfasta)
    cmd += " -tileSize=12 -minScore={0} -minIdentity={1} ".format(
        opts.minscore, opts.minid
    )
    pslfile = "{0}.{1}.psl".format(
        *(op.basename(x).split(".")[0] for x in (newfasta, oldfasta))
    )
    sh(cmd + pslfile)
def frompsl(args):
    """
    %prog frompsl old.new.psl old.fasta new.fasta
    Generate chain file from psl file. The pipeline is describe in:
    <http://genomewiki.ucsc.edu/index.php/Minimal_Steps_For_LiftOver>
    """
    from jcvi.formats.sizes import Sizes

    p = OptionParser(frompsl.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    pslfile, oldfasta, newfasta = args
    pf = oldfasta.split(".")[0]

    # Each pipeline stage below is skipped when its output is newer than its
    # inputs (need_update), making the whole pipeline resumable.
    # Chain together alignments from using axtChain
    chainfile = pf + ".chain"
    twobitfiles = []
    for fastafile in (oldfasta, newfasta):
        tbfile = faToTwoBit(fastafile)
        twobitfiles.append(tbfile)
    oldtwobit, newtwobit = twobitfiles
    if need_update(pslfile, chainfile):
        cmd = "axtChain -linearGap=medium -psl {0}".format(pslfile)
        cmd += " {0} {1} {2}".format(oldtwobit, newtwobit, chainfile)
        sh(cmd)

    # Sort chain files
    sortedchain = chainfile.rsplit(".", 1)[0] + ".sorted.chain"
    if need_update(chainfile, sortedchain):
        cmd = "chainSort {0} {1}".format(chainfile, sortedchain)
        sh(cmd)

    # Make alignment nets from chains
    netfile = pf + ".net"
    oldsizes = Sizes(oldfasta).filename
    newsizes = Sizes(newfasta).filename
    if need_update((sortedchain, oldsizes, newsizes), netfile):
        cmd = "chainNet {0} {1} {2}".format(sortedchain, oldsizes, newsizes)
        cmd += " {0} /dev/null".format(netfile)
        sh(cmd)

    # Create liftOver chain file
    liftoverfile = pf + ".liftover.chain"
    if need_update((netfile, sortedchain), liftoverfile):
        cmd = "netChainSubset {0} {1} {2}".format(netfile, sortedchain, liftoverfile)
        sh(cmd)
if __name__ == "__main__":
    # Command-line entry point: dispatch to one of the registered actions.
    main()
| |
"""Test for the LCN cover platform."""
from unittest.mock import patch
from pypck.inputs import ModStatusOutput, ModStatusRelays
from pypck.lcn_addr import LcnAddr
from pypck.lcn_defs import MotorReverseTime, MotorStateModifier
from homeassistant.components.cover import DOMAIN as DOMAIN_COVER
from homeassistant.components.lcn.helpers import get_device_connection
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_STOP_COVER,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
STATE_UNAVAILABLE,
)
from .conftest import MockModuleConnection
async def test_setup_lcn_cover(hass, entry, lcn_connection):
    """Test the setup of cover."""
    # Both cover flavors (output-driven and relay-driven) must exist and
    # start in the open state.
    for entity_id in ("cover.cover_outputs", "cover.cover_relays"):
        state = hass.states.get(entity_id)
        assert state is not None
        assert state.state == STATE_OPEN
async def test_entity_attributes(hass, entry, lcn_connection):
    """Test the attributes of an entity."""
    entity_registry = await hass.helpers.entity_registry.async_get_registry()

    # Output-driven cover: unique id derives from entry id + module address
    entity_outputs = entity_registry.async_get("cover.cover_outputs")

    assert entity_outputs
    assert entity_outputs.unique_id == f"{entry.entry_id}-m000007-outputs"
    assert entity_outputs.original_name == "Cover_Outputs"

    # Relay-driven cover: addressed via the motor1 resource
    entity_relays = entity_registry.async_get("cover.cover_relays")

    assert entity_relays
    assert entity_relays.unique_id == f"{entry.entry_id}-m000007-motor1"
    assert entity_relays.original_name == "Cover_Relays"
@patch.object(MockModuleConnection, "control_motors_outputs")
async def test_outputs_open(control_motors_outputs, hass, lcn_connection):
    """Test the outputs cover opens."""
    state = hass.states.get("cover.cover_outputs")
    state.state = STATE_CLOSED

    # command failed: the module rejects the request
    control_motors_outputs.return_value = False

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_OPEN_COVER,
        {ATTR_ENTITY_ID: "cover.cover_outputs"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_outputs.assert_awaited_with(
        MotorStateModifier.UP, MotorReverseTime.RT1200
    )

    # a failed command must not transition the entity to "opening"
    state = hass.states.get("cover.cover_outputs")
    assert state is not None
    assert state.state != STATE_OPENING

    # command success
    control_motors_outputs.reset_mock(return_value=True)
    control_motors_outputs.return_value = True

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_OPEN_COVER,
        {ATTR_ENTITY_ID: "cover.cover_outputs"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_outputs.assert_awaited_with(
        MotorStateModifier.UP, MotorReverseTime.RT1200
    )

    # a successful command moves the cover into the "opening" state
    state = hass.states.get("cover.cover_outputs")
    assert state is not None
    assert state.state == STATE_OPENING
@patch.object(MockModuleConnection, "control_motors_outputs")
async def test_outputs_close(control_motors_outputs, hass, lcn_connection):
    """Test the outputs cover closes."""
    state = hass.states.get("cover.cover_outputs")
    state.state = STATE_OPEN

    # command failed: the module rejects the request
    control_motors_outputs.return_value = False

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_CLOSE_COVER,
        {ATTR_ENTITY_ID: "cover.cover_outputs"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_outputs.assert_awaited_with(
        MotorStateModifier.DOWN, MotorReverseTime.RT1200
    )

    # a failed command must not transition the entity to "closing"
    state = hass.states.get("cover.cover_outputs")
    assert state is not None
    assert state.state != STATE_CLOSING

    # command success
    control_motors_outputs.reset_mock(return_value=True)
    control_motors_outputs.return_value = True

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_CLOSE_COVER,
        {ATTR_ENTITY_ID: "cover.cover_outputs"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_outputs.assert_awaited_with(
        MotorStateModifier.DOWN, MotorReverseTime.RT1200
    )

    # a successful command moves the cover into the "closing" state
    state = hass.states.get("cover.cover_outputs")
    assert state is not None
    assert state.state == STATE_CLOSING
@patch.object(MockModuleConnection, "control_motors_outputs")
async def test_outputs_stop(control_motors_outputs, hass, lcn_connection):
    """Test the outputs cover stops."""
    state = hass.states.get("cover.cover_outputs")
    state.state = STATE_CLOSING

    # command failed: the module rejects the request
    control_motors_outputs.return_value = False

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_STOP_COVER,
        {ATTR_ENTITY_ID: "cover.cover_outputs"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_outputs.assert_awaited_with(MotorStateModifier.STOP)

    # a failed stop leaves the entity in its previous (closing) state
    state = hass.states.get("cover.cover_outputs")
    assert state is not None
    assert state.state == STATE_CLOSING

    # command success
    control_motors_outputs.reset_mock(return_value=True)
    control_motors_outputs.return_value = True

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_STOP_COVER,
        {ATTR_ENTITY_ID: "cover.cover_outputs"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_outputs.assert_awaited_with(MotorStateModifier.STOP)

    # a successful stop leaves the cover in neither moving state
    state = hass.states.get("cover.cover_outputs")
    assert state is not None
    assert state.state not in (STATE_CLOSING, STATE_OPENING)
@patch.object(MockModuleConnection, "control_motors_relays")
async def test_relays_open(control_motors_relays, hass, lcn_connection):
    """Test the relays cover opens."""
    # Only motor 1 is commanded UP; the other three motors stay untouched
    states = [MotorStateModifier.NOCHANGE] * 4
    states[0] = MotorStateModifier.UP

    state = hass.states.get("cover.cover_relays")
    state.state = STATE_CLOSED

    # command failed: the module rejects the request
    control_motors_relays.return_value = False

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_OPEN_COVER,
        {ATTR_ENTITY_ID: "cover.cover_relays"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_relays.assert_awaited_with(states)

    # a failed command must not transition the entity to "opening"
    state = hass.states.get("cover.cover_relays")
    assert state is not None
    assert state.state != STATE_OPENING

    # command success
    control_motors_relays.reset_mock(return_value=True)
    control_motors_relays.return_value = True

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_OPEN_COVER,
        {ATTR_ENTITY_ID: "cover.cover_relays"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_relays.assert_awaited_with(states)

    # a successful command moves the cover into the "opening" state
    state = hass.states.get("cover.cover_relays")
    assert state is not None
    assert state.state == STATE_OPENING
@patch.object(MockModuleConnection, "control_motors_relays")
async def test_relays_close(control_motors_relays, hass, lcn_connection):
    """Test the relays cover closes."""
    # Only motor 1 is commanded DOWN; the other three motors stay untouched
    states = [MotorStateModifier.NOCHANGE] * 4
    states[0] = MotorStateModifier.DOWN

    state = hass.states.get("cover.cover_relays")
    state.state = STATE_OPEN

    # command failed: the module rejects the request
    control_motors_relays.return_value = False

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_CLOSE_COVER,
        {ATTR_ENTITY_ID: "cover.cover_relays"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_relays.assert_awaited_with(states)

    # a failed command must not transition the entity to "closing"
    state = hass.states.get("cover.cover_relays")
    assert state is not None
    assert state.state != STATE_CLOSING

    # command success
    control_motors_relays.reset_mock(return_value=True)
    control_motors_relays.return_value = True

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_CLOSE_COVER,
        {ATTR_ENTITY_ID: "cover.cover_relays"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_relays.assert_awaited_with(states)

    # a successful command moves the cover into the "closing" state
    state = hass.states.get("cover.cover_relays")
    assert state is not None
    assert state.state == STATE_CLOSING
@patch.object(MockModuleConnection, "control_motors_relays")
async def test_relays_stop(control_motors_relays, hass, lcn_connection):
    """Test the relays cover stops."""
    # Only motor 1 is commanded STOP; the other three motors stay untouched
    states = [MotorStateModifier.NOCHANGE] * 4
    states[0] = MotorStateModifier.STOP

    state = hass.states.get("cover.cover_relays")
    state.state = STATE_CLOSING

    # command failed: the module rejects the request
    control_motors_relays.return_value = False

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_STOP_COVER,
        {ATTR_ENTITY_ID: "cover.cover_relays"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_relays.assert_awaited_with(states)

    # a failed stop leaves the entity in its previous (closing) state
    state = hass.states.get("cover.cover_relays")
    assert state is not None
    assert state.state == STATE_CLOSING

    # command success
    control_motors_relays.reset_mock(return_value=True)
    control_motors_relays.return_value = True

    await hass.services.async_call(
        DOMAIN_COVER,
        SERVICE_STOP_COVER,
        {ATTR_ENTITY_ID: "cover.cover_relays"},
        blocking=True,
    )
    await hass.async_block_till_done()
    control_motors_relays.assert_awaited_with(states)

    # a successful stop leaves the cover in neither moving state
    state = hass.states.get("cover.cover_relays")
    assert state is not None
    assert state.state not in (STATE_CLOSING, STATE_OPENING)
async def test_pushed_outputs_status_change(hass, entry, lcn_connection):
    """Test the outputs cover changes its state on status received."""
    device_connection = get_device_connection(hass, (0, 7, False), entry)
    address = LcnAddr(0, 7, False)

    state = hass.states.get("cover.cover_outputs")
    state.state = STATE_CLOSED

    # Local renamed from `input` to avoid shadowing the builtin.
    # push status "open": output 0 at 100%
    status_input = ModStatusOutput(address, 0, 100)
    await device_connection.async_process_input(status_input)
    await hass.async_block_till_done()

    state = hass.states.get("cover.cover_outputs")
    assert state is not None
    assert state.state == STATE_OPENING

    # push status "stop": output 0 at 0%
    status_input = ModStatusOutput(address, 0, 0)
    await device_connection.async_process_input(status_input)
    await hass.async_block_till_done()

    state = hass.states.get("cover.cover_outputs")
    assert state is not None
    assert state.state not in (STATE_OPENING, STATE_CLOSING)

    # push status "close": output 1 at 100%
    status_input = ModStatusOutput(address, 1, 100)
    await device_connection.async_process_input(status_input)
    await hass.async_block_till_done()

    state = hass.states.get("cover.cover_outputs")
    assert state is not None
    assert state.state == STATE_CLOSING
async def test_pushed_relays_status_change(hass, entry, lcn_connection):
    """Test the relays cover changes its state on status received."""
    device_connection = get_device_connection(hass, (0, 7, False), entry)
    address = LcnAddr(0, 7, False)
    states = [False] * 8

    state = hass.states.get("cover.cover_relays")
    state.state = STATE_CLOSED

    # Local renamed from `input` to avoid shadowing the builtin.
    # push status "open": motor relay on, direction relay off
    states[0:2] = [True, False]
    status_input = ModStatusRelays(address, states)
    await device_connection.async_process_input(status_input)
    await hass.async_block_till_done()

    state = hass.states.get("cover.cover_relays")
    assert state is not None
    assert state.state == STATE_OPENING

    # push status "stop": motor relay off
    states[0] = False
    status_input = ModStatusRelays(address, states)
    await device_connection.async_process_input(status_input)
    await hass.async_block_till_done()

    state = hass.states.get("cover.cover_relays")
    assert state is not None
    assert state.state not in (STATE_OPENING, STATE_CLOSING)

    # push status "close": motor relay on, direction relay on
    states[0:2] = [True, True]
    status_input = ModStatusRelays(address, states)
    await device_connection.async_process_input(status_input)
    await hass.async_block_till_done()

    state = hass.states.get("cover.cover_relays")
    assert state is not None
    assert state.state == STATE_CLOSING
async def test_unload_config_entry(hass, entry, lcn_connection):
    """Test the cover is removed when the config entry is unloaded."""
    await hass.config_entries.async_unload(entry.entry_id)
    # After unload both cover entities must report unavailable
    assert hass.states.get("cover.cover_outputs").state == STATE_UNAVAILABLE
    assert hass.states.get("cover.cover_relays").state == STATE_UNAVAILABLE
| |
'''
Created on Feb 02, 2014
@author: Mark V Systems Limited
(c) Copyright 2014 Mark V Systems Limited, All rights reserved.
'''
try:
import regex as re
except ImportError:
import re
from collections import defaultdict
import os, io, json
from datetime import datetime, timedelta
from arelle import XbrlConst
from arelle.ModelDtsObject import ModelConcept
# regular expression components
STMT = r".* - statement - "
notDET = r"(?!.*details)"
notCMPRH = r"(?!.*comprehensive)"
isCMPRH = r"(?=.*comprehensive)"
''' common mis-spellings of parenthetical to match successfully (from 2013 SEC filings)
paranthetical
parenthical
parentheical
parenthtical
parenthethical
parenthentical
prenthetical
parenethetical
use a regular expression that is forgiving on at least the above
and doens't match variations of parent, transparent, etc.
'''
rePARENTHETICAL = r"pa?r[ae]ne?th\w?[aei]+\w?t?h?i?c"
notPAR = "(?!.*" + rePARENTHETICAL + ")"
isPAR = "(?=.*" + rePARENTHETICAL + ")"
UGT_TOPICS = None
def RE(*args):
    """Join the given pattern fragments and compile them case-insensitively."""
    pattern = ''.join(args)
    return re.compile(pattern, re.IGNORECASE)
# NOTE: This is an early experimental implementation of statement detection
# it is not in a finished status at this time.
# Each entry is a 3-tuple:
#   (table code, compiled regex tested against the ELR definition,
#    tuple of acceptable presentation-root concept local names, or None to skip that check)
EFMtableCodes = [
    # ELRs are parsed for these patterns in sort order until there is one match per code
    # sheet(s) may be plural
    # statement detection including root element of presentation link role
    ("BS", RE(STMT, notDET, notPAR), ("StatementOfFinancialPositionAbstract",)),
    ("BSP", RE(STMT, notDET, isPAR), ("StatementOfFinancialPositionAbstract",)),
    ("IS", RE(STMT, notDET, notPAR), ("IncomeStatementAbstract",)),
    ("ISP", RE(STMT, notDET, isPAR), ("IncomeStatementAbstract",)),
    ("CI", RE(STMT, notDET, notPAR), ("StatementOfIncomeAndComprehensiveIncomeAbstract",)),
    ("CIP", RE(STMT, notDET, isPAR), ("StatementOfIncomeAndComprehensiveIncomeAbstract",)),
    ("EQ", RE(STMT, notDET, notPAR), ("StatementOfStockholdersEquityAbstract","StatementOfPartnersCapitalAbstract")),
    ("EQP", RE(STMT, notDET, isPAR), ("StatementOfStockholdersEquityAbstract","StatementOfPartnersCapitalAbstract")),
    ("CF", RE(STMT, notDET, notPAR), ("StatementOfCashFlowsAbstract",)),
    ("CFP", RE(STMT, notDET, isPAR), ("StatementOfCashFlowsAbstract",)),
    ("CA", RE(STMT, notDET, notPAR), ("CapitalizationLongtermDebtAndEquityAbstract",)),
    ("CAP", RE(STMT, notDET, isPAR), ("CapitalizationLongtermDebtAndEquityAbstract",)),
    ("IN", RE(STMT, notDET, notPAR), ("ScheduleOfInvestmentsAbstract",)),
    ("INP", RE(STMT, notDET, isPAR), ("ScheduleOfInvestmentsAbstract",)),
    # statement detection without considering root elements
    ("DEI", RE(r".* - (document|statement) - .*document\W+.*entity\W+.*information"), None),
    ("BS", RE(STMT, notDET, notPAR, r".*balance\W+sheet"), None),
    ("BSP", RE(STMT, notDET, isPAR, r".*balance\W+sheet"), None),
    ("CF", RE(STMT, notDET, notPAR, r".*cash\W*flow"), None),
    ("IS", RE(STMT, notDET, notPAR, notCMPRH, r".*(income|loss)"), None),
    ("ISP", RE(STMT, notDET, isPAR, notCMPRH, r".*(income|loss)"), None),
    ("CI", RE(STMT, notDET, notPAR, isCMPRH, r".*(income|loss|earnings)"), None),
    ("CIP", RE(STMT, notDET, isPAR, isCMPRH, r".*(income|loss|earnings)"), None),
    ("CA", RE(STMT, notDET, notPAR, r".*capitali[sz]ation"), None),
    ("CAP", RE(STMT, notDET, isPAR, r".*capitali[sz]ation"), None),
    ("EQ", RE(STMT, notDET, notPAR, r".*(equity|capital)"), None),
    ("EQP", RE(STMT, notDET, isPAR, r".*(equity|capital)"), None),
    ("IS", RE(STMT, notDET, notPAR, r".*(income|operations|earning)"), None),
    ("EQ", RE(STMT, notDET, notPAR, r".*def[ei][cs]it"), None),
    ("ISP", RE(STMT, notDET, isPAR, r".*(income|operations|earning)"), None),
    ("CFP", RE(STMT, notDET, isPAR, r".*cash\W*flow.*"), None),
    ("IS", RE(STMT, notDET, notPAR, r".*loss"), None),
    ("ISP", RE(STMT, notDET, isPAR, r".*loss"), None),
    ("BS", RE(STMT, notDET, notPAR, r".*(position|condition)"), None),
    ("BSP", RE(STMT, notDET, isPAR, r".*(position|condition)"), None),
    ("SE", RE(STMT, notDET, notPAR, r"(?=.*equity).*comprehensive"), None),
    ("EQ", RE(STMT, notDET, notPAR, r".*shareholder[']?s[']?\W+investment"), None),
    ("EQP", RE(STMT, notDET, isPAR, r".*shareholder[']?s[']?\W+investment"), None),
    ("EQ", RE(STMT, notDET, notPAR, r".*retained\W+earning"), None),
    ("IN", RE(STMT, notDET, notPAR, r".*investment"), None),
    ("INP", RE(STMT, notDET, isPAR, r".*investment"), None),
    ("LA", RE(STMT, notDET, notPAR, r"(?!.*changes)(?=.*assets).*liquidati"), None),
    ("LC", RE(STMT, notDET, notPAR, r"(?=.*changes)(?=.*assets).*liquidati"), None),
    ("IS", RE(STMT, notDET, notPAR, r"(?=.*disc).*operation"), None),
    ("BS", RE(STMT, notDET, notPAR, r"(?!.*changes).*assets"), None),
    ("BSP", RE(STMT, notDET, isPAR, r"(?!.*changes).*assets"), None),
    ("EQ", RE(STMT, notDET, notPAR, r"(?=.*changes).*assets"), None),
    ("EQP", RE(STMT, notDET, isPAR, r"(?=.*changes).*assets"), None),
    ("FH", RE(STMT, notDET, notPAR, r"(?=.*financial).*highlight"), None),
    ("FHP", RE(STMT, notDET, isPAR, r"(?=.*financial).*highlight"), None),
    ("EQ", RE(STMT, notDET, notPAR, r"(?=.*reserve).*trust"), None),
    ("EQP", RE(STMT, notDET, isPAR, r"(?=.*reserve).*trust"), None),
    ("LC", RE(STMT, notDET, notPAR, r"(?=.*activities).*liquidati"), None),
    ("EQP", RE(STMT, notDET, isPAR, r".*def[ei][cs]it"), None),
    ("BSV", RE(STMT, notDET, notPAR, r".*net\W+asset\W+value"), None),
    ("CFS", RE(STMT, notDET, notPAR, r".*cash\W*flows\W+supplemental"), None),
    ("LAP", RE(STMT, notDET, isPAR, r".*(?!.*changes)(?=.*assets).*liquidati"), None)
]
HMRCtableCodes = [
    # ELRs are parsed for these patterns in sort order until there is one match per code
    # sheet(s) may be plural
    ("DEI", RE(r".*entity\W+.*information.*"), None),
    ("BS", RE(r".*balance\W+sheet.*"), None),
    ("IS", RE(r".*loss"), None),
    ("CF", RE(r".*cash\W*flow.*"), None),
    ("SE", RE(r".*(shareholder|equity).*"), None),
]
def evaluateRoleTypesTableCodes(modelXbrl):
    """Assign a statement table code to each presentation link role.

    For EFM or HMRC disclosure systems, matches each ELR definition against
    the ordered pattern table (EFMtableCodes / HMRCtableCodes) and records the
    first qualifying code on roleType._tableCode; for any other disclosure
    system every roleType._tableCode is set to None.

    Fix: roleType.definition may be None — sorting on it raises TypeError on
    Python 3 and pattern.match(None) raises TypeError everywhere, so None
    definitions are now sorted as "" and skipped during matching.
    """
    disclosureSystem = modelXbrl.modelManager.disclosureSystem
    if disclosureSystem.validationType in ("EFM", "HMRC"):
        detectMultipleOfCode = False
        if disclosureSystem.validationType == "EFM":
            tableCodes = list(EFMtableCodes)  # separate copy of list so entries can be deleted
            # for Registration and resubmission allow detecting multiple of code
            detectMultipleOfCode = any(v and any(v.startswith(dt) for dt in ('S-', 'F-', '8-K', '6-K'))
                                       for docTypeConcept in modelXbrl.nameConcepts.get('DocumentType', ())
                                       for docTypeFact in modelXbrl.factsByQname.get(docTypeConcept.qname, ())
                                       for v in (docTypeFact.value,))
        elif disclosureSystem.validationType == "HMRC":
            tableCodes = list(HMRCtableCodes)  # separate copy of list so entries can be deleted
        codeRoleURI = {}  # lookup by code for roleURI
        roleURICode = {}  # lookup by roleURI
        # resolve structural model
        roleTypes = [roleType
                     for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris
                     for roleType in modelXbrl.roleTypes.get(roleURI, ())]
        # None definitions sort as "" so mixed None/str lists don't raise
        roleTypes.sort(key=lambda roleType: roleType.definition or "")
        # assign code to table link roles (Presentation ELRs)
        for roleType in roleTypes:
            definition = roleType.definition
            if not definition:
                continue  # nothing to pattern-match against
            rootConcepts = None
            for i, tableCode in enumerate(tableCodes):
                code, pattern, rootConceptNames = tableCode
                if (detectMultipleOfCode or code not in codeRoleURI) and pattern.match(definition):
                    if rootConceptNames and rootConcepts is None:
                        rootConcepts = modelXbrl.relationshipSet(XbrlConst.parentChild, roleType.roleURI).rootConcepts
                    if (not rootConceptNames or
                            any(rootConcept.name in rootConceptNames for rootConcept in rootConcepts)):
                        codeRoleURI[code] = roleType.roleURI
                        roleURICode[roleType.roleURI] = code
                        if not detectMultipleOfCode:
                            del tableCodes[i]  # done with looking at this code
                        break
        # record the assigned code (or None) on every role type
        for roleTypes in modelXbrl.roleTypes.values():
            for roleType in roleTypes:
                roleType._tableCode = roleURICode.get(roleType.roleURI)
    else:
        # non-EFM/HMRC disclosure systems: no table codes apply
        for roleTypes in modelXbrl.roleTypes.values():
            for roleType in roleTypes:
                roleType._tableCode = None
def evaluateTableIndex(modelXbrl, lang=None):
    """Categorize presentation link roles into a financial-report index.

    Annotates roleType objects in place (_tableIndex, _tableFacts,
    _tableChildren, _tableParent and, when the UGT topics file loads,
    _tableTopic* attributes) and returns a 2-tuple
    (first linkrole URI to show, restriction linkrole URI or None).
    The style of processing depends on the disclosure system: us-gaap/EFM
    style ELR definitions, jp-fsa, or IFRS-numbered definitions.
    """
    usgaapRoleDefinitionPattern = re.compile(r"([0-9]+) - (Statement|Disclosure|Schedule|Document) - (.+)")
    ifrsRoleDefinitionPattern = re.compile(r"\[([0-9]+)\] (.+)")
    # build EFM rendering-compatible index
    definitionElrs = dict((modelXbrl.roleTypeDefinition(roleURI, lang), roleType)
                          for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris
                          for roleType in modelXbrl.roleTypes.get(roleURI, ()))
    sortedRoleTypes = sorted(definitionElrs.items(), key=lambda item: item[0])
    disclosureSystem = modelXbrl.modelManager.disclosureSystem
    _usgaapStyleELRs = _isJpFsa = _ifrsStyleELRs = False
    if disclosureSystem.validationType == "EFM":
        _usgaapStyleELRs = True
    elif "jp-fsa" in modelXbrl.modelManager.disclosureSystem.names:
        _isJpFsa = True
    else:
        # attempt to determine type from the shape of the ELR definitions
        if any(usgaapRoleDefinitionPattern.match(r[0]) for r in sortedRoleTypes if r[0]):
            _usgaapStyleELRs = True
        elif any(ifrsRoleDefinitionPattern.match(r[0]) for r in sortedRoleTypes if r[0]):
            _ifrsStyleELRs = True
    if _usgaapStyleELRs:
        # group names carry a leading digit so they sort in report order
        COVER = "1Cover"
        STMTS = "2Financial Statements"
        NOTES = "3Notes to Financial Statements"
        POLICIES = "4Accounting Policies"
        TABLES = "5Notes Tables"
        DETAILS = "6Notes Details"
        UNCATEG = "7Uncategorized"
        # risk/return filings put everything under Cover
        isRR = any(ns.startswith("http://xbrl.sec.gov/rr/") for ns in modelXbrl.namespaceDocs.keys() if ns)
        tableGroup = None
        firstTableLinkroleURI = None
        firstDocumentLinkroleURI = None
        # state machine: tableGroup advances through the groups as the sorted
        # definitions are scanned; each roleType gets its group/seq/name
        for roleDefinition, roleType in sortedRoleTypes:
            roleType._tableChildren = []
            match = usgaapRoleDefinitionPattern.match(roleDefinition) if roleDefinition else None
            if not match:
                roleType._tableIndex = (UNCATEG, "", roleType.roleURI)
                continue
            seq, tblType, tblName = match.groups()
            if isRR:
                tableGroup = COVER
            elif not tableGroup:
                tableGroup = ("Paren" in tblName and COVER or tblType == "Statement" and STMTS or
                              "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or COVER)
            elif tableGroup == COVER:
                tableGroup = (tblType == "Statement" and STMTS or "Paren" in tblName and COVER or
                              "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or NOTES)
            elif tableGroup == STMTS:
                tableGroup = ((tblType == "Statement" or "Paren" in tblName) and STMTS or
                              "(Polic" in tblName and NOTES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or NOTES)
            elif tableGroup == NOTES:
                tableGroup = ("(Polic" in tblName and POLICIES or "(Table" in tblName and TABLES or
                              "(Detail" in tblName and DETAILS or tblType == "Disclosure" and NOTES or UNCATEG)
            elif tableGroup == POLICIES:
                tableGroup = ("(Table" in tblName and TABLES or "(Detail" in tblName and DETAILS or
                              ("Paren" in tblName or "(Polic" in tblName) and POLICIES or UNCATEG)
            elif tableGroup == TABLES:
                tableGroup = ("(Detail" in tblName and DETAILS or
                              ("Paren" in tblName or "(Table" in tblName) and TABLES or UNCATEG)
            elif tableGroup == DETAILS:
                tableGroup = (("Paren" in tblName or "(Detail" in tblName) and DETAILS or UNCATEG)
            else:
                tableGroup = UNCATEG
            if firstTableLinkroleURI is None and tableGroup == COVER:
                firstTableLinkroleURI = roleType.roleURI
            if tblType == "Document" and not firstDocumentLinkroleURI:
                firstDocumentLinkroleURI = roleType.roleURI
            roleType._tableIndex = (tableGroup, seq, tblName)
        # flow allocate facts to roles (SEC presentation groups)
        if not modelXbrl.qnameDimensionDefaults:  # may not have run validation yet
            from arelle import ValidateXbrlDimensions
            ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
        reportedFacts = set()  # facts which were shown in a higher-numbered ELR table
        factsByQname = modelXbrl.factsByQname
        reportingPeriods = set()
        nextEnd = None
        deiFact = {}
        # collect reporting-period candidates from DEI facts
        for conceptName in ("DocumentPeriodEndDate", "DocumentType", "CurrentFiscalPeriodEndDate"):
            for concept in modelXbrl.nameConcepts[conceptName]:
                for fact in factsByQname[concept.qname]:
                    deiFact[conceptName] = fact
                    if fact.context is not None:
                        reportingPeriods.add((None, fact.context.endDatetime))  # for instant
                        reportingPeriods.add((fact.context.startDatetime, fact.context.endDatetime))  # for startEnd
                        nextEnd = fact.context.startDatetime
                        duration = (fact.context.endDatetime - fact.context.startDatetime).days + 1
                        break
        if "DocumentType" in deiFact:
            fact = deiFact["DocumentType"]
            # NOTE(review): assumes the DocumentType fact has a string xValue —
            # a None xValue would raise here; confirm upstream guarantees.
            if "-Q" in fact.xValue:
                # need quarterly and yr to date durations
                endDatetime = fact.context.endDatetime
                # if within 2 days of end of month use last day of month
                endDatetimeMonth = endDatetime.month
                if (endDatetime + timedelta(2)).month != endDatetimeMonth:
                    # near end of month
                    endOfMonth = True
                    while endDatetime.month == endDatetimeMonth:
                        endDatetime += timedelta(1)  # go forward to next month
                else:
                    endOfMonth = False
                # back up three months to synthesize the quarterly period
                startYr = endDatetime.year
                startMo = endDatetime.month - 3
                if startMo <= 0:
                    startMo += 12
                    startYr -= 1
                startDatetime = datetime(startYr, startMo, endDatetime.day, endDatetime.hour, endDatetime.minute, endDatetime.second)
                if endOfMonth:
                    startDatetime -= timedelta(1)
                    endDatetime -= timedelta(1)
                reportingPeriods.add((startDatetime, endDatetime))
                duration = 91  # approximate quarter length in days
        # find preceding compatible default context periods, walking backward
        # one period at a time (duration +/- 10% tolerance)
        while (nextEnd is not None):
            thisEnd = nextEnd
            prevMaxStart = thisEnd - timedelta(duration * .9)
            prevMinStart = thisEnd - timedelta(duration * 1.1)
            nextEnd = None
            for cntx in modelXbrl.contexts.values():
                if (cntx.isStartEndPeriod and not cntx.qnameDims and thisEnd == cntx.endDatetime and
                        prevMinStart <= cntx.startDatetime <= prevMaxStart):
                    reportingPeriods.add((None, cntx.endDatetime))
                    reportingPeriods.add((cntx.startDatetime, cntx.endDatetime))
                    nextEnd = cntx.startDatetime
                    break
                elif (cntx.isInstantPeriod and not cntx.qnameDims and thisEnd == cntx.endDatetime):
                    reportingPeriods.add((None, cntx.endDatetime))
        stmtReportingPeriods = set(reportingPeriods)
        sortedRoleTypes.reverse()  # now in descending order
        for i, roleTypes in enumerate(sortedRoleTypes):
            roleDefinition, roleType = roleTypes
            # find defined non-default axes in pre hierarchy for table
            tableFacts = set()
            tableGroup, tableSeq, tableName = roleType._tableIndex
            roleURIdims, priItemQNames = EFMlinkRoleURIstructure(modelXbrl, roleType.roleURI)
            for priItemQName in priItemQNames:
                for fact in factsByQname[priItemQName]:
                    cntx = fact.context
                    # non-explicit dims must be default
                    if (cntx is not None and
                            all(dimQn in modelXbrl.qnameDimensionDefaults
                                for dimQn in (roleURIdims.keys() - cntx.qnameDims.keys())) and
                            all(mdlDim.memberQname in roleURIdims[dimQn]
                                for dimQn, mdlDim in cntx.qnameDims.items()
                                if dimQn in roleURIdims)):
                        # the flow-up part, drop
                        cntxStartDatetime = cntx.startDatetime
                        cntxEndDatetime = cntx.endDatetime
                        if (tableGroup != STMTS or
                                (cntxStartDatetime, cntxEndDatetime) in stmtReportingPeriods and
                                (fact not in reportedFacts or
                                 all(dimQn not in cntx.qnameDims  # unspecified dims are all defaulted if reported elsewhere
                                     for dimQn in (cntx.qnameDims.keys() - roleURIdims.keys())))):
                            tableFacts.add(fact)
                            reportedFacts.add(fact)
            roleType._tableFacts = tableFacts
            # find parent if any: longest name-prefix match among later (higher) roles
            closestParentType = None
            closestParentMatchLength = 0
            for _parentRoleDefinition, parentRoleType in sortedRoleTypes[i+1:]:
                matchLen = parentNameMatchLen(tableName, parentRoleType)
                if matchLen > closestParentMatchLength:
                    closestParentMatchLength = matchLen
                    closestParentType = parentRoleType
            if closestParentType is not None:
                closestParentType._tableChildren.insert(0, roleType)
            # remove lesser-matched children if there was a parent match
            unmatchedChildRoles = set()
            longestChildMatchLen = 0
            numChildren = 0
            for childRoleType in roleType._tableChildren:
                matchLen = parentNameMatchLen(tableName, childRoleType)
                if matchLen < closestParentMatchLength:
                    unmatchedChildRoles.add(childRoleType)
                elif matchLen > longestChildMatchLen:
                    longestChildMatchLen = matchLen
                    numChildren += 1
            if numChildren > 1:
                # remove children that don't have the full match pattern length to parent
                for childRoleType in roleType._tableChildren:
                    if (childRoleType not in unmatchedChildRoles and
                            parentNameMatchLen(tableName, childRoleType) < longestChildMatchLen):
                        unmatchedChildRoles.add(childRoleType)
            for unmatchedChildRole in unmatchedChildRoles:
                roleType._tableChildren.remove(unmatchedChildRole)
            for childRoleType in roleType._tableChildren:
                childRoleType._tableParent = roleType
            unmatchedChildRoles = None  # dereference
        global UGT_TOPICS
        if UGT_TOPICS is None:
            # lazy one-time load of the UGT topics table shipped with arelle
            try:
                from arelle import FileSource
                fh = FileSource.openFileStream(modelXbrl.modelManager.cntlr,
                                               os.path.join(modelXbrl.modelManager.cntlr.configDir, "ugt-topics.zip/ugt-topics.json"),
                                               'r', 'utf-8')
                UGT_TOPICS = json.load(fh)
                fh.close()
                for topic in UGT_TOPICS:
                    topic[6] = set(topic[6])  # change concept abstracts list into concept abstracts set
                    topic[7] = set(topic[7])  # change concept text blocks list into concept text blocks set
                    topic[8] = set(topic[8])  # change concept names list into concept names set
            except Exception as ex:
                # best-effort: topic matching is simply skipped if the file is unavailable
                UGT_TOPICS = None
        if UGT_TOPICS is not None:
            def roleUgtConcepts(roleType):
                # set of concept names in this role's presentation tree and its children
                roleConcepts = set()
                for rel in modelXbrl.relationshipSet(XbrlConst.parentChild, roleType.roleURI).modelRelationships:
                    if isinstance(rel.toModelObject, ModelConcept):
                        roleConcepts.add(rel.toModelObject.name)
                    if isinstance(rel.fromModelObject, ModelConcept):
                        roleConcepts.add(rel.fromModelObject.name)
                if hasattr(roleType, "_tableChildren"):
                    for _tableChild in roleType._tableChildren:
                        roleConcepts |= roleUgtConcepts(_tableChild)
                return roleConcepts
            topicMatches = {}  # topicNum: (best score, roleType)
            for roleDefinition, roleType in sortedRoleTypes:
                roleTopicType = 'S' if roleDefinition.startswith('S') else 'D'
                if getattr(roleType, "_tableParent", None) is None:
                    # rooted tables in reverse order
                    concepts = roleUgtConcepts(roleType)
                    for i, ugtTopic in enumerate(UGT_TOPICS):
                        if ugtTopic[0] == roleTopicType:
                            countAbstracts = len(concepts & ugtTopic[6])
                            countTextBlocks = len(concepts & ugtTopic[7])
                            countLineItems = len(concepts & ugtTopic[8])
                            if countAbstracts or countTextBlocks or countLineItems:
                                # text blocks weigh most, then abstracts, then line-item coverage
                                _score = (10 * countAbstracts +
                                          1000 * countTextBlocks +
                                          countLineItems / len(concepts))
                                if i not in topicMatches or _score > topicMatches[i][0]:
                                    topicMatches[i] = (_score, roleType)
            for topicNum, scoredRoleType in topicMatches.items():
                _score, roleType = scoredRoleType
                if _score > getattr(roleType, "_tableTopicScore", 0):
                    ugtTopic = UGT_TOPICS[topicNum]
                    roleType._tableTopicScore = _score
                    roleType._tableTopicType = ugtTopic[0]
                    roleType._tableTopicName = ugtTopic[3]
                    roleType._tableTopicCode = ugtTopic[4]
                    # print ("Match score {:.2f} topic {} preGrp {}".format(_score, ugtTopic[3], roleType.definition))
        return (firstTableLinkroleURI or firstDocumentLinkroleURI), None  # no restriction on contents linkroles
    elif _isJpFsa:
        # find ELR with only iod:identifierItem subs group concepts
        roleElrs = dict((roleURI, roleType)
                        for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris
                        for roleType in modelXbrl.roleTypes.get(roleURI, ()))
        roleIdentifierItems = {}
        for roleURI, roleType in roleElrs.items():
            roleType._tableChildren = []
            relSet = modelXbrl.relationshipSet(XbrlConst.parentChild, roleURI)
            for rootConcept in relSet.rootConcepts:
                if rootConcept.substitutionGroupQname and rootConcept.substitutionGroupQname.localName == "identifierItem":
                    roleIdentifierItems[rootConcept] = roleType
        linkroleUri = None
        for roleURI, roleType in roleElrs.items():
            relSet = modelXbrl.relationshipSet(XbrlConst.parentChild, roleURI)
            def addRoleIdentifiers(fromConcept, parentRoleType, visited):
                # walk the presentation tree linking identifierItem roles
                # into parent._tableChildren; visited guards against cycles
                for rel in relSet.fromModelObject(fromConcept):
                    _fromConcept = rel.fromModelObject
                    _toConcept = rel.toModelObject
                    if isinstance(_fromConcept, ModelConcept) and isinstance(_toConcept, ModelConcept):
                        _fromSubQn = _fromConcept.substitutionGroupQname
                        _toSubQn = _toConcept.substitutionGroupQname
                        if ((parentRoleType is not None or
                             (_fromSubQn and _fromSubQn.localName == "identifierItem" and _fromConcept in roleIdentifierItems)) and
                                _toSubQn and _toSubQn.localName == "identifierItem" and
                                _toConcept in roleIdentifierItems):
                            if parentRoleType is None:
                                parentRoleType = roleIdentifierItems[_fromConcept]
                            _toRoleType = roleIdentifierItems[_toConcept]
                            # NOTE(review): membership test uses _toConcept but the
                            # list holds role types (_toRoleType is appended) — confirm intent
                            if _toConcept not in parentRoleType._tableChildren:
                                parentRoleType._tableChildren.append(_toRoleType)
                            if _toConcept not in visited:
                                visited.add(_toConcept)
                                addRoleIdentifiers(_toConcept, _toRoleType, visited)
                                visited.discard(_toConcept)
                        elif _toConcept not in visited:
                            visited.add(_toConcept)
                            addRoleIdentifiers(_toConcept, parentRoleType, visited)
                            visited.discard(_toConcept)
            for rootConcept in relSet.rootConcepts:
                addRoleIdentifiers(rootConcept, None, set())
            if not linkroleUri and len(roleType._tableChildren) > 0:
                linkroleUri = roleURI
        return linkroleUri, linkroleUri  # only show linkroleUri in index table
    elif _ifrsStyleELRs:
        for roleType in definitionElrs.values():
            roleType._tableChildren = []
        return sortedRoleTypes[0][1], None  # first link role in order
    return None, None
def parentNameMatchLen(tableName, parentRoleType):
    """Length of the common prefix between tableName and the parent's table name.

    Both names are truncated at their first '('.  Returns the matched prefix
    length, but only if the scanned portion of tableName contained whitespace
    (i.e. at least one full word was seen); otherwise returns False.
    """
    prefix = parentRoleType._tableIndex[2].partition('(')[0]
    limit = len(prefix)
    matched = 0
    sawSpace = False
    for ch in tableName.partition('(')[0]:
        # the char at the break position still counts toward sawSpace
        sawSpace = sawSpace or ch.isspace()
        if matched >= limit or ch != prefix[matched]:
            break
        matched += 1
    return sawSpace and matched
def EFMlinkRoleURIstructure(modelXbrl, roleURI):
    """Collect the dimensional structure of one presentation ELR.

    Returns (dimMems, priItems): dimension qname -> set of member qnames,
    and the set of non-abstract primary item qnames in the hierarchy.
    """
    relationshipSet = modelXbrl.relationshipSet(XbrlConst.parentChild, roleURI)
    dimMems = {}  # by dimension qname, set of member qnames
    priItems = set()
    for root in relationshipSet.rootConcepts:
        EFMlinkRoleDescendants(relationshipSet, root, dimMems, priItems)
    return dimMems, priItems
def EFMlinkRoleDescendants(relSet, concept, dimMems, priItems):
    """Recursively accumulate dimensions and primary items below concept.

    Dimension items get their member set recorded in dimMems (no further
    descent here); all other non-abstract concepts are added to priItems and
    their presentation children are visited.
    """
    if concept is None:
        return
    if concept.isDimensionItem:
        # a dimension subtree is summarized by its member qnames only
        dimMems[concept.qname] = EFMdimMems(relSet, concept, set())
        return
    if not concept.isAbstract:
        priItems.add(concept.qname)
    for rel in relSet.fromModelObject(concept):
        EFMlinkRoleDescendants(relSet, rel.toModelObject, dimMems, priItems)
def EFMdimMems(relSet, concept, memQNames):
    """Accumulate (into memQNames) all domain-member qnames below concept.

    Returns the same memQNames set for convenient inline use.
    """
    for rel in relSet.fromModelObject(concept):
        target = rel.toModelObject
        if isinstance(target, ModelConcept) and target.isDomainMember:
            memQNames.add(target.qname)
            EFMdimMems(relSet, target, memQNames)
    return memQNames
| |
"""
Copyright Sergey Karayev - 2013.
Written during internship at Adobe CTL, San Francisco.
Every feature imported in features/__init__.py must take
image_filenames and image_ids, and return good_image_ids and feats,
where good_image_ids may be a subset of image_ids.
"""
import os
import sys
import functools
import pandas as pd
import numpy as np
import cPickle
import bson
import vislab.utils.cmdline
import vislab.dataset
import vislab.features
import vislab.vw3
import vislab.feature
# Mongo db name prefix; the actual db is '<DB_NAME>_<dataset_name>'
# (see _get_feat_collection).
DB_NAME = 'vislab_feats'

# Registry of feature extractors.  Each entry maps a feature name to:
#   'fn':            the extractor callable, invoked as
#                    fn(image_ids, image_filenames) in
#                    _extract_features_for_image_ids (None for 'noise',
#                    which needs no image input)
#   'cpus_per_task': minimum worker CPUs required by extract_features
#   'mem':           minimum worker memory required by extract_features
#   'chunk_size':    image ids per worker job; -1 yields a single chunk
FEATURES = {
    # does not look at image at all:
    'noise': {
        'cpus_per_task': 1, 'mem': 1000, 'chunk_size': -1, 'fn': None
    },
    # python:
    'size': {
        'fn': vislab.features.size,
        'cpus_per_task': 1, 'mem': 1000, 'chunk_size': -1
    },
    'gist_256': {
        'fn': functools.partial(vislab.features.gist, max_size=256),
        'cpus_per_task': 2, 'mem': 2000, 'chunk_size': -1
    },
    # caffe:
    'caffe_imagenet': {
        'fn': functools.partial(vislab.features.caffe, layer='prob'),
        'cpus_per_task': 4, 'mem': 3000, 'chunk_size': 30,
    },
    'caffe_fc6': {
        'fn': functools.partial(vislab.features.caffe, layer='fc6'),
        'cpus_per_task': 4, 'mem': 3000, 'chunk_size': 30,
    },
    'caffe_fc7': {
        'fn': functools.partial(vislab.features.caffe, layer='fc7'),
        'cpus_per_task': 4, 'mem': 3000, 'chunk_size': 30,
    },
    # matlab:
    'dsift_llc_1000': {
        'fn': vislab.features.dsift_llc,
        'cpus_per_task': 3, 'mem': 3000, 'chunk_size': 20
    },
    'lab_hist': {
        'fn': vislab.features.lab_hist,
        'cpus_per_task': 4, 'mem': 3000, 'chunk_size': 30
    },
    'mc_bit': {
        'fn': vislab.features.mc_bit,
        'cpus_per_task': 1, 'mem': 7000, 'chunk_size': 10
    },
    'gbvs_saliency': {
        'fn': vislab.features.gbvs_saliency,
        'cpus_per_task': 4, 'mem': 3000, 'chunk_size': 10
    },
}
def extract_features(
        dataset_df, dataset_name, feat_name, force=False,
        mem=3000, cpus_per_task=2, num_workers=1):
    """
    Extract features for each image in a list of image ids.

    Only those images that do not already have feature information are
    processed.
    Features are stored in a collection in the features mongo database
    as they are computed.

    Parameters
    ----------
    dataset_df: pandas.DataFrame
    dataset_name: string
    feat_name: string
        Must be in FEATURES.
    force: boolean [False]
        Compute feature even if it is already in database.
    mem: int [3000]
    cpus_per_task: int [2]
    num_workers: int [1]

    Returns
    -------
    None
        ('noise' features are written straight to the h5 cache; all other
        features are stored in mongo by the distributed workers.)
    """
    # Check that the settings are valid.
    assert(feat_name in FEATURES and
           cpus_per_task >= FEATURES[feat_name]['cpus_per_task'] and
           mem >= FEATURES[feat_name]['mem'])
    # Determine the cache filename, thereby creating the right directory.
    dirname = vislab.util.makedirs('{}/{}'.format(
        vislab.config['paths']['feats'], dataset_name))
    h5_filename = '{}/{}.h5'.format(dirname, feat_name)
    collection = _get_feat_collection(dataset_name, feat_name)
    collection.ensure_index('image_id')
    # Exclude ids that already have computed features in the database.
    image_ids = dataset_df.index.tolist()
    if not force:
        computed_image_ids = [
            x['image_id'] for x in collection.find(fields=['image_id'])]
        num_ids = len(image_ids)
        image_ids = list(set(image_ids) - set(computed_image_ids))
        print("Cut down on {} existing out of {} total image ids.".format(
            num_ids - len(image_ids), num_ids))
    if len(image_ids) < 1:
        return
    # Features that are computed without any input are a special case:
    # written straight to the h5 cache instead of mongo.
    if feat_name == 'noise':
        X = np.random.rand(dataset_df.shape[0], 2).astype('float32')
        df = pd.DataFrame(
            data={'feat': [row for row in X]},
            index=dataset_df.index)
        df.to_hdf(h5_filename, 'df', mode='w')
        return
    # Next, everything else: fan the ids out in chunks to worker jobs.
    # Integer division: np.array_split requires an int section count
    # (true division would pass a float under Python 3).
    num_chunks = max(1, len(image_ids) // FEATURES[feat_name]['chunk_size'])
    id_chunks = np.array_split(image_ids, num_chunks)
    args_list = [
        (id_chunk.tolist(), feat_name, dataset_name)
        for id_chunk in id_chunks
    ]
    # 'async' is a reserved word in Python 3.7+, so pass it via dict
    # unpacking; the runtime call is identical.
    vislab.utils.distributed.map_through_rq(
        vislab.feature._extract_features_for_image_ids, args_list,
        dataset_name + '_' + feat_name,
        num_workers=num_workers, mem=mem, cpus_per_task=cpus_per_task,
        **{'async': (num_workers > 1)})
def _extract_features_for_image_ids(
        image_ids, feat_name, dataset_name):
    """
    Download images, compute features, and store to database, for the
    given list of image_ids in dataset_name, with feat_name.
    """
    collection = _get_feat_collection(dataset_name, feat_name)
    image_filenames = vislab.dataset.fetch_image_filenames_for_ids(
        image_ids, dataset_name)
    # NOTE(review): this guards image_ids, but the message talks about
    # loading images — presumably image_filenames was meant; confirm.
    if len(image_ids) == 0:
        print("Could not load any images from {}".format(image_ids))
        return
    # NOTE(review): the module docstring says features take
    # (image_filenames, image_ids); here they are passed in the opposite
    # order — verify against the feature implementations.
    image_ids, feats = FEATURES[feat_name]['fn'](image_ids, image_filenames)
    _store_in_db(collection, image_ids, feats)
def _get_feat_collection(dataset_name, feat_name):
    """Return the mongo collection holding feat_name features for dataset_name."""
    client = vislab.util.get_mongodb_client()
    return client['{}_{}'.format(DB_NAME, dataset_name)][feat_name]
def _store_in_db(collection, image_ids, feats):
    """
    Store the given features for the given ids in the database.

    Each (image_id, feature) pair is upserted, with the feature array
    pickled into a BSON binary blob.

    Parameters
    ----------
    collection: pymongo.Collection
    image_ids: list of string
    feats: list of ndarray
    """
    for img_id, feat_arr in zip(image_ids, feats):
        document = {
            'image_id': img_id,
            'feat': bson.Binary(cPickle.dumps(feat_arr, protocol=2))
        }
        collection.update({'image_id': img_id}, document, upsert=True)
def _cache_to_h5(
        dataset_name, image_ids, feat_name, standardize=False, force=False):
    """
    Cache features stored in mongo for the given image ids to an HDF5 file.

    NaN rows and duplicate image ids are dropped; if standardize is True
    (and the features are float32), columns are mean/std standardized.
    Does nothing when a cache file already exists (unless force).
    """
    dirname = vislab.util.makedirs(
        os.path.join(vislab.config['paths']['feats'], dataset_name))
    filename = '{}/{}.h5'.format(dirname, feat_name)
    if not force and os.path.exists(filename):
        print("Cached file for {}: {} already exists.".format(
            dataset_name, feat_name))
        return
    collection = _get_feat_collection(dataset_name, feat_name)
    print('{} records in collection'.format(collection.count()))
    cursor = collection.find({'image_id': {'$in': image_ids}})
    print("{} records in cursor".format(cursor.count()))
    image_ids_ = []
    feats = []
    for document in cursor:
        image_ids_.append(str(document['image_id']))
        feat = cPickle.loads(document['feat'])
        if feat.dtype in [float, 'float32', 'float64']:
            feat = feat.astype('float32')
        feats.append(feat)
    # Guard: np.vstack raises on an empty list when no records matched.
    if not feats:
        print("No records found for {}: {}.".format(dataset_name, feat_name))
        return
    # Drop all feature vectors with NaN's
    df = pd.DataFrame(np.vstack(feats), image_ids_)
    df = df.dropna()
    # drop duplicates
    df['image_id'] = df.index
    df = df.drop_duplicates('image_id')
    del df['image_id']
    print("{} rows remain after duplicates removed".format(df.shape[0]))
    # NOTE(review): 'feat' here is the dtype of the LAST record only;
    # mixed-dtype collections would be mis-detected — confirm acceptable.
    if standardize and feat.dtype == 'float32':
        print("Standardizing data")
        # DataFrame.values is a read-only property; assigning to it raised
        # AttributeError.  Standardize via DataFrame arithmetic instead.
        df = (df - df.values.mean(0)) / df.values.std(0)
    df.to_hdf(filename, 'df', complib='blosc')
def _cache_to_vw(
        dataset_name, image_ids, feature_name, standardize=None, force=False):
    """
    Output VW-formatted feature values to GZIP'd file for the given image ids.

    If h5 cache of features exists, writes from there.
    If not, writes directly from database (via a pickled function call piped
    through gzip in a bash subprocess).

    standardize is accepted for interface parity with _cache_to_h5 but is
    not used here.
    """
    assert(feature_name in FEATURES)
    dirname = os.path.join(vislab.config['paths']['feats'], dataset_name)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    filename = '{}/{}.txt.gz'.format(dirname, feature_name)
    if not force and os.path.exists(filename):
        print("Cached file already exists.")
        return
    h5_filename = '{}/{}/{}.h5'.format(
        vislab.config['paths']['feats'], dataset_name, feature_name)
    # If HDF5 cache exists, write from it, shuffling lines.
    if os.path.exists(h5_filename):
        _write_features_for_vw_from_h5(
            image_ids, dataset_name, feature_name, filename)
        # Fix: previously control fell through to the db-command code below,
        # which raised NameError on the unbound func/args.
        return
    # Otherwise, write from the database.
    args = (image_ids, dataset_name, feature_name)
    func = _write_features_for_vw_from_db
    print("Outputting to gzipped feature file {}".format(filename))
    python_cmd = 'python -c "{}"'.format(
        vislab.util.pickle_function_call(func, args))
    cmd = "{} | gzip > {}".format(python_cmd, filename)
    print(cmd)
    vislab.util.run_through_bash_script([cmd])
def _write_features_for_vw_from_h5(
        image_ids, dataset_name, feature_name, output_filename):
    """
    Write VW-formatted features for image_ids from the HDF5 cache file.

    Does not standardize features, assumes already standardized.
    Rows are shuffled before writing.
    """
    h5_filename = '{}/{}/{}.h5'.format(
        vislab.config['paths']['feats'], dataset_name, feature_name)
    df = pd.read_hdf(h5_filename, 'df')
    df.index = df.index.astype(str)
    sys.stderr.write(
        "_write_features_for_vw_from_h5: Count for feature {}: {}\n".format(
            feature_name, df.shape[0]))
    # NOTE: this line is necessary... saw segfaults without it
    good_ids = [x for x in image_ids if x in df.index]
    # .loc replaces the deprecated (removed in pandas 1.0) .ix indexer;
    # good_ids were just filtered against the index, so label lookup is safe.
    df = df.loc[good_ids]
    df = df.dropna()
    # Shuffle!
    df = df.iloc[np.random.permutation(df.shape[0])]
    vislab.vw3.write_data_in_vw_format(df, feature_name, output_filename)
def _write_features_for_vw_from_db(image_ids, dataset_name, feature_name):
    """Stream VW-formatted feature lines for image_ids from mongo to stdout.

    Output format: id |feature_name ind:val ind:val ind:val ...
    Progress/count information goes to stderr.
    """
    # Connect to DB and get a cursor with all image_ids.
    collection = _get_feat_collection(dataset_name, feature_name)
    cursor = collection.find({'image_id': {'$in': image_ids}})
    sys.stderr.write("{}: Count for feature {}: {}. Matching ids: {}\n".format(
        '_write_features_for_vw_from_db', feature_name,
        collection.count(), cursor.count()))
    for doc in cursor:
        line = vislab.vw3._feat_for_vw(
            doc['image_id'], feature_name, cPickle.loads(doc['feat']))
        sys.stdout.write(line + '\n')
## Command-line interface.
def compute(args=None):
    """
    Extract features of the requested type for all images in AVA.
    """
    if args is None:
        args = vislab.utils.cmdline.get_args(
            'feature', 'compute', ['dataset', 'processing', 'feature'])
    dataset_df = vislab.dataset.get_df_with_args(args)
    for feat_name in args.features:
        extract_features(
            dataset_df, args.dataset, feat_name, args.force_features,
            args.mem, args.cpus_per_task, args.num_workers)
def cache_to_h5(args=None):
    """
    Output features in the database for the ids in the loaded dataset to
    HDF5 cache file, one for each type of feature.

    args: parsed command-line namespace, or None to have _cache parse it.
    """
    _cache(_cache_to_h5, 'h5', args)
def cache_to_vw(args=None):
    """
    Output features in the database for the ids in the loaded dataset to
    VW format gzip file, one for each type of feature.

    args: parsed command-line namespace, or None to have _cache parse it.
    """
    _cache(_cache_to_vw, 'vw', args)
def _cache(fn, name, args=None):
    """Shared driver for cache_to_h5/cache_to_vw: load the dataset, then
    run the given cache function once per requested feature."""
    if args is None:
        args = vislab.utils.cmdline.get_args(
            'feature', 'cache_to_{}'.format(name),
            ['dataset', 'processing', 'feature']
        )
    dataset_df = vislab.dataset.get_df_with_args(args)
    ids = dataset_df.index.tolist()
    for feat_name in args.features:
        fn(args.dataset, ids, feat_name,
           args.standardize, args.force_features)
if __name__ == '__main__':
    # Map of command names to entry points; dispatch is handled by
    # vislab.utils.cmdline.run_function_in_file.
    possible_functions = {
        'compute': compute,
        'cache_to_h5': cache_to_h5,
        'cache_to_vw': cache_to_vw
    }
    vislab.utils.cmdline.run_function_in_file(__file__, possible_functions)
| |
# Copyright 2010 Google Inc.
# All Rights Reserved.
#
# Author: Tim Haloun (thaloun@google.com)
# Daniel Petersson (dape@google.com)
#
import os
# Keep a global dictionary of library target params for lookups in
# ExtendComponent().
# NOTE(review): empty at module load; presumably populated by
# ExtendComponent (not visible here) — confirm.
_all_lib_targets = {}
def _GenericLibrary(env, static, **kwargs):
    """Extends ComponentLibrary to support multiplatform builds
    of dynamic or static libraries.

    Args:
      env: The environment object.
      static: True for a static library, False for a dynamic one.
      kwargs: The keyword arguments.

    Returns:
      See swtoolkit ComponentLibrary
    """
    merged = CombineDicts(kwargs, {'COMPONENT_STATIC': static})
    return ExtendComponent(env, 'ComponentLibrary', **merged)
def Library(env, **kwargs):
    """Declare a static library target for all platforms.

    Args:
        env: The current environment.
        kwargs: The keyword arguments.

    Returns:
        See swtoolkit ComponentLibrary
    """
    return _GenericLibrary(env, True, **kwargs)
def DynamicLibrary(env, **kwargs):
    """Declare a dynamic (shared) library target for all platforms.

    Args:
        env: The environment object.
        kwargs: The keyword arguments.

    Returns:
        See swtoolkit ComponentLibrary
    """
    return _GenericLibrary(env, False, **kwargs)
def Object(env, **kwargs):
    """Declare a bare object-file target; see swtoolkit ComponentObject."""
    return ExtendComponent(env, 'ComponentObject', **kwargs)
def Unittest(env, **kwargs):
    """Extends ComponentTestProgram to support unittest built
    for multiple platforms.

    Args:
        env: The current environment.
        kwargs: The keyword arguments.

    Returns:
        See swtoolkit ComponentProgram.
    """
    kwargs['name'] = kwargs['name'] + '_unittest'

    common_test_params = {
        'posix_cppdefines': ['GUNIT_NO_GOOGLE3', 'GTEST_HAS_RTTI=0'],
        'libs': ['unittest_main', 'gunit']
    }
    # 'explicit_libs' suppresses the default platform system libraries.
    # (dict.has_key() is deprecated; the 'in' operator is the idiomatic test.)
    if 'explicit_libs' not in kwargs:
        common_test_params['win_libs'] = [
            'advapi32',
            'crypt32',
            'iphlpapi',
            'secur32',
            'shell32',
            'shlwapi',
            'user32',
            'wininet',
            'ws2_32'
        ]
        common_test_params['lin_libs'] = [
            'crypto',
            'pthread',
            'ssl',
        ]

    params = CombineDicts(kwargs, common_test_params)
    return ExtendComponent(env, 'ComponentTestProgram', **params)
def App(env, **kwargs):
    """Extends ComponentProgram to support executables with platform specific
    options.

    Args:
        env: The current environment.
        kwargs: The keyword arguments.

    Returns:
        See swtoolkit ComponentProgram.
    """
    # 'explicit_libs' suppresses the default Windows system libraries.
    # (dict.has_key() is deprecated; the 'in' operator is the idiomatic test.)
    if 'explicit_libs' not in kwargs:
        common_app_params = {
            'win_libs': [
                'advapi32',
                'crypt32',
                'iphlpapi',
                'secur32',
                'shell32',
                'shlwapi',
                'user32',
                'wininet',
                'ws2_32'
            ]}
        params = CombineDicts(kwargs, common_app_params)
    else:
        params = kwargs
    return ExtendComponent(env, 'ComponentProgram', **params)
def WiX(env, **kwargs):
    """Extends the WiX installer builder.

    Args:
        env: The current environment.
        kwargs: The keyword arguments.

    Returns:
        The node produced by the environment's wix builder
    """
    return ExtendComponent(env, 'WiX', **kwargs)
def Repository(env, at, path):
    """Maps a directory external to $MAIN_DIR to the given path so that sources
    compiled from it end up in the correct place under $OBJ_DIR. NOT required
    when only referring to header files.

    Args:
        env: The current environment object.
        at: The 'mount point' within the current directory.
        path: Path to the actual directory.
    """
    # SCons repository mechanism: files not found under `at` are searched
    # for in `path`, but outputs stay under the local build tree.
    env.Dir(at).addRepository(env.Dir(path))
def Components(*paths):
    """Completes directory paths into their <dir>/<dir>.scons build files.

    Args:
        paths: The paths to complete. A path that already refers to an
            existing file is passed through unchanged.

    Returns:
        The completed list of scons files that are needed to build talk.
    """
    return [p if os.path.isfile(p) else ExpandSconsPath(p) for p in paths]
def ExpandSconsPath(path):
    """Expands a directory path into the path of its scons build file.

    Ex: magiflute/plugin/common => magicflute/plugin/common/common.scons

    Args:
        path: The directory path to expand.

    Returns:
        The expanded path.
    """
    leaf = os.path.basename(path)
    return '%s/%s.scons' % (path, leaf)
def AddMediaLibs(env, **kwargs):
    """Adds the LMI and GIPS media libraries and their libdirs to a target.

    Args:
        env: The hammer environment; its platform bits select the binaries.
        kwargs: The target's keyword arguments, extended in place.

    Returns:
        kwargs, with 'libdirs', 'libs' and (on mac) 'FRAMEWORKS' extended.
    """
    lmi_libdir = '$GOOGLE3/../googleclient/third_party/lmi/files/lib/'
    if env.Bit('windows'):
        if env.get('COVERAGE_ENABLED'):
            # Coverage builds link against the C-only LMI binaries.
            lmi_libdir += 'win32/c_only'
        else:
            lmi_libdir += 'win32/Release'
    elif env.Bit('mac'):
        lmi_libdir += 'macos'
    elif env.Bit('linux'):
        lmi_libdir += 'linux/x86'

    AddToDict(kwargs, 'libdirs', [
        '$MAIN_DIR/third_party/gips/Libraries/',
        lmi_libdir,
    ])

    # Pick the GIPS voice-engine variant for this platform/configuration.
    gips_lib = ''
    if env.Bit('windows'):
        if env.Bit('debug'):
            gips_lib = 'gipsvoiceenginelib_mtd'
        else:
            gips_lib = 'gipsvoiceenginelib_mt'
    elif env.Bit('mac'):
        gips_lib = 'VoiceEngine_mac_universal_gcc'
    elif env.Bit('linux'):
        gips_lib = 'VoiceEngine_Linux_gcc'

    AddToDict(kwargs, 'libs', [
        gips_lib,
        'LmiAudioCommon',
        'LmiClient',
        'LmiCmcp',
        'LmiDeviceManager',
        'LmiH263ClientPlugIn',
        'LmiH263CodecCommon',
        'LmiH263Decoder',
        'LmiH263Encoder',
        'LmiH264ClientPlugIn',
        'LmiH264CodecCommon',
        'LmiH264Common',
        'LmiH264Decoder',
        'LmiH264Encoder',
        'LmiIce',
        'LmiMediaPayload',
        'LmiOs',
        'LmiPacketCache',
        'LmiProtocolStack',
        'LmiRateShaper',
        'LmiRtp',
        'LmiSecurity',
        'LmiSignaling',
        'LmiStun',
        'LmiTransport',
        'LmiUi',
        'LmiUtils',
        'LmiVideoCommon',
        'LmiXml',
    ])

    # Platform system libraries/frameworks the media stack links against.
    if env.Bit('windows'):
        AddToDict(kwargs, 'libs', [
            'dsound',
            'd3d9',
            'gdi32',
            'strmiids',
        ])

    if env.Bit('mac'):
        AddToDict(kwargs, 'FRAMEWORKS', [
            'AudioToolbox',
            'AudioUnit',
            'Cocoa',
            'CoreAudio',
            'CoreFoundation',
            'IOKit',
            'QTKit',
            'QuickTime',
            'QuartzCore',
        ])
    return kwargs
def ReadVersion(filename):
    """Executes the supplied file and pulls out a version definition from it.

    Args:
        filename: Path of a python file defining a comma-separated
            'version' string.

    Returns:
        The dotted version string; the last component is overridden by
        $GOOGLE_VERSION_BUILDNUMBER when set. '0.0.0.0' when the file
        defines no version.
    """
    defs = {}
    execfile(str(filename), defs)
    # dict.has_key() is deprecated; use the 'in' operator instead.
    if 'version' not in defs:
        return '0.0.0.0'
    parts = defs['version'].split(',')
    build = os.environ.get('GOOGLE_VERSION_BUILDNUMBER')
    if build:
        # Let the build system stamp the final (build-number) component.
        parts[-1] = str(build)
    return '.'.join(parts)
#-------------------------------------------------------------------------------
# Helper methods for translating talk.Foo() declarations in to manipulations of
# environmuent construction variables, including parameter parsing and merging,
#
def GetEntry(dict, key):
    """Removes and returns a value from a dictionary by key.

    If the key isn't in the dictionary then None is returned. If it is in
    the dictionary the value is fetched and the entry is removed.

    Args:
        dict: The keyword argument dictionary.
        key: The key to get the value for.

    Returns:
        The value or None if the key is missing.
    """
    # dict.pop() with a default performs the lookup and the removal in a
    # single step, replacing the has_key/getitem/pop sequence.
    return dict.pop(key, None)
def MergeAndFilterByPlatform(env, params):
    """Take a dictionary of arguments to lists of values, and, depending on
    which platform we are targetting, merge the lists of associated keys.
    Merge by combining value lists like so:
      {win_foo = [a,b], lin_foo = [c,d], foo = [e], mac_bar = [f], bar = [g] }
      becomes {foo = [a,b,e], bar = [g]} on windows, and
      {foo = [e], bar = [f,g]} on mac

    Args:
        env: The hammer environment which knows which platforms are active
        params: The keyword argument dictionary.

    Returns:
        A new dictionary with the filtered and combined entries of params
    """
    platforms = {
        'linux': 'lin_',
        'mac': 'mac_',
        'posix': 'posix_',
        'windows': 'win_',
    }
    # Prefixes whose platform bit is set on this build, and those that aren't.
    active_prefixes = [
        platforms[x] for x in iter(platforms) if env.Bit(x)
    ]
    inactive_prefixes = [
        platforms[x] for x in iter(platforms) if not env.Bit(x)
    ]

    merged = {}
    for arg, values in params.iteritems():
        inactive_platform = False
        key = arg
        # Strip an active-platform prefix so e.g. 'win_foo' merges into 'foo'.
        for prefix in active_prefixes:
            if arg.startswith(prefix):
                key = arg[len(prefix):]
        # Drop entries belonging to platforms we are not building for.
        for prefix in inactive_prefixes:
            if arg.startswith(prefix):
                inactive_platform = True
        if inactive_platform:
            continue
        # AddToDict concatenates value lists when the stripped key collides.
        AddToDict(merged, key, values)
    return merged
# Linux can build both 32 and 64 bit on 64 bit host, but 32 bit host can
# only build 32 bit. For 32 bit debian installer a 32 bit host is required.
# ChromeOS (linux) ebuild don't support 64 bit and requires 32 bit build only
# for now.
def Allow64BitCompile(env):
    """Returns whether this build can also produce 64-bit binaries.

    Only a Linux environment with the 64-bit platform arch bit set
    qualifies (a 32 bit host can only build 32 bit).
    """
    return env.Bit('linux') and env.Bit('platform_arch_64bit')
def MergeSettingsFromLibraryDependencies(env, params):
    """Merges 'dependent_target_settings' from this target's library deps.

    Args:
        env: The hammer environment, used to filter settings by platform.
        params: The target's keyword-argument dictionary.

    Returns:
        params, combined with the platform-filtered dependent settings of
        every library in params['libs'] that registered any.
    """
    # dict.has_key() is deprecated; use the 'in' operator / .get() instead.
    if 'libs' in params:
        for lib in params['libs']:
            lib_params = _all_lib_targets.get(lib)
            if lib_params and 'dependent_target_settings' in lib_params:
                params = CombineDicts(
                    params,
                    MergeAndFilterByPlatform(
                        env,
                        lib_params['dependent_target_settings']))
    return params
def ExtendComponent(env, component, **kwargs):
    """A wrapper around a scons builder function that preprocesses and post-
    processes its inputs and outputs. For example, it merges and filters
    certain keyword arguments before appending them to the environments
    construction variables. It can build signed targets and 64bit copies
    of targets as well.

    Args:
        env: The hammer environment with which to build the target
        component: The environment's builder function, e.g. ComponentProgram
        kwargs: keyword arguments that are either merged, translated, and
            passed on to the call to component, or which control execution.
            TODO(): Document the fields, such as cppdefines->CPPDEFINES,
            prepend_includedirs, include_talk_media_libs, etc.

    Returns:
        The output node returned by the call to component, or a subsequent
        signed dependant node.
    """
    env = env.Clone()

    # prune parameters intended for other platforms, then merge
    params = MergeAndFilterByPlatform(env, kwargs)

    # get the 'target' field
    name = GetEntry(params, 'name')

    # save pristine params of lib targets for future reference
    if 'ComponentLibrary' == component:
        _all_lib_targets[name] = dict(params)

    # add any dependent target settings from library dependencies
    params = MergeSettingsFromLibraryDependencies(env, params)

    # if this is a signed binary we need to make an unsigned version first
    signed = env.Bit('windows') and GetEntry(params, 'signed')
    if signed:
        name = 'unsigned_' + name

    # add default values
    if GetEntry(params, 'include_talk_media_libs'):
        params = AddMediaLibs(env, **params)

    # potentially exit now: nothing to build without sources or a builder
    srcs = GetEntry(params, 'srcs')
    if not srcs or not hasattr(env, component):
        return None

    # apply any explicit dependencies
    dependencies = GetEntry(params, 'depends')
    if dependencies is not None:
        env.Depends(name, dependencies)

    # put the contents of params into the environment
    # some entries are renamed then appended, others renamed then prepended
    appends = {
        'cppdefines': 'CPPDEFINES',
        'libdirs': 'LIBPATH',
        'link_flags': 'LINKFLAGS',
        'libs': 'LIBS',
        'FRAMEWORKS': 'FRAMEWORKS',
    }
    prepends = {}
    if env.Bit('windows'):
        # MSVC compile flags have precedence at the beginning ...
        prepends['ccflags'] = 'CCFLAGS'
    else:
        # ... while GCC compile flags have precedence at the end
        appends['ccflags'] = 'CCFLAGS'
    if GetEntry(params, 'prepend_includedirs'):
        prepends['includedirs'] = 'CPPPATH'
    else:
        appends['includedirs'] = 'CPPPATH'

    for field, var in appends.items():
        values = GetEntry(params, field)
        if values is not None:
            env.Append(**{var: values})
    for field, var in prepends.items():
        values = GetEntry(params, field)
        if values is not None:
            env.Prepend(**{var: values})

    # workaround for pulse stripping link flag for unknown reason
    if Allow64BitCompile(env):
        env['SHLINKCOM'] = ('$SHLINK -o $TARGET -m32 $SHLINKFLAGS $SOURCES '
                            '$_LIBDIRFLAGS $_LIBFLAGS')
        env['LINKCOM'] = ('$LINK -o $TARGET -m32 $LINKFLAGS $SOURCES '
                          '$_LIBDIRFLAGS $_LIBFLAGS')

    # any other parameters are replaced without renaming
    for field, value in params.items():
        env.Replace(**{field: value})

    # invoke the builder function
    builder = getattr(env, component)
    node = builder(name, srcs)

    # make a parallel 64bit version if requested
    if Allow64BitCompile(env) and GetEntry(params, 'also64bit'):
        env_64bit = env.Clone()
        env_64bit.FilterOut(CCFLAGS=['-m32'], LINKFLAGS=['-m32'])
        env_64bit.Prepend(CCFLAGS=['-m64', '-fPIC'], LINKFLAGS=['-m64'])
        name_64bit = name + '64'
        # '64'-suffixed object files keep the two builds from colliding.
        env_64bit.Replace(OBJSUFFIX='64' + env_64bit['OBJSUFFIX'])
        env_64bit.Replace(SHOBJSUFFIX='64' + env_64bit['SHOBJSUFFIX'])
        if ('ComponentProgram' == component or
            ('ComponentLibrary' == component and
             env_64bit['COMPONENT_STATIC'] == False)):
            # link 64 bit versions of libraries
            libs = []
            for lib in env_64bit['LIBS']:
                if (_all_lib_targets.has_key(lib) and
                    _all_lib_targets[lib].has_key('also64bit')):
                    libs.append(lib + '64')
                else:
                    libs.append(lib)
            env_64bit.Replace(LIBS=libs)
        env_64bit['SHLINKCOM'] = ('$SHLINK -o $TARGET -m64 $SHLINKFLAGS $SOURCES '
                                  '$_LIBDIRFLAGS $_LIBFLAGS')
        env_64bit['LINKCOM'] = ('$LINK -o $TARGET -m64 $LINKFLAGS $SOURCES '
                                '$_LIBDIRFLAGS $_LIBFLAGS')
        builder = getattr(env_64bit, component)
        nodes = [node, builder(name_64bit, srcs)]
        return nodes

    if signed:  # Note currently incompatible with 64Bit flag
        # Get the name of the built binary, then get the name of the final
        # signed version from it. We need the output path since we don't know
        # the file extension beforehand.
        target = node[0].path.split('_', 1)[1]
        signed_node = env.SignedBinary(
            source=node,
            target='$STAGING_DIR/' + target,
        )
        env.Alias('signed_binaries', signed_node)
        return signed_node

    return node
def AddToDict(dictionary, key, values, append=True):
    """Merge the given key value(s) pair into a dictionary.

    If it contains an entry with that key already, then combine by appending
    or prepending the values as directed. Otherwise, assign a new keyvalue
    pair. A None value is ignored.
    """
    if values is None:
        return

    # dict.has_key() is deprecated; use the 'in' operator instead.
    if key not in dictionary:
        dictionary[key] = values
        return

    cur = dictionary[key]
    # TODO: Make sure that there are no duplicates
    # in the list. I can't use python set for this since
    # the nodes that are returned by the SCONS builders
    # are not hashable.
    # dictionary[key] = list(set(cur).union(set(values)))
    if append:
        dictionary[key] = cur + values
    else:
        dictionary[key] = values + cur
def CombineDicts(a, b):
    """Unions two dictionaries by combining values of keys shared between them.

    Unlike the previous implementation, neither input is modified: the old
    version popped shared keys out of `b`, silently corrupting any caller
    dict that was meant to be reused.

    Args:
        a: First dictionary; its values come first for shared keys.
        b: Second dictionary.

    Returns:
        A new dictionary with the combined entries of a and b.
    """
    c = {}
    for key in a:
        if key in b:
            c[key] = a[key] + b[key]
        else:
            c[key] = a[key]
    for key in b:
        if key not in c:
            c[key] = b[key]
    return c
def RenameKey(d, old, new, append=True):
    """Move d[old] under the key `new`, merging per AddToDict semantics."""
    value = GetEntry(d, old)
    AddToDict(d, new, value, append)
| |
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.core.paginator import InvalidPage, Paginator
from django.db import models
from django.http import HttpResponseRedirect
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from xadmin.util import lookup_field, display_for_field, label_for_field, boolean_icon
from base import ModelAdminView, filter_hook, inclusion_tag, csrf_protect_m
# List settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
PAGE_VAR = 'p'
TO_FIELD_VAR = 't'
COL_LIST_VAR = '_cols'
ERROR_FLAG = 'e'
DOT = '.'
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = _('None')
class FakeMethodField(object):
    """
    Wraps a model method as a fake field so a column backed by a function
    can be shown in the select-columns panel like a real db field.
    """

    def __init__(self, name, verbose_name):
        # Mimic the minimal field attributes the list view reads.
        self.primary_key = False
        self.name = name
        self.verbose_name = verbose_name
class ResultRow(dict):
    """A dict of per-row state; the list cells are attached as ``row.cells``
    by result_headers()/result_row()."""
    pass
class ResultItem(object):
    """One rendered cell (a <td> by default) of the change-list table."""

    def __init__(self, field_name, row):
        self.field_name = field_name
        self.row = row
        self.tag = 'td'
        self.tag_attrs = []
        self.classes = []
        self.text = ' '
        self.wraps = []
        self.allow_tags = False
        self.is_display_link = False
        self.btns = []
        self.menus = []
        self.field = None
        self.attr = None
        self.value = None

    @property
    def label(self):
        """The cell text: escaped unless allow_tags, then wrapped."""
        if self.allow_tags:
            text = mark_safe(self.text)
        else:
            text = conditional_escape(self.text)
        if force_unicode(text) == '':
            text = mark_safe(' ')
        for template in self.wraps:
            text = mark_safe(template % text)
        return text

    @property
    def tagattrs(self):
        """Extra tag attributes plus the class="" string for the cell."""
        attrs = self.tag_attrs and ' '.join(self.tag_attrs) or ''
        classes = self.classes and (' class="%s"' % ' '.join(self.classes)) or ''
        return mark_safe('%s%s' % (attrs, classes))
class ResultHeader(ResultItem):
    """One header cell (<th>) carrying sort state and sort-menu URLs."""

    def __init__(self, field_name, row):
        super(ResultHeader, self).__init__(field_name, row)
        self.tag = 'th'
        self.tag_attrs = ['scope="col"']
        self.allow_tags = True
        # Sort state, filled in by ListAdminView.result_header().
        self.sortable = False
        self.sorted = False
        self.ascending = None
        self.sort_priority = None
        self.url_primary = None
        self.url_remove = None
        self.url_toggle = None
class ListAdminView(ModelAdminView):
    """
    Display models objects view. this class has ordering and simple filter features.
    """
    # Columns shown in the table; can be overridden per request via _cols.
    list_display = ('__str__',)
    # Columns rendered as links to the object; defaults to the first column.
    list_display_links = ()
    list_select_related = False
    list_per_page = 50
    # Upper bound under which the "show all" link is offered.
    list_max_show_all = 200
    # Fields hidden from the select-columns panel.
    list_exclude = ()
    search_fields = ()
    paginator_class = Paginator
    ordering = None

    # Change list templates
    object_list_template = None

    def init_request(self, *args, **kwargs):
        """Check view permission and pull list state out of the query string."""
        if not self.has_view_permission():
            raise PermissionDenied

        request = self.request
        self.pk_attname = self.opts.pk.attname
        self.lookup_opts = self.opts
        self.list_display = self.get_list_display()
        self.list_display_links = self.get_list_display_links()

        # Get page number parameters from the query string.
        try:
            self.page_num = int(request.GET.get(PAGE_VAR, 0))
        except ValueError:
            # Non-numeric page requested; fall back to the first page.
            self.page_num = 0

        # Get params from request
        self.show_all = ALL_VAR in request.GET
        self.to_field = request.GET.get(TO_FIELD_VAR)
        self.params = dict(request.GET.items())

        # Strip bookkeeping params so they don't leak into generated links.
        if PAGE_VAR in self.params:
            del self.params[PAGE_VAR]
        if ERROR_FLAG in self.params:
            del self.params[ERROR_FLAG]

    @filter_hook
    def get_list_display(self):
        """
        Return a sequence containing the fields to be displayed on the list.
        """
        # The _cols query param (dot-separated) overrides the class default.
        self.base_list_display = COL_LIST_VAR in self.request.GET and self.request.GET[COL_LIST_VAR].split('.') or self.list_display
        return list(self.base_list_display)

    @filter_hook
    def get_list_display_links(self):
        """
        Return a sequence containing the fields to be displayed as links
        on the changelist. The list_display parameter is the list of fields
        returned by get_list_display().
        """
        if self.list_display_links or not self.list_display:
            return self.list_display_links
        else:
            # Use only the first item in list_display as link
            return list(self.list_display)[:1]

    def make_result_list(self):
        """Build paginator/result state; returns an error response on failure."""
        # Get search parameters from the query string.
        self.base_queryset = self.queryset()
        self.list_queryset = self.get_list_queryset()
        self.ordering_field_columns = self.get_ordering_field_columns()
        self.paginator = self.get_paginator()

        # Get the number of objects, with admin filters applied.
        self.result_count = self.paginator.count

        # Get the total number of objects, with no admin filters applied.
        # Perform a slight optimization: Check to see whether any filters were
        # given. If not, use paginator.hits to calculate the number of objects,
        # because we've already done paginator.hits and the value is cached.
        if not self.list_queryset.query.where:
            self.full_result_count = self.result_count
        else:
            self.full_result_count = self.base_queryset.count()

        self.can_show_all = self.result_count <= self.list_max_show_all
        self.multi_page = self.result_count > self.list_per_page

        # Get the list of objects to display on this page.
        if (self.show_all and self.can_show_all) or not self.multi_page:
            self.result_list = self.list_queryset._clone()
        else:
            try:
                self.result_list = self.paginator.page(
                    self.page_num + 1).object_list
            except InvalidPage:
                # If we already retried with the error flag set, give up and
                # show the invalid-setup page instead of redirecting forever.
                if ERROR_FLAG in self.request.GET.keys():
                    return SimpleTemplateResponse('xadmin/views/invalid_setup.html', {
                        'title': _('Database error'),
                    })
                return HttpResponseRedirect(self.request.path + '?' + ERROR_FLAG + '=1')
        self.has_more = self.result_count > (
            self.list_per_page * self.page_num + len(self.result_list))

    @filter_hook
    def get_result_list(self):
        """Build the result list for a GET request (hook point for plugins)."""
        return self.make_result_list()

    @filter_hook
    def post_result_list(self):
        """Build the result list for a POST request (hook point for plugins)."""
        return self.make_result_list()

    @filter_hook
    def get_list_queryset(self):
        """
        Get model queryset. The query has been filted and ordered.
        """
        # First, get queryset from base class.
        queryset = self.queryset()

        # Use select_related() if one of the list_display options is a field
        # with a relationship and the provided queryset doesn't already have
        # select_related defined.
        if not queryset.query.select_related:
            if self.list_select_related:
                queryset = queryset.select_related()
            else:
                for field_name in self.list_display:
                    try:
                        field = self.opts.get_field(field_name)
                    except models.FieldDoesNotExist:
                        # Not a db field (e.g. a method column); ignore.
                        pass
                    else:
                        if isinstance(field.rel, models.ManyToOneRel):
                            queryset = queryset.select_related()
                            break

        # Then, set queryset ordering.
        queryset = queryset.order_by(*self.get_ordering())

        # Return the queryset.
        return queryset

    # List ordering

    def _get_default_ordering(self):
        """Ordering from the admin class, falling back to the model Meta."""
        ordering = []
        if self.ordering:
            ordering = self.ordering
        elif self.opts.ordering:
            ordering = self.opts.ordering
        return ordering

    @filter_hook
    def get_ordering_field(self, field_name):
        """
        Returns the proper model field name corresponding to the given
        field_name to use for ordering. field_name may either be the name of a
        proper model field or the name of a method (on the admin or model) or a
        callable with the 'admin_order_field' attribute. Returns None if no
        proper model field name can be matched.
        """
        try:
            field = self.opts.get_field(field_name)
            return field.name
        except models.FieldDoesNotExist:
            # See whether field_name is a name of a non-field
            # that allows sorting.
            if callable(field_name):
                attr = field_name
            elif hasattr(self, field_name):
                attr = getattr(self, field_name)
            else:
                attr = getattr(self.model, field_name)
            return getattr(attr, 'admin_order_field', None)

    @filter_hook
    def get_ordering(self):
        """
        Returns the list of ordering fields for the change list.
        First we check the get_ordering() method in model admin, then we check
        the object's default ordering. Then, any manually-specified ordering
        from the query string overrides anything. Finally, a deterministic
        order is guaranteed by ensuring the primary key is used as the last
        ordering field.
        """
        ordering = list(super(ListAdminView, self).get_ordering()
                        or self._get_default_ordering())
        if ORDER_VAR in self.params and self.params[ORDER_VAR]:
            # Clear ordering and used params
            # Each dot-separated entry looks like '[-]field'; rpartition
            # splits off an optional leading '-' prefix.
            ordering = [pfx + self.get_ordering_field(field_name) for n, pfx, field_name in
                        map(
                            lambda p: p.rpartition('-'),
                            self.params[ORDER_VAR].split('.'))
                        if self.get_ordering_field(field_name)]

        # Ensure that the primary key is systematically present in the list of
        # ordering fields so we can guarantee a deterministic order across all
        # database backends.
        pk_name = self.opts.pk.name
        if not (set(ordering) & set(['pk', '-pk', pk_name, '-' + pk_name])):
            # The two sets do not intersect, meaning the pk isn't present. So
            # we add it.
            ordering.append('-pk')

        return ordering

    @filter_hook
    def get_ordering_field_columns(self):
        """
        Returns a SortedDict of ordering field column numbers and asc/desc
        """
        # We must cope with more than one column having the same underlying sort
        # field, so we base things on column numbers.
        ordering = self._get_default_ordering()
        ordering_fields = SortedDict()
        if ORDER_VAR not in self.params or not self.params[ORDER_VAR]:
            # for ordering specified on ModelAdmin or model Meta, we don't know
            # the right column numbers absolutely, because there might be more
            # than one column associated with that ordering, so we guess.
            for field in ordering:
                if field.startswith('-'):
                    field = field[1:]
                    order_type = 'desc'
                else:
                    order_type = 'asc'
                for attr in self.list_display:
                    if self.get_ordering_field(attr) == field:
                        ordering_fields[field] = order_type
                        break
        else:
            for p in self.params[ORDER_VAR].split('.'):
                none, pfx, field_name = p.rpartition('-')
                ordering_fields[field_name] = 'desc' if pfx == '-' else 'asc'
        return ordering_fields

    def get_check_field_url(self, f):
        """
        Return the select column menu items link.
        We must use base_list_display, because list_display maybe changed by plugins.
        """
        fields = [fd for fd in self.base_list_display if fd != f.name]
        if len(self.base_list_display) == len(fields):
            # f was not displayed: toggle it on (pk fields go first).
            if f.primary_key:
                fields.insert(0, f.name)
            else:
                fields.append(f.name)
        return self.get_query_string({COL_LIST_VAR: '.'.join(fields)})

    def get_model_method_fields(self):
        """
        Return the fields info defined in model. use FakeMethodField class wrap method as a db field.
        """
        methods = []
        for name in dir(self):
            try:
                if getattr(getattr(self, name), 'is_column', False):
                    methods.append((name, getattr(self, name)))
            except:
                # Some attributes raise on access; skip them.
                pass
        return [FakeMethodField(name, getattr(method, 'short_description', capfirst(name.replace('_', ' '))))
                for name, method in methods]

    @filter_hook
    def get_context(self):
        """
        Prepare the context for templates.
        """
        self.title = _('%s List') % force_unicode(self.opts.verbose_name)

        # (field, is_currently_displayed, toggle_url) triples for the
        # select-columns panel.
        model_fields = [(f, f.name in self.list_display, self.get_check_field_url(f))
                        for f in (self.opts.fields + self.get_model_method_fields()) if f.name not in self.list_exclude]

        new_context = {
            'module_name': force_unicode(self.opts.verbose_name_plural),
            'title': self.title,
            'cl': self,
            'model_fields': model_fields,
            'clean_select_field_url': self.get_query_string(remove=[COL_LIST_VAR]),
            'has_add_permission': self.has_add_permission(),
            'app_label': self.app_label,
            'brand_name': self.opts.verbose_name_plural,
            'brand_icon': self.get_model_icon(self.model),
            'add_url': self.model_admin_url('add'),
            'result_headers': self.result_headers(),
            'results': self.results()
        }
        context = super(ListAdminView, self).get_context()
        context.update(new_context)
        return context

    @filter_hook
    def get_response(self, context, *args, **kwargs):
        """Hook for plugins to return a custom response; default is None."""
        pass

    @csrf_protect_m
    @filter_hook
    def get(self, request, *args, **kwargs):
        """
        The 'change list' admin view for this model.
        """
        # An error/redirect response from building the result list wins.
        response = self.get_result_list()
        if response:
            return response

        context = self.get_context()
        context.update(kwargs or {})

        response = self.get_response(context, *args, **kwargs)

        return response or TemplateResponse(request, self.object_list_template or
                                            self.get_template_list('views/model_list.html'), context, current_app=self.admin_site.name)

    @filter_hook
    def post_response(self, *args, **kwargs):
        """Hook for plugins to handle POSTs; default is None (fall through)."""
        pass

    @csrf_protect_m
    @filter_hook
    def post(self, request, *args, **kwargs):
        # Fall through to the normal GET rendering when no plugin handles it.
        return self.post_result_list() or self.post_response(*args, **kwargs) or self.get(request, *args, **kwargs)

    @filter_hook
    def get_paginator(self):
        """Instantiate paginator_class over the filtered queryset."""
        return self.paginator_class(self.list_queryset, self.list_per_page, 0, True)

    @filter_hook
    def get_page_number(self, i):
        """Render one pagination entry: the DOT, the current page, or a link."""
        if i == DOT:
            return mark_safe(u'<span class="dot-page">...</span> ')
        elif i == self.page_num:
            return mark_safe(u'<span class="this-page">%d</span> ' % (i + 1))
        else:
            return mark_safe(u'<a href="%s"%s>%d</a> ' % (escape(self.get_query_string({PAGE_VAR: i})), (i == self.paginator.num_pages - 1 and ' class="end"' or ''), i + 1))

    # Result List methods
    @filter_hook
    def result_header(self, field_name, row):
        """Build the ResultHeader cell for one column, with sort state/URLs."""
        ordering_field_columns = self.ordering_field_columns
        item = ResultHeader(field_name, row)
        text, attr = label_for_field(field_name, self.model,
                                     model_admin=self,
                                     return_attr=True
                                     )
        item.text = text
        item.attr = attr
        if attr and not getattr(attr, "admin_order_field", None):
            # A non-field callable without admin_order_field can't be sorted.
            return item

        # OK, it is sortable if we got this far
        th_classes = ['sortable']
        order_type = ''
        new_order_type = 'desc'
        sort_priority = 0
        sorted = False  # NOTE: shadows the builtin within this method
        # Is it currently being sorted on?
        if field_name in ordering_field_columns:
            sorted = True
            order_type = ordering_field_columns.get(field_name).lower()
            sort_priority = ordering_field_columns.keys().index(field_name) + 1
            th_classes.append('sorted %sending' % order_type)
            new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]

        # build new ordering param
        o_list_asc = []  # URL for making this field the primary sort
        o_list_desc = []  # URL for making this field the primary sort
        o_list_remove = []  # URL for removing this field from sort
        o_list_toggle = []  # URL for toggling order type for this field
        make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)

        for j, ot in ordering_field_columns.items():
            if j == field_name:  # Same column
                param = make_qs_param(new_order_type, j)
                # We want clicking on this header to bring the ordering to the
                # front
                o_list_asc.insert(0, j)
                o_list_desc.insert(0, '-' + j)
                o_list_toggle.append(param)
                # o_list_remove - omit
            else:
                param = make_qs_param(ot, j)
                o_list_asc.append(param)
                o_list_desc.append(param)
                o_list_toggle.append(param)
                o_list_remove.append(param)

        if field_name not in ordering_field_columns:
            o_list_asc.insert(0, field_name)
            o_list_desc.insert(0, '-' + field_name)

        item.sorted = sorted
        item.sortable = True
        item.ascending = (order_type == "asc")
        item.sort_priority = sort_priority

        menus = [
            ('asc', o_list_asc, 'caret-up', _(u'Sort ASC')),
            ('desc', o_list_desc, 'caret-down', _(u'Sort DESC')),
        ]
        if sorted:
            row['num_sorted_fields'] = row['num_sorted_fields'] + 1
            menus.append((None, o_list_remove, 'remove', _(u'Cancel Sort')))
            item.btns.append('<a class="toggle" href="%s"><i class="icon-%s"></i></a>' % (
                self.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}), 'sort-up' if order_type == "asc" else 'sort-down'))

        item.menus.extend(['<li%s><a href="%s" class="active"><i class="icon-%s"></i> %s</a></li>' %
                           (
                               (' class="active"' if sorted and order_type == i[
                                   0] else ''),
                               self.get_query_string({ORDER_VAR: '.'.join(i[1])}), i[2], i[3]) for i in menus])
        item.classes.extend(th_classes)

        return item

    @filter_hook
    def result_headers(self):
        """
        Generates the list column headers.
        """
        row = ResultRow()
        row['num_sorted_fields'] = 0
        row.cells = [self.result_header(
            field_name, row) for field_name in self.list_display]
        return row

    @filter_hook
    def result_item(self, obj, field_name, row):
        """
        Generates the actual list of data.
        """
        item = ResultItem(field_name, row)
        try:
            f, attr, value = lookup_field(field_name, obj, self)
        except (AttributeError, ObjectDoesNotExist):
            item.text = EMPTY_CHANGELIST_VALUE
        else:
            if f is None:
                # field_name resolved to a callable/method, not a db field.
                item.allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                if boolean:
                    item.allow_tags = True
                    item.text = boolean_icon(value)
                else:
                    item.text = smart_unicode(value)
            else:
                if isinstance(f.rel, models.ManyToOneRel):
                    field_val = getattr(obj, f.name)
                    if field_val is None:
                        item.text = EMPTY_CHANGELIST_VALUE
                    else:
                        item.text = field_val
                else:
                    item.text = display_for_field(value, f)
                if isinstance(f, models.DateField)\
                        or isinstance(f, models.TimeField)\
                        or isinstance(f, models.ForeignKey):
                    item.classes.append('nowrap')
            item.field = f
            item.attr = attr
            item.value = value

        # If list_display_links not defined, add the link tag to the first field
        if (item.row['is_display_first'] and not self.list_display_links) \
                or field_name in self.list_display_links:
            url = self.url_for_result(obj)
            item.row['is_display_first'] = False
            item.wraps.append(u'<a href="%s">%%s</a>' % url)

        return item

    @filter_hook
    def result_row(self, obj):
        """Build the ResultRow (with cells) for one model instance."""
        row = ResultRow()
        row['is_display_first'] = True
        row['object'] = obj
        row.cells = [self.result_item(
            obj, field_name, row) for field_name in self.list_display]
        return row

    @filter_hook
    def results(self):
        """Build the rendered rows for the current result page."""
        results = []
        for obj in self.result_list:
            results.append(self.result_row(obj))
        return results

    @filter_hook
    def url_for_result(self, result):
        """Link target for a row: change view if permitted, else detail view."""
        if self.has_change_permission(result):
            return self.model_admin_url("change", getattr(result, self.pk_attname))
        else:
            return self.model_admin_url("detail", getattr(result, self.pk_attname))

    # Media
    @filter_hook
    def get_media(self):
        """Add the list-page javascript to the inherited media."""
        return super(ListAdminView, self).get_media() + self.vendor('xadmin.page.list.js')

    # Blocks
    @inclusion_tag('xadmin/includes/pagination.html')
    def block_pagination(self, context, nodes, page_type='normal'):
        """
        Generates the series of links to the pages in a paginated list.
        """
        paginator, page_num = self.paginator, self.page_num

        pagination_required = (
            not self.show_all or not self.can_show_all) and self.multi_page
        if not pagination_required:
            page_range = []
        else:
            ON_EACH_SIDE = {'normal': 5, 'small': 3}.get(page_type, 3)
            ON_ENDS = 2

            # If there are 10 or fewer pages, display links to every page.
            # Otherwise, do some fancy
            if paginator.num_pages <= 10:
                page_range = range(paginator.num_pages)
            else:
                # Insert "smart" pagination links, so that there are always ON_ENDS
                # links at either end of the list of pages, and there are always
                # ON_EACH_SIDE links at either end of the "current page" link.
                page_range = []
                if page_num > (ON_EACH_SIDE + ON_ENDS):
                    page_range.extend(range(0, ON_EACH_SIDE - 1))
                    page_range.append(DOT)
                    page_range.extend(
                        range(page_num - ON_EACH_SIDE, page_num + 1))
                else:
                    page_range.extend(range(0, page_num + 1))
                if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
                    page_range.extend(
                        range(page_num + 1, page_num + ON_EACH_SIDE + 1))
                    page_range.append(DOT)
                    page_range.extend(range(
                        paginator.num_pages - ON_ENDS, paginator.num_pages))
                else:
                    page_range.extend(range(page_num + 1, paginator.num_pages))

        need_show_all_link = self.can_show_all and not self.show_all and self.multi_page
        return {
            'cl': self,
            'pagination_required': pagination_required,
            'show_all_url': need_show_all_link and self.get_query_string({ALL_VAR: ''}),
            'page_range': map(self.get_page_number, page_range),
            'ALL_VAR': ALL_VAR,
            '1': 1,
        }
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Utility to lift subgraphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import op_selector
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import compat
from tensorflow.python.util import object_identity
from tensorflow.python.util.tf_export import tf_export
UnliftableError = op_selector.UnliftableError
def _as_operation(op_or_tensor):
  """Return the `Operation` backing `op_or_tensor` (a Tensor or an Operation)."""
  # Tensors carry a reference to their producing op; Operations pass through.
  return op_or_tensor.op if isinstance(op_or_tensor, ops.Tensor) else op_or_tensor
def _constant_inputs(op_or_tensor):
  """True iff every graph input of the op is a `Const` with no control inputs.

  Vacuously true when the op has no graph inputs at all (matching `all()` on
  an empty iterable).
  """
  for graph_input in op_selector.graph_inputs(_as_operation(op_or_tensor)):
    input_op = _as_operation(graph_input)
    if input_op.type != u"Const" or input_op.control_inputs:
      return False
  return True
# Represents an input to `copied_op` which must be updated once
# `old_graph_tensor` has been copied.
# Fields:
#   copied_op: the already-copied op whose input slot must be patched
#     (filled in via `_replace` after the op has been created).
#   input_index: which input slot of `copied_op` to rewrite.
#   old_graph_tensor: the source-graph tensor whose copy should feed the slot.
_InputMutation = collections.namedtuple(
    "_InputMutation",
    ["copied_op", "input_index", "old_graph_tensor"])
# Represents a control input to `copied_op` which must be added once
# `old_graph_op` has been copied.
# Fields:
#   copied_op: the already-copied op missing a control dependency.
#   old_graph_op: the source-graph op whose copy must become a control input.
_ControlMutation = collections.namedtuple(
    "_ControlMutation",
    ["copied_op", "old_graph_op"])
def _copy_non_source(op, graph, op_map, base_graph):
  """Copy an op directly to a given graph.
  Generally `op`'s inputs should already have been copied. If this is not the
  case, for example with v1 while_loops, then `_copy_non_source` inserts
  placeholders for the unavailable Tensors and returns a list of required
  mutations.
  Args:
    op: The op to be copied.
    graph: The destination graph.
    op_map: A dict mapping ops and tensors in the old graph to the new one.
    base_graph: The graph we're copying from, for any necessary functions.
  Returns:
    A tuple of (required_inputs, required_control_inputs):
      required_inputs:
        A list of `_InputMutation` tuples containing inputs to `copied_op` which
        must be updated once `old_graph_tensor` has been copied.
      required_control_inputs:
        A list of `_ControlMutation` tuples containing control inputs to
        `copied_op` which must be added once `old_graph_op` has been copied.
  """
  input_mutations = []
  control_mutations = []
  # Resolve each data input through `op_map`; inputs not yet copied (because
  # of a cycle in the graph) get a temporary placeholder plus a recorded
  # post-hoc mutation.
  copied_inputs = []
  for input_index, original_input in enumerate(op.inputs):
    copied_input = op_map.get(original_input, None)
    if copied_input is None:
      # An input for this op is missing due to a loop in the graph. We'll insert
      # a placeholder for now and return information about the required post-hoc
      # mutation.
      copied_input = array_ops.placeholder(
          name="unused_control_flow_input",
          shape=original_input.shape,
          dtype=original_input.dtype)
      input_mutations.append(
          # `copied_op` is filled in below, after we've created it.
          _InputMutation(copied_op=None,
                         input_index=input_index,
                         old_graph_tensor=original_input))
    copied_inputs.append(copied_input)
  # Same treatment for control inputs: already-copied ones are attached via
  # `control_dependencies` below; missing ones become deferred mutations.
  copied_control_inputs = []
  for original_control_input in op.control_inputs:
    copied_control_input = op_map.get(original_control_input, None)
    if copied_control_input is None:
      control_mutations.append(
          _ControlMutation(copied_op=None,
                           old_graph_op=original_control_input))
    else:
      copied_control_inputs.append(copied_control_input)
  # Don't copy over nodes with _tpu_replicate attribute. This attributed is used
  # to signal that the op was built inside a tpu_replicate context; if we're
  # lifting it to another graph we're similarly lifting it into another context.
  with ops.control_dependencies(copied_control_inputs), ops.device(op.device):
    # pylint: disable=protected-access
    f = base_graph._functions.get(op.type, None)
    if f is not None and compat.as_str(f.name) not in graph._functions:
      f.add_to_graph(graph)
    # pylint: enable=protected-access
    # Create a new op in the destination graph if it doesn't exist before.
    copied_op = graph.create_op(
        op_type=op.type,
        inputs=copied_inputs,
        dtypes=[x.dtype for x in op.outputs],
        attrs={
            key: value for key, value in op.node_def.attr.items()
            if not key.startswith("_class") and
            not key.startswith("_tpu_replicate")
        },  # b/128981532.
        name=op.name)
  # Record the copied op and each of its output tensors so that downstream
  # ops can resolve their inputs through `op_map`.
  op_map[op] = copied_op
  for i, o in enumerate(op.outputs):
    op_map[o] = copied_op.outputs[i]
  return ([mutation._replace(copied_op=copied_op)
           for mutation in input_mutations],
          [mutation._replace(copied_op=copied_op)
           for mutation in control_mutations])
def _copy_source(s, graph, op_map, handle_captures, inverse_captures,
                 base_graph):
  """Create a source in a graph based on a Tensor from a different graph.
  This function creates a placeholder analog of `s` in a graph with the
  following behavior:
  1) If s is a captured Tensor or Variable and handle_captures is set to True,
  simply capture it in the new graph as well.
  2) If s is a PlaceholderWithDefault whose default is a constant, preserve
  said default in the new graph.
  3) When applicable, copy resource variable metadata from `s` to the newly
  created placeholder.
  Args:
    s: The source of interest.
    graph: The destination graph.
    op_map: A dict mapping ops and tensors in the old graph to the new one.
    handle_captures: A boolean indicating whether to re-capture s in the new
      graph or simply create a vanilla placeholder.
    inverse_captures: A dict mapping s back to the Tensor or Variable that it
      captures.
    base_graph: The graph being copied from.
  """
  if handle_captures and s in inverse_captures:
    copied_placeholder = graph.capture(inverse_captures[s], name=s.op.name)
  elif s.op.type == "PlaceholderWithDefault" and _constant_inputs(s):
    # Copy the default value to the graph.
    default_value = s.op.inputs[0]
    unavailable_inputs, unavailable_control_inputs = _copy_non_source(
        op=default_value.op, graph=graph, op_map=op_map,
        base_graph=base_graph)
    # The default must be a self-contained constant; any pending mutations
    # mean it depended on uncopied nodes, which we cannot support here.
    if unavailable_inputs or unavailable_control_inputs:
      raise AssertionError(
          "Could not copy source node {} because it has inputs."
          .format(default_value))
    with ops.device(s.op.device):
      copied_placeholder = array_ops.placeholder_with_default(
          input=op_map[default_value], shape=s.shape, name=s.op.name)
  else:
    # Fallback: a plain placeholder with the same dtype/shape/name.
    with ops.device(s.op.device):
      copied_placeholder = array_ops.placeholder(
          dtype=s.dtype, shape=s.shape, name=s.op.name)
  # Propagate resource-handle shape/type metadata so resource ops consuming
  # the copied placeholder keep working.
  base_handle = resource_variable_ops.get_resource_handle_data(s)
  if base_handle.shape_and_type:
    resource_variable_ops._set_handle_shapes_and_types(  # pylint: disable=protected-access
        copied_placeholder,
        base_handle,
        graph_mode=True)
  op_map[s] = copied_placeholder
  # Add an entry for the op of the source tensor so that if there are any nodes
  # depending on that op via control dependencies it can work correctly.
  op_map[s.op] = copied_placeholder.op
@tf_export("__internal__.lift_to_graph", v1=[])
def lift_to_graph(tensors,
                  graph,
                  sources=None,
                  disallowed_placeholders=None,
                  add_sources=False,
                  handle_captures=False,
                  base_graph=None,
                  op_map=None):
  """Copies the tensor and all its inputs recursively to the outer graph.
  Args:
    tensors: The Tensors to lift.
    graph: The graph to lift to.
    sources: Optional sequence of nodes to start from. If omitted the whole
      subgraph which feeds into `init_tensor` is lifted.
    disallowed_placeholders: An optional set of ops which may not appear in the
      lifted graph. Defaults to all placeholders.
    add_sources: A boolean indicating whether placeholders which are not in
      sources should be allowed.
    handle_captures: A boolean indicating whether to re-capture s in the new
      graph or simply create a vanilla placeholder.
    base_graph: The graph from which to lift ops. This will be inferred if not
      specified.
    op_map: A map contains all the existing nodes that have been lifted to the
      destination graph, so they won't be lifted and copied again.
  Returns:
    A mapping from ops in the current default graph to ops in `graph`.
  Raises:
    UnliftableError: If a placeholder blocks lifting.
  """
  # Variables are lifted via an identity mapping (see the `with graph` block
  # below); everything else must have its producing subgraph copied.
  variable_init_tensors = []
  init_tensors = []
  for tensor in tensors:
    if isinstance(tensor, resource_variable_ops.ResourceVariable):
      variable_init_tensors.append(tensor)
    else:
      init_tensors.append(tensor)
  base_graph = base_graph or init_tensors[0].graph
  op_map = op_map or object_identity.ObjectIdentityDictionary()
  # Check that the initializer does not depend on any placeholders.
  sources = object_identity.ObjectIdentitySet(sources or [])
  visited_ops = set(x.op for x in sources)
  op_outputs = collections.defaultdict(set)
  # First we extract the subgraph between init_tensors and sources.
  for init_tensor in init_tensors:
    sources.update(op_selector.map_subgraph(
        init_tensor=init_tensor,
        sources=sources,
        disallowed_placeholders=disallowed_placeholders,
        visited_ops=visited_ops,
        op_outputs=op_outputs,
        add_sources=add_sources))
  # Try to topologically sort the nodes we've extracted. Now we know how many of
  # their outputs are part of this subgraph.
  ops_to_copy = []
  marked_ops = set([])
  # Start from ops none of whose outputs are consumed inside the subgraph
  # (i.e. the "ends" of the extracted subgraph).
  ops_to_visit = [_as_operation(t) for t in init_tensors
                  if not op_outputs[_as_operation(t)]]
  unvisited_ops = set(ops_to_visit)
  while unvisited_ops:
    while ops_to_visit:
      op = ops_to_visit.pop()
      if op in marked_ops:
        continue
      marked_ops.add(op)
      ops_to_copy.append(op)
      for inp in op_selector.graph_inputs(op):
        # Don't lift the TPUReplicateMetadata nodes out of the function, because
        # it has no registered kernels.
        if inp.type == "TPUReplicateMetadata":
          continue
        unvisited_ops.add(inp)
        # Only descend into an input once all of its in-subgraph consumers
        # have been marked, which yields a reverse topological order.
        if (all(x in marked_ops for x in op_outputs[inp]) and
            inp not in sources):
          ops_to_visit.append(inp)
    unvisited_ops.difference_update(marked_ops)
    if unvisited_ops:
      # `unvisited_ops` should only have elements if the graph has a loop. In
      # this case we want to keep copying and there's no topological ordering;
      # we'll do ugly post-hoc mutations instead.
      ops_to_visit.append(next(iter(unvisited_ops)))
  # When lifting from one FuncGraph to another, we will need to capture the
  # relevant tensors as well.
  captures = []
  inverse_captures = object_identity.ObjectIdentityDictionary()
  internal_captures = []
  if (isinstance(base_graph, func_graph.FuncGraph) and
      isinstance(graph, func_graph.FuncGraph)):
    captures = base_graph.captures
    for external_capture, internal_capture in captures:
      inverse_captures[internal_capture] = external_capture
    internal_captures = base_graph.internal_captures
  # ops_to_copy now holds a reverse topologically sorted list of ops which
  # ends in the initializer. We copy those to the outermost graph and
  # build the initialization op there.
  with graph.as_default():
    for i in variable_init_tensors:
      op_map[i] = i
    source_ops = set()
    # Add the sources in the same order as the original graph.
    for s in internal_captures:
      if s in sources:
        sources.remove(s)
        source_ops.add(s.op)
        _copy_source(
            s=s,
            graph=graph,
            op_map=op_map,
            handle_captures=handle_captures,
            inverse_captures=inverse_captures,
            base_graph=base_graph)
    # Remaining sources (not internal captures) get placeholders too.
    for s in sources:
      source_ops.add(s.op)
      _copy_source(
          s=s,
          graph=graph,
          op_map=op_map,
          handle_captures=handle_captures,
          inverse_captures=inverse_captures,
          base_graph=base_graph)
    input_mutations = []
    control_mutations = []
    # Copy in (reverse of reverse-topological, i.e. dependency-first) order,
    # accumulating deferred mutations for inputs that belong to cycles.
    for op in reversed(ops_to_copy):
      if op in source_ops or op in op_map:
        continue
      new_input_mutations, new_control_mutations = _copy_non_source(
          op=op, graph=graph, op_map=op_map, base_graph=base_graph)
      input_mutations.extend(new_input_mutations)
      control_mutations.extend(new_control_mutations)
    # Mutate the new graph to insert any loops which existed in the source
    # graph due to v1 while_loops.
    #
    # pylint: disable=protected-access
    with graph._mutation_lock():
      for mutation in input_mutations:
        mutation.copied_op._update_input(
            mutation.input_index, op_map[mutation.old_graph_tensor])
      for mutation in control_mutations:
        # Don't lift the TPUReplicateMetadata nodes out of the function, because
        # it has no registered kernels.
        if mutation.old_graph_op.type == "TPUReplicateMetadata":
          continue
        mutation.copied_op._add_control_input(op_map[mutation.old_graph_op])
    # pylint: enable=protected-access
  return op_map
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import shlex
import textwrap
from tempfile import NamedTemporaryFile
from urllib.parse import urlparse
import pytest
import requests
from airflow.providers.google.cloud.example_dags.example_dataflow_flex_template import (
BQ_FLEX_TEMPLATE_DATASET,
BQ_FLEX_TEMPLATE_LOCATION,
DATAFLOW_FLEX_TEMPLATE_JOB_NAME,
GCS_FLEX_TEMPLATE_TEMPLATE_PATH,
PUBSUB_FLEX_TEMPLATE_SUBSCRIPTION,
PUBSUB_FLEX_TEMPLATE_TOPIC,
)
from airflow.providers.google.cloud.example_dags.example_dataflow_sql import (
BQ_SQL_DATASET,
DATAFLOW_SQL_JOB_NAME,
DATAFLOW_SQL_LOCATION,
)
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_DATAFLOW_KEY, GCP_GCS_TRANSFER_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_DATAFLOW_KEY)
class CloudDataflowExampleDagsSystemTest(GoogleSystemTest):
    """Runs the classic Dataflow example DAGs end to end against real GCP.
    Each test only triggers one example DAG from CLOUD_DAG_FOLDER while
    `provide_gcp_context` supplies the service-account credentials.
    """
    @provide_gcp_context(GCP_DATAFLOW_KEY)
    def test_run_example_gcp_dataflow_native_java(self):
        # Triggers the 'example_gcp_dataflow_native_java' example DAG.
        self.run_dag('example_gcp_dataflow_native_java', CLOUD_DAG_FOLDER)
    @provide_gcp_context(GCP_DATAFLOW_KEY)
    def test_run_example_gcp_dataflow_native_python(self):
        # Triggers the 'example_gcp_dataflow_native_python' example DAG.
        self.run_dag('example_gcp_dataflow_native_python', CLOUD_DAG_FOLDER)
    @provide_gcp_context(GCP_DATAFLOW_KEY)
    def test_run_example_gcp_dataflow_native_python_async(self):
        # Triggers the asynchronous variant of the native Python example DAG.
        self.run_dag('example_gcp_dataflow_native_python_async', CLOUD_DAG_FOLDER)
    @provide_gcp_context(GCP_DATAFLOW_KEY)
    def test_run_example_gcp_dataflow_template(self):
        # Triggers the 'example_gcp_dataflow_template' example DAG.
        self.run_dag('example_gcp_dataflow_template', CLOUD_DAG_FOLDER)
# Project used to qualify image and dataset names; falls back to a placeholder
# when GCP_PROJECT_ID is not set in the environment.
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
# Container Registry tag under which setUp builds the Flex Template image.
GCR_FLEX_TEMPLATE_IMAGE = f"gcr.io/{GCP_PROJECT_ID}/samples-dataflow-streaming-beam-sql:latest"
# https://github.com/GoogleCloudPlatform/java-docs-samples/tree/954553c/dataflow/flex-templates/streaming_beam_sql
# The bucket name is derived from the gs:// template path declared by the DAG.
GCS_TEMPLATE_PARTS = urlparse(GCS_FLEX_TEMPLATE_TEMPLATE_PATH)
GCS_FLEX_TEMPLATE_BUCKET_NAME = GCS_TEMPLATE_PARTS.netloc
# Upstream sample repo, pinned commit, and subdirectory of the Beam SQL sample.
EXAMPLE_FLEX_TEMPLATE_REPO = "GoogleCloudPlatform/java-docs-samples"
EXAMPLE_FLEX_TEMPLATE_COMMIT = "deb0745be1d1ac1d133e1f0a7faa9413dbfbe5fe"
EXAMPLE_FLEX_TEMPLATE_SUBDIR = "dataflow/flex-templates/streaming_beam_sql"
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_GCS_TRANSFER_KEY)
class CloudDataflowExampleDagFlexTemplateJavagSystemTest(GoogleSystemTest):
    """System test for the Java Dataflow Flex Template example DAG.
    setUp provisions everything the DAG needs (GCS bucket, container image,
    template spec, Pub/Sub topic + subscription, scheduler publishers, BQ
    dataset); tearDown removes all of it again. Both run real gcloud/gsutil/bq
    commands, so ordering matters.
    """
    @provide_gcp_context(GCP_GCS_TRANSFER_KEY, project_id=GoogleSystemTest._project_id())
    def setUp(self) -> None:
        """Provision the GCP resources consumed by the flex-template DAG."""
        super().setUp()
        # Create a Cloud Storage bucket
        self.execute_cmd(["gsutil", "mb", f"gs://{GCS_FLEX_TEMPLATE_BUCKET_NAME}"])
        # Build image with pipeline
        with NamedTemporaryFile("w") as f:
            # Cloud Build config: clone the sample repo at a pinned commit,
            # build the jar with Maven, then build and push the Docker image.
            cloud_build_config = {
                'steps': [
                    {'name': 'gcr.io/cloud-builders/git', 'args': ['clone', "$_EXAMPLE_REPO", "repo_dir"]},
                    {
                        'name': 'gcr.io/cloud-builders/git',
                        'args': ['checkout', '$_EXAMPLE_COMMIT'],
                        'dir': 'repo_dir',
                    },
                    {
                        'name': 'maven',
                        'args': ['mvn', 'clean', 'package'],
                        'dir': 'repo_dir/$_EXAMPLE_SUBDIR',
                    },
                    {
                        'name': 'gcr.io/cloud-builders/docker',
                        'args': ['build', '-t', '$_TEMPLATE_IMAGE', '.'],
                        'dir': 'repo_dir/$_EXAMPLE_SUBDIR',
                    },
                ],
                'images': ['$_TEMPLATE_IMAGE'],
            }
            f.write(json.dumps(cloud_build_config))
            f.flush()
            # Echo the config for debugging the system-test run.
            self.execute_cmd(["cat", f.name])
            substitutions = {
                "_TEMPLATE_IMAGE": GCR_FLEX_TEMPLATE_IMAGE,
                "_EXAMPLE_REPO": f"https://github.com/{EXAMPLE_FLEX_TEMPLATE_REPO}.git",
                "_EXAMPLE_SUBDIR": EXAMPLE_FLEX_TEMPLATE_SUBDIR,
                "_EXAMPLE_COMMIT": EXAMPLE_FLEX_TEMPLATE_COMMIT,
            }
            self.execute_cmd(
                [
                    "gcloud",
                    "builds",
                    "submit",
                    "--substitutions=" + ",".join(f"{k}={shlex.quote(v)}" for k, v in substitutions.items()),
                    f"--config={f.name}",
                    "--no-source",
                ]
            )
        # Build template
        with NamedTemporaryFile() as f:  # type: ignore
            # Fetch the template metadata file for the pinned sample commit.
            manifest_url = (
                f"https://raw.githubusercontent.com/"
                f"{EXAMPLE_FLEX_TEMPLATE_REPO}/{EXAMPLE_FLEX_TEMPLATE_COMMIT}/"
                f"{EXAMPLE_FLEX_TEMPLATE_SUBDIR}/metadata.json"
            )
            f.write(requests.get(manifest_url).content)  # type: ignore
            f.flush()
            self.execute_cmd(
                [
                    "gcloud",
                    "beta",
                    "dataflow",
                    "flex-template",
                    "build",
                    GCS_FLEX_TEMPLATE_TEMPLATE_PATH,
                    "--image",
                    GCR_FLEX_TEMPLATE_IMAGE,
                    "--sdk-language",
                    "JAVA",
                    "--metadata-file",
                    f.name,
                ]
            )
        # Create a Pub/Sub topic and a subscription to that topic
        self.execute_cmd(["gcloud", "pubsub", "topics", "create", PUBSUB_FLEX_TEMPLATE_TOPIC])
        self.execute_cmd(
            [
                "gcloud",
                "pubsub",
                "subscriptions",
                "create",
                "--topic",
                PUBSUB_FLEX_TEMPLATE_TOPIC,
                PUBSUB_FLEX_TEMPLATE_SUBSCRIPTION,
            ]
        )
        # Create a publisher for "positive ratings" that publishes 1 message per minute
        self.execute_cmd(
            [
                "gcloud",
                "scheduler",
                "jobs",
                "create",
                "pubsub",
                "positive-ratings-publisher",
                '--schedule=* * * * *',
                f"--topic={PUBSUB_FLEX_TEMPLATE_TOPIC}",
                '--message-body=\'{"url": "https://beam.apache.org/", "review": "positive"}\'',
            ]
        )
        # Create and run another similar publisher for "negative ratings" that
        self.execute_cmd(
            [
                "gcloud",
                "scheduler",
                "jobs",
                "create",
                "pubsub",
                "negative-ratings-publisher",
                '--schedule=*/2 * * * *',
                f"--topic={PUBSUB_FLEX_TEMPLATE_TOPIC}",
                '--message-body=\'{"url": "https://beam.apache.org/", "review": "negative"}\'',
            ]
        )
        # Create a BigQuery dataset
        self.execute_cmd(["bq", "mk", "--dataset", f'{self._project_id()}:{BQ_FLEX_TEMPLATE_DATASET}'])
    @provide_gcp_context(GCP_GCS_TRANSFER_KEY)
    def test_run_example_dag_function(self):
        """Trigger the flex-template example DAG end to end."""
        self.run_dag("example_gcp_dataflow_flex_template_java", CLOUD_DAG_FOLDER)
    @provide_gcp_context(GCP_GCS_TRANSFER_KEY, project_id=GoogleSystemTest._project_id())
    def tearDown(self) -> None:
        """Tear down every resource created in setUp, best effort, in order."""
        # Stop the Dataflow pipeline.
        self.execute_cmd(
            [
                "bash",
                "-c",
                textwrap.dedent(
                    f"""\
                    gcloud dataflow jobs list \
                      --region={BQ_FLEX_TEMPLATE_LOCATION} \
                      --filter 'NAME:{DATAFLOW_FLEX_TEMPLATE_JOB_NAME} AND STATE=Running' \
                      --format 'value(JOB_ID)' \
                      | xargs -r gcloud dataflow jobs cancel --region={BQ_FLEX_TEMPLATE_LOCATION}
                    """
                ),
            ]
        )
        # Delete the template spec file from Cloud Storage
        self.execute_cmd(["gsutil", "rm", GCS_FLEX_TEMPLATE_TEMPLATE_PATH])
        # Delete the Flex Template container image from Container Registry.
        self.execute_cmd(
            [
                "gcloud",
                "container",
                "images",
                "delete",
                GCR_FLEX_TEMPLATE_IMAGE,
                "--force-delete-tags",
                "--quiet",
            ]
        )
        # Delete the Cloud Scheduler jobs.
        self.execute_cmd(["gcloud", "scheduler", "jobs", "delete", "negative-ratings-publisher", "--quiet"])
        self.execute_cmd(["gcloud", "scheduler", "jobs", "delete", "positive-ratings-publisher", "--quiet"])
        # Delete the Pub/Sub subscription and topic.
        self.execute_cmd(["gcloud", "pubsub", "subscriptions", "delete", PUBSUB_FLEX_TEMPLATE_SUBSCRIPTION])
        self.execute_cmd(["gcloud", "pubsub", "topics", "delete", PUBSUB_FLEX_TEMPLATE_TOPIC])
        # Delete the BigQuery dataset,
        self.execute_cmd(["bq", "rm", "-r", "-f", "-d", f'{self._project_id()}:{BQ_FLEX_TEMPLATE_DATASET}'])
        # Delete the Cloud Storage bucket
        self.execute_cmd(["gsutil", "rm", "-r", f"gs://{GCS_FLEX_TEMPLATE_BUCKET_NAME}"])
        super().tearDown()
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_GCS_TRANSFER_KEY)
class CloudDataflowExampleDagSqlSystemTest(GoogleSystemTest):
    """System test for the Dataflow SQL example DAG.
    setUp loads a small US-states lookup table into a BigQuery input table;
    tearDown queries the job's output table, cancels any still-running job,
    and drops the dataset.
    """
    @provide_gcp_context(GCP_GCS_TRANSFER_KEY, project_id=GoogleSystemTest._project_id())
    def setUp(self) -> None:
        super().setUp()
        # Build image with pipeline
        with NamedTemporaryFile(suffix=".csv") as f:
            f.write(
                textwrap.dedent(
                    """\
                    state_id,state_code,state_name,sales_region
                    1,MO,Missouri,Region_1
                    2,SC,South Carolina,Region_1
                    3,IN,Indiana,Region_1
                    6,DE,Delaware,Region_2
                    15,VT,Vermont,Region_2
                    16,DC,District of Columbia,Region_2
                    19,CT,Connecticut,Region_2
                    20,ME,Maine,Region_2
                    35,PA,Pennsylvania,Region_2
                    38,NJ,New Jersey,Region_2
                    47,MA,Massachusetts,Region_2
                    54,RI,Rhode Island,Region_2
                    55,NY,New York,Region_2
                    60,MD,Maryland,Region_2
                    66,NH,New Hampshire,Region_2
                    4,CA,California,Region_3
                    8,AK,Alaska,Region_3
                    37,WA,Washington,Region_3
                    61,OR,Oregon,Region_3
                    33,HI,Hawaii,Region_4
                    59,AS,American Samoa,Region_4
                    65,GU,Guam,Region_4
                    5,IA,Iowa,Region_5
                    32,NV,Nevada,Region_5
                    11,PR,Puerto Rico,Region_6
                    17,CO,Colorado,Region_6
                    18,MS,Mississippi,Region_6
                    41,AL,Alabama,Region_6
                    42,AR,Arkansas,Region_6
                    43,FL,Florida,Region_6
                    44,NM,New Mexico,Region_6
                    46,GA,Georgia,Region_6
                    48,KS,Kansas,Region_6
                    52,AZ,Arizona,Region_6
                    56,TN,Tennessee,Region_6
                    58,TX,Texas,Region_6
                    63,LA,Louisiana,Region_6
                    7,ID,Idaho,Region_7
                    12,IL,Illinois,Region_7
                    13,ND,North Dakota,Region_7
                    31,MN,Minnesota,Region_7
                    34,MT,Montana,Region_7
                    36,SD,South Dakota,Region_7
                    50,MI,Michigan,Region_7
                    51,UT,Utah,Region_7
                    64,WY,Wyoming,Region_7
                    9,NE,Nebraska,Region_8
                    10,VA,Virginia,Region_8
                    14,OK,Oklahoma,Region_8
                    39,NC,North Carolina,Region_8
                    40,WV,West Virginia,Region_8
                    45,KY,Kentucky,Region_8
                    53,WI,Wisconsin,Region_8
                    57,OH,Ohio,Region_8
                    49,VI,United States Virgin Islands,Region_9
                    62,MP,Commonwealth of the Northern Mariana Islands,Region_9
                    """
                ).encode()
            )
            f.flush()
            # Create the dataset and let bq autodetect the CSV schema.
            self.execute_cmd(["bq", "mk", "--dataset", f'{self._project_id()}:{BQ_SQL_DATASET}'])
            self.execute_cmd(
                ["bq", "load", "--autodetect", "--source_format=CSV", f"{BQ_SQL_DATASET}.beam_input", f.name]
            )
    @provide_gcp_context(GCP_GCS_TRANSFER_KEY, project_id=GoogleSystemTest._project_id())
    def test_run_example_dag_function(self):
        """Trigger the 'example_gcp_dataflow_sql' DAG end to end."""
        self.run_dag("example_gcp_dataflow_sql", CLOUD_DAG_FOLDER)
    @provide_gcp_context(GCP_GCS_TRANSFER_KEY, project_id=GoogleSystemTest._project_id())
    def tearDown(self) -> None:
        # Execute test query
        self.execute_cmd(
            [
                'bq',
                'query',
                '--use_legacy_sql=false',
                f'select * FROM `{self._project_id()}.{BQ_SQL_DATASET}.beam_output`',
            ]
        )
        # Stop the Dataflow pipelines.
        self.execute_cmd(
            [
                "bash",
                "-c",
                textwrap.dedent(
                    f"""\
                    gcloud dataflow jobs list \
                      --region={DATAFLOW_SQL_LOCATION} \
                      --filter 'NAME:{DATAFLOW_SQL_JOB_NAME} AND STATE=Running' \
                      --format 'value(JOB_ID)' \
                      | xargs -r gcloud dataflow jobs cancel --region={DATAFLOW_SQL_LOCATION}
                    """
                ),
            ]
        )
        # Delete the BigQuery dataset,
        self.execute_cmd(["bq", "rm", "-r", "-f", "-d", f'{self._project_id()}:{BQ_SQL_DATASET}'])
        super().tearDown()
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class auditmessageaction(base_resource) :
""" Configuration for message action resource. """
	def __init__(self) :
		# Writable configuration attributes (sent to the NetScaler on add/update).
		self._name = ""
		self._loglevel = ""
		self._stringbuilderexpr = ""
		self._logtonewnslog = ""
		self._bypasssafetycheck = ""
		# Read-only attributes reported back by the NetScaler.
		self._loglevel1 = ""
		self._hits = 0
		self._undefhits = 0
		self._referencecount = 0
		# Resource count populated by count-style GET requests.
		self.___count = 0
	@property
	def name(self) :
		ur"""Name of the audit message action. Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after the message action is added.
		The following requirement applies only to the NetScaler CLI:
		If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my message action" or 'my message action').<br/>Minimum length = 1.
		"""
		try :
			# No client-side validation; the appliance enforces the constraints.
			return self._name
		except Exception as e:
			raise e
	@name.setter
	def name(self, name) :
		ur"""Name of the audit message action. Must begin with a letter, number, or the underscore character (_), and must contain only letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at (@), equals (=), colon (:), and underscore characters. Cannot be changed after the message action is added.
		The following requirement applies only to the NetScaler CLI:
		If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my message action" or 'my message action').<br/>Minimum length = 1
		"""
		try :
			# Stored as-is; transmitted to the NetScaler on add/update requests.
			self._name = name
		except Exception as e:
			raise e
	@property
	def loglevel(self) :
		ur"""Audit log level, which specifies the severity level of the log message being generated..
		The following loglevels are valid:
		* EMERGENCY - Events that indicate an immediate crisis on the server.
		* ALERT - Events that might require action.
		* CRITICAL - Events that indicate an imminent server crisis.
		* ERROR - Events that indicate some type of error.
		* WARNING - Events that require action in the near future.
		* NOTICE - Events that the administrator should know about.
		* INFORMATIONAL - All but low-level events.
		* DEBUG - All events, in extreme detail.<br/>Possible values = EMERGENCY, ALERT, CRITICAL, ERROR, WARNING, NOTICE, INFORMATIONAL, DEBUG.
		"""
		try :
			# No client-side validation of the enumerated values.
			return self._loglevel
		except Exception as e:
			raise e
	@loglevel.setter
	def loglevel(self, loglevel) :
		ur"""Audit log level, which specifies the severity level of the log message being generated..
		The following loglevels are valid:
		* EMERGENCY - Events that indicate an immediate crisis on the server.
		* ALERT - Events that might require action.
		* CRITICAL - Events that indicate an imminent server crisis.
		* ERROR - Events that indicate some type of error.
		* WARNING - Events that require action in the near future.
		* NOTICE - Events that the administrator should know about.
		* INFORMATIONAL - All but low-level events.
		* DEBUG - All events, in extreme detail.<br/>Possible values = EMERGENCY, ALERT, CRITICAL, ERROR, WARNING, NOTICE, INFORMATIONAL, DEBUG
		"""
		try :
			# Stored as-is; the appliance rejects values outside the enum.
			self._loglevel = loglevel
		except Exception as e:
			raise e
	@property
	def stringbuilderexpr(self) :
		ur"""Default-syntax expression that defines the format and content of the log message.
		"""
		try :
			return self._stringbuilderexpr
		except Exception as e:
			raise e
	@stringbuilderexpr.setter
	def stringbuilderexpr(self, stringbuilderexpr) :
		ur"""Default-syntax expression that defines the format and content of the log message.
		"""
		try :
			# The expression is not validated locally; see bypasssafetycheck.
			self._stringbuilderexpr = stringbuilderexpr
		except Exception as e:
			raise e
	@property
	def logtonewnslog(self) :
		ur"""Send the message to the new nslog.<br/>Possible values = YES, NO.
		"""
		try :
			return self._logtonewnslog
		except Exception as e:
			raise e
	@logtonewnslog.setter
	def logtonewnslog(self, logtonewnslog) :
		ur"""Send the message to the new nslog.<br/>Possible values = YES, NO
		"""
		try :
			self._logtonewnslog = logtonewnslog
		except Exception as e:
			raise e
	@property
	def bypasssafetycheck(self) :
		ur"""Bypass the safety check and allow unsafe expressions.<br/>Default value: NO<br/>Possible values = YES, NO.
		"""
		try :
			return self._bypasssafetycheck
		except Exception as e:
			raise e
	@bypasssafetycheck.setter
	def bypasssafetycheck(self, bypasssafetycheck) :
		ur"""Bypass the safety check and allow unsafe expressions.<br/>Default value: NO<br/>Possible values = YES, NO
		"""
		try :
			self._bypasssafetycheck = bypasssafetycheck
		except Exception as e:
			raise e
	@property
	def loglevel1(self) :
		ur"""Read-only log level reported by the NetScaler.<br/>Possible values = ALL, EMERGENCY, ALERT, CRITICAL, ERROR, WARNING, NOTICE, INFORMATIONAL, DEBUG, NONE.
		"""
		try :
			# Read-only: populated from GET responses; no setter is generated.
			return self._loglevel1
		except Exception as e:
			raise e
	@property
	def hits(self) :
		ur"""The number of times the action has been taken.
		"""
		try :
			# Read-only statistic populated from GET responses.
			return self._hits
		except Exception as e:
			raise e
	@property
	def undefhits(self) :
		ur"""The number of times the action resulted in UNDEF.
		"""
		try :
			# Read-only statistic populated from GET responses.
			return self._undefhits
		except Exception as e:
			raise e
	@property
	def referencecount(self) :
		ur"""The number of references to the action.
		"""
		try :
			# Read-only statistic populated from GET responses.
			return self._referencecount
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		ur""" Converts the raw nitro response into resource objects; returns the
		object array in case of a get request. Raises nitro_exception for any
		non-zero error code (with or without an ERROR severity marker).
		"""
		try :
			result = service.payload_formatter.string_to_resource(auditmessageaction_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 indicates the session is no longer valid.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					# No severity reported: treat any non-zero code as fatal.
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.auditmessageaction
		except Exception as e :
			raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add auditmessageaction.
"""
try :
if type(resource) is not list :
addresource = auditmessageaction()
addresource.name = resource.name
addresource.loglevel = resource.loglevel
addresource.stringbuilderexpr = resource.stringbuilderexpr
addresource.logtonewnslog = resource.logtonewnslog
addresource.bypasssafetycheck = resource.bypasssafetycheck
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ auditmessageaction() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].loglevel = resource[i].loglevel
addresources[i].stringbuilderexpr = resource[i].stringbuilderexpr
addresources[i].logtonewnslog = resource[i].logtonewnslog
addresources[i].bypasssafetycheck = resource[i].bypasssafetycheck
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete auditmessageaction.
"""
try :
if type(resource) is not list :
deleteresource = auditmessageaction()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ auditmessageaction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ auditmessageaction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update auditmessageaction.
"""
try :
if type(resource) is not list :
updateresource = auditmessageaction()
updateresource.name = resource.name
updateresource.loglevel = resource.loglevel
updateresource.stringbuilderexpr = resource.stringbuilderexpr
updateresource.logtonewnslog = resource.logtonewnslog
updateresource.bypasssafetycheck = resource.bypasssafetycheck
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ auditmessageaction() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].loglevel = resource[i].loglevel
updateresources[i].stringbuilderexpr = resource[i].stringbuilderexpr
updateresources[i].logtonewnslog = resource[i].logtonewnslog
updateresources[i].bypasssafetycheck = resource[i].bypasssafetycheck
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of auditmessageaction resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = auditmessageaction()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ auditmessageaction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ auditmessageaction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the auditmessageaction resources that are configured on netscaler.
"""
try :
if not name :
obj = auditmessageaction()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = auditmessageaction()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [auditmessageaction() for _ in range(len(name))]
obj = [auditmessageaction() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = auditmessageaction()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of auditmessageaction resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = auditmessageaction()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the auditmessageaction resources configured on NetScaler.
"""
try :
obj = auditmessageaction()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of auditmessageaction resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = auditmessageaction()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
	class Loglevel1:
		# String constants accepted by the "loglevel" list argument.
		# NOTE(review): unlike Loglevel below, this variant also offers the
		# ALL and NONE values.
		ALL = "ALL"
		EMERGENCY = "EMERGENCY"
		ALERT = "ALERT"
		CRITICAL = "CRITICAL"
		ERROR = "ERROR"
		WARNING = "WARNING"
		NOTICE = "NOTICE"
		INFORMATIONAL = "INFORMATIONAL"
		DEBUG = "DEBUG"
		NONE = "NONE"
	class Logtonewnslog:
		# String constants accepted by the "logtonewnslog" flag.
		YES = "YES"
		NO = "NO"
	class Loglevel:
		# String constants accepted by the "loglevel" argument (standard
		# syslog severities, highest to lowest).
		EMERGENCY = "EMERGENCY"
		ALERT = "ALERT"
		CRITICAL = "CRITICAL"
		ERROR = "ERROR"
		WARNING = "WARNING"
		NOTICE = "NOTICE"
		INFORMATIONAL = "INFORMATIONAL"
		DEBUG = "DEBUG"
	class Bypasssafetycheck:
		# String constants accepted by the "bypasssafetycheck" flag.
		YES = "YES"
		NO = "NO"
class auditmessageaction_response(base_response) :
	""" Nitro response envelope carrying a list of auditmessageaction
	payload objects plus the standard status fields.

	:param length: number of payload slots to pre-allocate.
	"""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-sized payload list.  The generated original assigned an empty
		# list here first and immediately overwrote it; the dead assignment
		# has been removed.
		self.auditmessageaction = [auditmessageaction() for _ in range(length)]
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for rally data.
"""
import uuid
from oslo_db.sqlalchemy.compat import utils as compat_utils
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import schema
from sqlalchemy import types
from rally.common.db.sqlalchemy import types as sa_types
from rally import consts
# Declarative base shared by every rally model defined below.
BASE = declarative_base()
def UUID():
    """Return a freshly generated random UUID as its 36-char string form."""
    return "%s" % uuid.uuid4()
class RallyBase(models.ModelBase):
    """Common base: created_at/updated_at stamps plus a save() helper."""
    metadata = None
    # Lambdas ensure "now" is evaluated per-row at insert/update time, not
    # once at class-definition time.
    created_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow())
    updated_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow(),
                           onupdate=lambda: timeutils.utcnow())
    def save(self, session=None):
        """Persist this row, opening a session if none was supplied."""
        # Imported here (not at module level) to avoid a circular import
        # with the db api module.
        from rally.common.db.sqlalchemy import api as sa_api
        if session is None:
            session = sa_api.get_session()
        super(RallyBase, self).save(session=session)
class Deployment(BASE, RallyBase):
    """Represent a deployment of OpenStack."""
    __tablename__ = "deployments"
    __table_args__ = (
        sa.Index("deployment_uuid", "uuid", unique=True),
        sa.Index("deployment_parent_uuid", "parent_uuid"),
    )
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    # External identifier exposed to users; the integer ``id`` stays internal.
    uuid = sa.Column(sa.String(36), default=UUID, nullable=False)
    # Self-referential FK: a deployment may be a child of another deployment.
    # use_alter defers constraint creation to an ALTER TABLE so the
    # self-reference does not break table creation order.
    parent_uuid = sa.Column(
        sa.String(36),
        sa.ForeignKey(uuid, use_alter=True, name="fk_parent_uuid"),
        default=None,
    )
    name = sa.Column(sa.String(255), unique=True)
    started_at = sa.Column(sa.DateTime)
    completed_at = sa.Column(sa.DateTime)
    # XXX(akscram): Do we need to explicitly store a name of the
    #               deployment engine?
    # engine_name = sa.Column(sa.String(36))
    config = sa.Column(
        sa_types.MutableJSONEncodedDict,
        default={},
        nullable=False,
    )
    # NOTE(boris-42): This is pickled rally.object.Endpoint object
    admin = sa.Column(types.PickleType, nullable=True)
    # NOTE(boris-42): This is list of pickled rally.object.Endpoint objects
    users = sa.Column(types.PickleType, default=[], nullable=False)
    status = sa.Column(
        sa.Enum(*consts.DeployStatus, name="enum_deploy_status"),
        name="enum_deployments_status",
        default=consts.DeployStatus.DEPLOY_INIT,
        nullable=False,
    )
    # Parent deployment; children are reachable via the "subdeploys" backref.
    parent = sa.orm.relationship(
        "Deployment",
        backref=sa.orm.backref("subdeploys"),
        remote_side=[uuid],
        foreign_keys=parent_uuid,
    )
class Resource(BASE, RallyBase):
    """Represent a resource of a deployment."""
    __tablename__ = "resources"
    __table_args__ = (
        sa.Index("resource_deployment_uuid", "deployment_uuid"),
        sa.Index("resource_provider_name", "deployment_uuid", "provider_name"),
        sa.Index("resource_type", "deployment_uuid", "type"),
        sa.Index("resource_provider_name_and_type", "deployment_uuid",
                 "provider_name", "type"),
    )
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    provider_name = sa.Column(sa.String(255))
    type = sa.Column(sa.String(255))
    # Arbitrary JSON payload describing the resource.
    info = sa.Column(
        sa_types.MutableJSONEncodedDict,
        default={},
        nullable=False,
    )
    deployment_uuid = sa.Column(
        sa.String(36),
        sa.ForeignKey(Deployment.uuid),
        nullable=False,
    )
    # Owning deployment; resources are reachable via the "resources" backref.
    deployment = sa.orm.relationship(
        Deployment,
        backref=sa.orm.backref("resources"),
        foreign_keys=deployment_uuid,
        primaryjoin=(deployment_uuid == Deployment.uuid),
    )
class Task(BASE, RallyBase):
    """Represents a Benchmark task."""
    __tablename__ = "tasks"
    __table_args__ = (
        sa.Index("task_uuid", "uuid", unique=True),
        sa.Index("task_status", "status"),
        sa.Index("task_deployment", "deployment_uuid"),
    )
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    # External identifier exposed to users.
    uuid = sa.Column(sa.String(36), default=UUID, nullable=False)
    status = sa.Column(sa.Enum(*list(consts.TaskStatus),
                               name="enum_tasks_status"),
                       default=consts.TaskStatus.INIT,
                       nullable=False)
    verification_log = sa.Column(sa.Text, default="")
    tag = sa.Column(sa.String(64), default="")
    deployment_uuid = sa.Column(
        sa.String(36),
        sa.ForeignKey(Deployment.uuid),
        nullable=False,
    )
    # Owning deployment; tasks are reachable via the "tasks" backref.
    deployment = sa.orm.relationship(
        Deployment,
        backref=sa.orm.backref("tasks"),
        foreign_keys=deployment_uuid,
        primaryjoin=(deployment_uuid == Deployment.uuid),
    )
class TaskResult(BASE, RallyBase):
    """Stores a key/data result blob produced by a Task."""
    __tablename__ = "task_results"
    __table_args__ = ()
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    key = sa.Column(sa_types.MutableJSONEncodedDict, nullable=False)
    data = sa.Column(sa_types.BigMutableJSONEncodedDict, nullable=False)
    task_uuid = sa.Column(sa.String(36), sa.ForeignKey("tasks.uuid"))
    # Owning task; results are reachable via the "results" backref.
    task = sa.orm.relationship(Task,
                               backref=sa.orm.backref("results"),
                               foreign_keys=task_uuid,
                               primaryjoin="TaskResult.task_uuid == Task.uuid")
class Verification(BASE, RallyBase):
    """Represents a verifier result."""
    __tablename__ = "verifications"
    __table_args__ = (
        sa.Index("verification_uuid", "uuid", unique=True),
    )
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    uuid = sa.Column(sa.String(36), default=UUID, nullable=False)
    deployment_uuid = sa.Column(
        sa.String(36),
        sa.ForeignKey(Deployment.uuid),
        nullable=False,
    )
    # NOTE(review): reuses the "enum_tasks_status" enum name defined for
    # Task.status above, so both columns share one DB-level enum type.
    status = sa.Column(sa.Enum(*list(consts.TaskStatus),
                               name="enum_tasks_status"),
                       default=consts.TaskStatus.INIT,
                       nullable=False)
    set_name = sa.Column(sa.String(20))
    tests = sa.Column(sa.Integer, default=0)
    # TODO(andreykurilin): remove this variable, when rally will support db
    # migrations. Reason: It is not used anywhere :)
    errors = sa.Column(sa.Integer, default=0)
    failures = sa.Column(sa.Integer, default=0)
    time = sa.Column(sa.Float, default=0.0)
class VerificationResult(BASE, RallyBase):
    """Stores the raw data blob produced by a Verification run."""
    __tablename__ = "verification_results"
    __table_args__ = ()
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    verification_uuid = sa.Column(sa.String(36),
                                  sa.ForeignKey("verifications.uuid"))
    data = sa.Column(sa_types.BigMutableJSONEncodedDict, nullable=False)
class Worker(BASE, RallyBase):
    """Represents a worker host; each hostname may be registered only once."""
    __tablename__ = "workers"
    __table_args__ = (
        schema.UniqueConstraint("hostname", name="uniq_worker@hostname"),
    )
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    hostname = sa.Column(sa.String(255))
# TODO(boris-42): Remove it after oslo.db > 1.4.1 will be released.
def drop_all_objects(engine):
    """Drop all database objects.
    Drops all database objects remaining on the default schema of the given
    engine. Per-db implementations will also need to drop items specific to
    those systems, such as sequences, custom types (e.g. pg ENUM), etc.
    """
    with engine.begin() as conn:
        inspector = sa.inspect(engine)
        metadata = schema.MetaData()
        tbs = []
        all_fks = []
        # Reflect every table name plus its named FK constraints so the
        # constraints can be dropped before the tables.
        for table_name in inspector.get_table_names():
            fks = []
            for fk in inspector.get_foreign_keys(table_name):
                if not fk["name"]:
                    # Unnamed constraints cannot be targeted by DROP.
                    continue
                fks.append(
                    schema.ForeignKeyConstraint((), (), name=fk["name"]))
            table = schema.Table(table_name, metadata, *fks)
            tbs.append(table)
            all_fks.extend(fks)
        # Drop FK constraints first so the tables can then be dropped in any
        # order.  SQLite does not support dropping constraints, so skip it.
        if engine.name != "sqlite":
            for fkc in all_fks:
                conn.execute(schema.DropConstraint(fkc))
        for table in tbs:
            conn.execute(schema.DropTable(table))
        if engine.name == "postgresql":
            # PostgreSQL ENUM types outlive their tables; drop them too.
            if compat_utils.sqla_100:
                enums = [e["name"] for e in sa.inspect(conn).get_enums()]
            else:
                # Older SQLAlchemy lacks get_enums(); fall back to the
                # dialect's private enum loader.
                enums = conn.dialect._load_enums(conn).keys()
            for e in enums:
                conn.execute("DROP TYPE %s" % e)
def drop_db():
    """Drop every object in the rally database (see drop_all_objects)."""
    # Local import mirrors RallyBase.save(): avoids a circular import.
    from rally.common.db.sqlalchemy import api as sa_api
    engine = sa_api.get_engine()
    drop_all_objects(engine)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class FileServersOperations(object):
    """FileServersOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Specifies the version of API used for this request. Constant value: "2017-09-01-preview".
    """
    # NOTE(review): AutoRest-generated client; create/delete are
    # long-running operations driven by AzureOperationPoller closures.
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Generated constant; every operation below pins this API version.
        self.api_version = "2017-09-01-preview"
        self.config = config
    def create(
            self, resource_group_name, file_server_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates a file server.
        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param file_server_name: The name of the file server within the
         specified resource group. File server names can only contain a
         combination of alphanumeric characters along with dash (-) and
         underscore (_). The name must be from 1 through 64 characters long.
        :type file_server_name: str
        :param parameters: The parameters to provide for file server creation.
        :type parameters: :class:`FileServerCreateParameters
         <azure.mgmt.batchai.models.FileServerCreateParameters>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`FileServer
         <azure.mgmt.batchai.models.FileServer>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers/{fileServerName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'FileServerCreateParameters')
        # Construct and send request
        # Closures below drive the AzureOperationPoller state machine:
        # initial PUT, status polling, and final deserialization.
        def long_running_send():
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)
        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            if response.status_code not in [200, 202]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 200:
                deserialized = self._deserialize('FileServer', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        if raw:
            # raw=True short-circuits polling: a single PUT, direct result.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def delete(
            self, resource_group_name, file_server_name, custom_headers=None, raw=False, **operation_config):
        """Delete a file Server.
        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param file_server_name: The name of the file server within the
         specified resource group. File server names can only contain a
         combination of alphanumeric characters along with dash (-) and
         underscore (_). The name must be from 1 through 64 characters long.
        :type file_server_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers/{fileServerName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        def long_running_send():
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)
        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)
        def get_long_running_output(response):
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            # Successful delete carries no body: returns None unless raw.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        if raw:
            # raw=True short-circuits polling: a single DELETE, direct result.
            response = long_running_send()
            return get_long_running_output(response)
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get(
            self, resource_group_name, file_server_name, custom_headers=None, raw=False, **operation_config):
        """Gets information about the specified Cluster.
        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param file_server_name: The name of the file server within the
         specified resource group. File server names can only contain a
         combination of alphanumeric characters along with dash (-) and
         underscore (_). The name must be from 1 through 64 characters long.
        :type file_server_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: :class:`FileServer <azure.mgmt.batchai.models.FileServer>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
         raw=true
        :rtype: :class:`FileServer <azure.mgmt.batchai.models.FileServer>` or
         :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers/{fileServerName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
            'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w\._]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('FileServer', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def list(
            self, file_servers_list_options=None, custom_headers=None, raw=False, **operation_config):
        """To list all the file servers available under the given subscription
        (and across all resource groups within that subscription).
        :param file_servers_list_options: Additional parameters for the
         operation
        :type file_servers_list_options: :class:`FileServersListOptions
         <azure.mgmt.batchai.models.FileServersListOptions>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of :class:`FileServer
         <azure.mgmt.batchai.models.FileServer>`
        :rtype: :class:`FileServerPaged
         <azure.mgmt.batchai.models.FileServerPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        filter = None
        if file_servers_list_options is not None:
            filter = file_servers_list_options.filter
        select = None
        if file_servers_list_options is not None:
            select = file_servers_list_options.select
        max_results = None
        if file_servers_list_options is not None:
            max_results = file_servers_list_options.max_results
        # Page fetcher invoked lazily by FileServerPaged; query parameters
        # apply only to the first page, later pages follow next_link as-is.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.BatchAI/fileServers'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, 'str')
                if max_results is not None:
                    query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.FileServerPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.FileServerPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def list_by_resource_group(
            self, resource_group_name, file_servers_list_by_resource_group_options=None, custom_headers=None, raw=False, **operation_config):
        """Gets a formatted list of file servers and their properties associated
        within the specified resource group.
        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param file_servers_list_by_resource_group_options: Additional
         parameters for the operation
        :type file_servers_list_by_resource_group_options:
         :class:`FileServersListByResourceGroupOptions
         <azure.mgmt.batchai.models.FileServersListByResourceGroupOptions>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of :class:`FileServer
         <azure.mgmt.batchai.models.FileServer>`
        :rtype: :class:`FileServerPaged
         <azure.mgmt.batchai.models.FileServerPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        filter = None
        if file_servers_list_by_resource_group_options is not None:
            filter = file_servers_list_by_resource_group_options.filter
        select = None
        if file_servers_list_by_resource_group_options is not None:
            select = file_servers_list_by_resource_group_options.select
        max_results = None
        if file_servers_list_by_resource_group_options is not None:
            max_results = file_servers_list_by_resource_group_options.max_results
        # Page fetcher invoked lazily by FileServerPaged (see list() above).
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if select is not None:
                    query_parameters['$select'] = self._serialize.query("select", select, 'str')
                if max_results is not None:
                    query_parameters['maxresults'] = self._serialize.query("max_results", max_results, 'int', maximum=1000, minimum=1)
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.FileServerPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.FileServerPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for SmartDataApp.

    Widens ``Park_fee.car_number`` and ``Park_fee.park_type`` from
    ``max_length=20`` to ``max_length=250`` (both nullable CharFields).
    """

    def forwards(self, orm):
        # Apply the migration: widen both Park_fee columns to 250 chars.
        # Changing field 'Park_fee.car_number'
        db.alter_column(u'SmartDataApp_park_fee', 'car_number', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
        # Changing field 'Park_fee.park_type'
        db.alter_column(u'SmartDataApp_park_fee', 'park_type', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))

    def backwards(self, orm):
        # Revert the migration: shrink both Park_fee columns back to 20 chars.
        # NOTE(review): values longer than 20 chars written while forwards()
        # was in effect may be truncated or rejected by the backend on revert.
        # Changing field 'Park_fee.car_number'
        db.alter_column(u'SmartDataApp_park_fee', 'car_number', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))
        # Changing field 'Park_fee.park_type'
        db.alter_column(u'SmartDataApp_park_fee', 'park_type', self.gf('django.db.models.fields.CharField')(max_length=20, null=True))

    # Frozen ORM snapshot used by South to reconstruct the model state as of
    # this migration. Auto-generated -- do not edit by hand.
    models = {
        u'SmartDataApp.community': {
            'Meta': {'object_name': 'Community'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
        },
        u'SmartDataApp.complaints': {
            'Meta': {'object_name': 'Complaints'},
            'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'author_detail': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'content': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'handler': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_worker_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pleased': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'pleased_reason': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'SmartDataApp.express': {
            'Meta': {'object_name': 'Express'},
            'allowable_get_express_time': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
            'arrive_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'get_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'handler': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_worker_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pleased': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'pleased_reason': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'signer': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'submit_express_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'SmartDataApp.fees': {
            'Meta': {'object_name': 'Fees'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '19', 'decimal_places': '3'}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'SmartDataApp.housekeeping': {
            'Meta': {'object_name': 'Housekeeping'},
            'allow_deal_time': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'handler': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'housekeeping_item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Housekeeping_items']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_worker_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pleased': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'pleased_reason': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        u'SmartDataApp.housekeeping_items': {
            'Meta': {'object_name': 'Housekeeping_items'},
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'content': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'price': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'price_description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'remarks': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'})
        },
        u'SmartDataApp.notification': {
            'Meta': {'object_name': 'Notification'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notification_community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'notification_content': ('django.db.models.fields.CharField', [], {'max_length': '100000', 'null': 'True', 'blank': 'True'}),
            'notification_theme': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'notification_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        u'SmartDataApp.park_fee': {
            'Meta': {'object_name': 'Park_fee'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'car_number': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'park_type': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'renewal_fees': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'valid_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'})
        },
        u'SmartDataApp.picture': {
            'Meta': {'object_name': 'Picture'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keep': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'like': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
            'timestamp_add': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'SmartDataApp.profiledetail': {
            'Meta': {'object_name': 'ProfileDetail'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'device_chanel_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True'}),
            'device_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True'}),
            'device_user_id': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '250', 'null': 'True'}),
            'floor': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
            'gate_card': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '11', 'null': 'True'}),
            'profile': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'SmartDataApp.repair': {
            'Meta': {'object_name': 'Repair'},
            'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'author_detail': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'content': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'handler': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_worker_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pleased': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'pleased_reason': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'repair_item': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
            'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'SmartDataApp.repair_item': {
            'Meta': {'object_name': 'Repair_item'},
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'SmartDataApp.transaction': {
            'Meta': {'object_name': 'Transaction'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'grade_num': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'money_num': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '19', 'decimal_places': '6'}),
            'remark': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'wallet_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Wallet']", 'null': 'True'})
        },
        u'SmartDataApp.wallet': {
            'Meta': {'object_name': 'Wallet'},
            'grade_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'money_sum': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '19', 'decimal_places': '6'}),
            'user_profile': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'unique': 'True', 'null': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    # Apps whose frozen model definitions above are complete.
    complete_apps = ['SmartDataApp']
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Laplace distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Laplace(distribution.Distribution):
  """The Laplace distribution with location and scale > 0 parameters.
  #### Mathematical details
  The PDF of this distribution is:
  ```f(x | mu, b, b > 0) = 0.5 / b exp(-|x - mu| / b)```
  Note that the Laplace distribution can be thought of two exponential
  distributions spliced together "back-to-back."
  """

  def __init__(self,
               loc,
               scale,
               validate_args=True,
               allow_nan_stats=False,
               name="Laplace"):
    """Construct Laplace distribution with parameters `loc` and `scale`.
    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g., `loc / scale` is a valid operation).
    Args:
      loc: Floating point tensor which characterizes the location (center)
        of the distribution.
      scale: Positive floating point tensor which characterizes the spread of
        the distribution.
      validate_args: Whether to validate input with asserts. If `validate_args`
        is `False`, and the inputs are invalid, correct behavior is not
        guaranteed.
      allow_nan_stats: Boolean, default `False`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.
    Raises:
      TypeError: if `loc` and `scale` are of different dtype.
    """
    self._allow_nan_stats = allow_nan_stats
    self._validate_args = validate_args
    with ops.op_scope([loc, scale], name):
      loc = ops.convert_to_tensor(loc)
      scale = ops.convert_to_tensor(scale)
      # When validate_args is set, gate parameter capture on a runtime
      # assertion that scale > 0; otherwise attach no dependency.
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._name = name
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        # Batch shape is the broadcast of loc and scale (see _ones()).
        self._batch_shape = self._ones().get_shape()
        # Scalar event: each sample is a single real number.
        self._event_shape = tensor_shape.TensorShape([])
    contrib_tensor_util.assert_same_float_dtype((loc, scale))

  @property
  def allow_nan_stats(self):
    """Boolean describing behavior when a stat is undefined for batch member."""
    return self._allow_nan_stats

  @property
  def validate_args(self):
    """Boolean describing behavior on invalid input."""
    return self._validate_args

  @property
  def name(self):
    # Name passed to the constructor; scopes all ops this instance creates.
    return self._name

  @property
  def dtype(self):
    # dtype is taken from loc; __init__ asserts loc and scale share it.
    return self._loc.dtype

  def batch_shape(self, name="batch_shape"):
    """Batch dimensions of this instance as a 1-D int32 `Tensor`.
    The product of the dimensions of the `batch_shape` is the number of
    independent distributions of this kind the instance represents.
    Args:
      name: name to give to the op.
    Returns:
      `Tensor` `batch_shape`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([], name):
        return array_ops.shape(self._ones())

  def get_batch_shape(self):
    """`TensorShape` available at graph construction time.
    Same meaning as `batch_shape`. May be only partially defined.
    Returns:
      batch shape
    """
    return self._batch_shape

  def event_shape(self, name="event_shape"):
    """Shape of a sample from a single distribution as a 1-D int32 `Tensor`.
    Args:
      name: name to give to the op.
    Returns:
      `Tensor` `event_shape`
    """
    with ops.name_scope(self.name):
      with ops.op_scope([], name):
        # Empty vector: events are scalars.
        return constant_op.constant([], dtype=dtypes.int32)

  def get_event_shape(self):
    """`TensorShape` available at graph construction time.
    Same meaning as `event_shape`. May be only partially defined.
    Returns:
      event shape
    """
    return self._event_shape

  @property
  def loc(self):
    """Distribution parameter for the location."""
    return self._loc

  @property
  def scale(self):
    """Distribution parameter for scale."""
    return self._scale

  def mean(self, name="mean"):
    """Mean of this distribution."""
    with ops.name_scope(self.name):
      with ops.op_scope([self._scale, self._loc], name):
        # Adding zeros_like(scale) broadcasts the result to full batch shape.
        return self._loc + array_ops.zeros_like(self._scale)

  def median(self, name="median"):
    """Median of this distribution."""
    # For Laplace, median == mean == mode == loc.
    return self.mean(name="median")

  def mode(self, name="mode"):
    """Mode of this distribution."""
    return self.mean(name="mode")

  def std(self, name="std"):
    """Standard deviation of this distribution."""
    with ops.name_scope(self.name):
      with ops.op_scope([self._scale, self._loc], name):
        # stddev of Laplace(loc, scale) is sqrt(2) * scale.
        sqrt_2 = constant_op.constant(math.sqrt(2.), dtype=self.dtype)
        return sqrt_2 * self._scale + array_ops.zeros_like(self._loc)

  def variance(self, name="variance"):
    """Variance of this distribution."""
    with ops.name_scope(self.name):
      with ops.op_scope([], name):
        return math_ops.square(self.std())

  def prob(self, x, name="pdf"):
    """The prob of observations in `x` under the Laplace distribution(s).
    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `loc` and `scale`.
      name: The name to give this op.
    Returns:
      pdf: tensor of dtype `dtype`, the pdf values of `x`.
    """
    # NOTE(review): unlike log_prob/cdf, this method neither enters the
    # name/op scopes nor converts/dtype-checks `x` -- presumably an
    # oversight; confirm before relying on op naming here.
    return 0.5 / self._scale * math_ops.exp(
        -math_ops.abs(x - self._loc) / self._scale)

  def log_prob(self, x, name="log_prob"):
    """Log prob of observations in `x` under these Laplace distribution(s).
    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `loc` and `scale`.
      name: The name to give this op.
    Returns:
      log_prob: tensor of dtype `dtype`, the log-probability of `x`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._loc, self._scale, x], name):
        x = ops.convert_to_tensor(x)
        if x.dtype != self.dtype:
          raise TypeError("Input x dtype does not match dtype: %s vs. %s"
                          % (x.dtype, self.dtype))
        # log pdf = -log(2) - log(scale) - |x - loc| / scale
        log_2 = constant_op.constant(math.log(2.), dtype=self.dtype)
        return (-log_2 - math_ops.log(self._scale) -
                math_ops.abs(x - self._loc) / self._scale)

  def cdf(self, x, name="cdf"):
    """CDF of observations in `x` under the Laplace distribution(s).
    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `loc` and `scale`.
      name: The name to give this op.
    Returns:
      cdf: tensor of dtype `dtype`, the CDFs of `x`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._loc, self._scale, x], name):
        x = ops.convert_to_tensor(x)
        if x.dtype != self.dtype:
          raise TypeError("Input x dtype does not match dtype: %s vs. %s"
                          % (x.dtype, self.dtype))
        # Piecewise CDF folded into one expression via sign(x - loc).
        y = x - self._loc
        return 0.5 + 0.5 * math_ops.sign(y) * (
            1. - math_ops.exp(-math_ops.abs(y) / self._scale))

  def log_cdf(self, x, name="log_cdf"):
    """Log CDF of observations `x` under the Laplace distribution(s).
    Args:
      x: tensor of dtype `dtype`, must be broadcastable with `loc` and `scale`.
      name: The name to give this op.
    Returns:
      log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._loc, self._scale, x], name):
        return math_ops.log(self.cdf(x))

  def entropy(self, name="entropy"):
    """The entropy of Laplace distribution(s).
    Args:
      name: The name to give this op.
    Returns:
      entropy: tensor of dtype `dtype`, the entropy.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._loc, self._scale], name):
        # entropy = 1 + log(2 * scale) = log(2e) + log(scale)
        log_2_e = constant_op.constant(math.log(2.) + 1., dtype=self.dtype)
        # Use broadcasting rules to calculate the full broadcast scale.
        scale = self._scale + array_ops.zeros_like(self._loc)
        return log_2_e + math_ops.log(scale)

  def sample_n(self, n, seed=None, name="sample_n"):
    """Sample `n` observations from the Laplace Distributions.
    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.
    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the parameters.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._loc, self._scale, n], name):
        n = ops.convert_to_tensor(n)
        n_val = tensor_util.constant_value(n)
        shape = array_ops.concat(0, ([n], self.batch_shape()))
        # Sample uniformly-at-random from the open-interval (-1, 1).
        # minval is nudged off -1 via nextafter so log(1 - |u|) below
        # never sees |u| == 1.
        uniform_samples = random_ops.random_uniform(
            shape=shape,
            minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                self.dtype.as_numpy_dtype(0.)),
            maxval=self.dtype.as_numpy_dtype(1.),
            dtype=self.dtype,
            seed=seed)
        # Provide some hints to shape inference
        inferred_shape = tensor_shape.vector(n_val).concatenate(
            self.get_batch_shape())
        uniform_samples.set_shape(inferred_shape)
        # Inverse-CDF-style transform of the uniform samples into Laplace
        # samples.
        return (self._loc - self._scale * math_ops.sign(uniform_samples) *
                math_ops.log(1. - math_ops.abs(uniform_samples)))

  @property
  def is_reparameterized(self):
    # Samples are a differentiable transform of base uniforms (see sample_n).
    return True

  def _ones(self):
    # Ones broadcast to the combined loc/scale (i.e. batch) shape.
    return array_ops.ones_like(self._loc + self._scale)

  def _zeros(self):
    # Zeros broadcast to the combined loc/scale (i.e. batch) shape.
    return array_ops.zeros_like(self._loc + self._scale)

  @property
  def is_continuous(self):
    return True
| |
# run with: python manage.py test hs_core.tests.serialization.test_resourcemeta_sax_parsing
import unittest
import xml.sax
from hs_core.serialization import GenericResourceSAXHandler
from hs_geo_raster_resource.serialization import RasterResourceSAXHandler
from hs_app_netCDF.serialization import NetcdfResourceSAXHandler
class TestGenericResourceMetaSax(unittest.TestCase):
    """Exercise GenericResourceSAXHandler on a sample Dublin Core RDF/XML
    document containing two contributors and four subjects.
    """

    def setUp(self):
        # Sample resource metadata document fed to the SAX handler.
        self.parse_sample = """<?xml version="1.0"?>
<!DOCTYPE rdf:RDF PUBLIC "-//DUBLIN CORE//DCMES DTD 2002/07/31//EN"
"http://dublincore.org/documents/2002/07/31/dcmes-xml/dcmes-xml-dtd.dtd">
<rdf:RDF xmlns:dcterms="http://purl.org/dc/terms/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:hsterms="https://www.hydroshare.org/terms/">
<rdf:Description rdf:about="http://localhost:8000/resource/dc52e6aa93154521af08522de27ec276">
<dc:contributor>
<rdf:Description rdf:about="http://localhost:8000/user/1/">
<hsterms:name>Brian Miles</hsterms:name>
<hsterms:organization>Someplace</hsterms:organization>
<hsterms:email>foo@gmail.com</hsterms:email>
<hsterms:address>123 Main Street</hsterms:address>
<hsterms:phone rdf:resource="tel:412-555-1212"/>
<hsterms:homepage rdf:resource="http://www.ie.unc.edu/"/>
</rdf:Description>
</dc:contributor>
<dc:contributor>
<rdf:Description rdf:about="http://localhost:8000/user/2/">
<hsterms:name>Miles Brian</hsterms:name>
<hsterms:organization>Elsewhere</hsterms:organization>
<hsterms:email>bar@icloud.com</hsterms:email>
<hsterms:address>123 Wall Street</hsterms:address>
<hsterms:phone rdf:resource="tel:412-555-2121"/>
<hsterms:homepage rdf:resource="http://www.cmu.edu/"/>
</rdf:Description>
</dc:contributor>
<dc:subject>HydroShare</dc:subject>
<dc:subject>cuahsi</dc:subject>
<dc:subject>Presentation</dc:subject>
<dc:subject>Hydroinformatics</dc:subject>
</rdf:Description>
</rdf:RDF>
"""

    def tearDown(self):
        pass

    def test_sax_parsing(self):
        """Parse the sample and verify subjects and contributors round-trip."""
        handler = GenericResourceSAXHandler()
        # Encode: xml.sax.parseString requires a bytes-like object on
        # Python 3 (the sample is pure ASCII, so this is also a no-op
        # semantically on Python 2).
        xml.sax.parseString(self.parse_sample.encode('utf-8'), handler)
        # assertEqual (not assertTrue on a boolean) so failures report the
        # actual values, not just "False is not true".
        self.assertEqual(len(handler.subjects), 4)
        self.assertEqual(handler.subjects[0], 'HydroShare')
        self.assertEqual(handler.subjects[1], 'cuahsi')
        self.assertEqual(handler.subjects[2], 'Presentation')
        self.assertEqual(handler.subjects[3], 'Hydroinformatics')
        self.assertEqual(len(handler.contributors), 2)
        self.assertEqual(handler.contributors[0].uri, 'http://localhost:8000/user/1/')
        self.assertEqual(handler.contributors[0].name, 'Brian Miles')
        self.assertEqual(handler.contributors[0].organization, 'Someplace')
        self.assertEqual(handler.contributors[0].email, 'foo@gmail.com')
        self.assertEqual(handler.contributors[0].address, '123 Main Street')
        self.assertEqual(handler.contributors[0].phone, '412-555-1212')
        self.assertEqual(handler.contributors[1].uri, 'http://localhost:8000/user/2/')
        self.assertEqual(handler.contributors[1].name, 'Miles Brian')
        self.assertEqual(handler.contributors[1].organization, 'Elsewhere')
        self.assertEqual(handler.contributors[1].email, 'bar@icloud.com')
        self.assertEqual(handler.contributors[1].address, '123 Wall Street')
        self.assertEqual(handler.contributors[1].phone, '412-555-2121')
class TestRasterResourceMetaSax(unittest.TestCase):
    """Exercise RasterResourceSAXHandler on a sample RDF/XML document
    containing three BandInformation elements.
    """

    def setUp(self):
        # Sample raster resource metadata document fed to the SAX handler.
        self.parse_sample = """<?xml version="1.0"?>
<!DOCTYPE rdf:RDF PUBLIC "-//DUBLIN CORE//DCMES DTD 2002/07/31//EN"
"http://dublincore.org/documents/2002/07/31/dcmes-xml/dcmes-xml-dtd.dtd">
<rdf:RDF xmlns:dcterms="http://purl.org/dc/terms/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:hsterms="https://www.hydroshare.org/terms/">
<rdf:Description rdf:about="http://localhost:8000/resource/dc52e6aa93154521af08522de27ec276">
<hsterms:BandInformation>
<rdf:Description>
<hsterms:name>Band_1</hsterms:name>
<hsterms:variableName>red</hsterms:variableName>
<hsterms:variableUnit>DN</hsterms:variableUnit>
<hsterms:method>measured</hsterms:method>
<hsterms:comment>real good.</hsterms:comment>
</rdf:Description>
</hsterms:BandInformation>
<hsterms:BandInformation>
<rdf:Description>
<hsterms:name>Band_2</hsterms:name>
<hsterms:variableName>green</hsterms:variableName>
<hsterms:variableUnit>DN</hsterms:variableUnit>
<hsterms:method>guessed</hsterms:method>
<hsterms:comment>not so good.</hsterms:comment>
</rdf:Description>
</hsterms:BandInformation>
<hsterms:BandInformation>
<rdf:Description>
<hsterms:name>Band_3</hsterms:name>
<hsterms:variableName>blue</hsterms:variableName>
<hsterms:variableUnit>DN</hsterms:variableUnit>
<hsterms:method>random</hsterms:method>
<hsterms:comment>random like.</hsterms:comment>
</rdf:Description>
</hsterms:BandInformation>
</rdf:Description>
</rdf:RDF>
"""

    def tearDown(self):
        pass

    def test_sax_parsing(self):
        """Parse the sample and verify all three bands are captured in order."""
        handler = RasterResourceSAXHandler()
        # Encode: xml.sax.parseString requires a bytes-like object on
        # Python 3 (the sample is pure ASCII, so this is also a no-op
        # semantically on Python 2).
        xml.sax.parseString(self.parse_sample.encode('utf-8'), handler)
        # assertEqual (not assertTrue on a boolean) so failures report the
        # actual values, not just "False is not true".
        self.assertEqual(len(handler.band_info), 3)
        expected_bands = [
            ('Band_1', 'red', 'DN', 'measured', 'real good.'),
            ('Band_2', 'green', 'DN', 'guessed', 'not so good.'),
            ('Band_3', 'blue', 'DN', 'random', 'random like.'),
        ]
        for i, (name, var_name, unit, method, comment) in enumerate(expected_bands):
            self.assertEqual(handler.band_info[i].name, name)
            self.assertEqual(handler.band_info[i].variableName, var_name)
            self.assertEqual(handler.band_info[i].variableUnit, unit)
            self.assertEqual(handler.band_info[i].method, method)
            self.assertEqual(handler.band_info[i].comment, comment)
class TestNetcdfResourceMetaSax(unittest.TestCase):
def setUp(self):
self.parse_sample = """<?xml version="1.0"?>
<!DOCTYPE rdf:RDF PUBLIC "-//DUBLIN CORE//DCMES DTD 2002/07/31//EN"
"http://dublincore.org/documents/2002/07/31/dcmes-xml/dcmes-xml-dtd.dtd">
<rdf:RDF xmlns:dcterms="http://purl.org/dc/terms/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:hsterms="https://www.hydroshare.org/terms/">
<rdf:Description rdf:about="http://localhost:8000/resource/dc52e6aa93154521af08522de27ec276">
<hsterms:netcdfVariable>
<rdf:Description>
<hsterms:shape>Time,south_north,west_east</hsterms:shape>
<hsterms:name>ACLWDNB</hsterms:name>
<hsterms:longName>Long ACLWDNB</hsterms:longName>
<hsterms:missingValue>NA</hsterms:missingValue>
<hsterms:type>Float</hsterms:type>
<hsterms:comment>Something flippant. </hsterms:comment>
<hsterms:unit>J m-2</hsterms:unit>
</rdf:Description>
</hsterms:netcdfVariable>
<hsterms:netcdfVariable>
<rdf:Description>
<hsterms:shape>Time,force_soil_layers</hsterms:shape>
<hsterms:name>T_SOIL_FORCING_TEND</hsterms:name>
<hsterms:longName>Long T_SOIL_FORCING_TEND</hsterms:longName>
<hsterms:missingValue>-999</hsterms:missingValue>
<hsterms:type>Float</hsterms:type>
<hsterms:comment>Something better.</hsterms:comment>
<hsterms:unit>K s-1</hsterms:unit>
</rdf:Description>
</hsterms:netcdfVariable>
<hsterms:netcdfVariable>
<rdf:Description>
<hsterms:shape>Time,south_north,west_east</hsterms:shape>
<hsterms:name>LWUPT</hsterms:name>
<hsterms:longName>Long LWUPT</hsterms:longName>
<hsterms:missingValue>-42424242</hsterms:missingValue>
<hsterms:type>Float</hsterms:type>
<hsterms:comment>Not helpful.</hsterms:comment>
<hsterms:unit>W m-2</hsterms:unit>
</rdf:Description>
</hsterms:netcdfVariable>
</rdf:Description>
</rdf:RDF>
"""
def tearDown(self):
pass
def test_sax_parsing(self):
handler = NetcdfResourceSAXHandler()
xml.sax.parseString(self.parse_sample, handler)
self.assertTrue(len(handler.variables) == 3)
self.assertEqual(handler.variables[0].name, 'ACLWDNB')
self.assertEqual(handler.variables[0].shape, 'Time,south_north,west_east')
self.assertEqual(handler.variables[0].longName, 'Long ACLWDNB')
self.assertEqual(handler.variables[0].missingValue, 'NA')
self.assertEqual(handler.variables[0].type, 'Float')
self.assertEqual(handler.variables[0].comment, 'Something flippant. ')
self.assertEqual(handler.variables[0].unit, 'J m-2')
self.assertEqual(handler.variables[1].name, 'T_SOIL_FORCING_TEND')
self.assertEqual(handler.variables[1].shape, 'Time,force_soil_layers')
self.assertEqual(handler.variables[1].longName, 'Long T_SOIL_FORCING_TEND')
self.assertEqual(handler.variables[1].missingValue, '-999')
self.assertEqual(handler.variables[1].type, 'Float')
self.assertEqual(handler.variables[1].comment, 'Something better.')
self.assertEqual(handler.variables[1].unit, 'K s-1')
self.assertEqual(handler.variables[2].name, 'LWUPT')
self.assertEqual(handler.variables[2].shape, 'Time,south_north,west_east')
self.assertEqual(handler.variables[2].longName, 'Long LWUPT')
self.assertEqual(handler.variables[2].missingValue, '-42424242')
self.assertEqual(handler.variables[2].type, 'Float')
self.assertEqual(handler.variables[2].comment, 'Not helpful.')
self.assertEqual(handler.variables[2].unit, 'W m-2')
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the 'dm' app.

    Adds three free-text columns: 'Encounter.notes',
    'EncounterTemplate.setup' and 'EncounterTemplate.tactics'.
    """

    def forwards(self, orm):
        # Apply: add the three new TextField columns (empty by default).
        # Adding field 'Encounter.notes'
        db.add_column(u'dm_encounter', 'notes',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)

        # Adding field 'EncounterTemplate.setup'
        db.add_column(u'dm_encountertemplate', 'setup',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)

        # Adding field 'EncounterTemplate.tactics'
        db.add_column(u'dm_encountertemplate', 'tactics',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Revert: drop the three columns added in forwards().
        # Deleting field 'Encounter.notes'
        db.delete_column(u'dm_encounter', 'notes')

        # Deleting field 'EncounterTemplate.setup'
        db.delete_column(u'dm_encountertemplate', 'setup')

        # Deleting field 'EncounterTemplate.tactics'
        db.delete_column(u'dm_encountertemplate', 'tactics')

    # Frozen ORM snapshot (auto-generated by South); used to build the
    # 'orm' argument passed to forwards()/backwards(). Do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'character_builder.ability': {
            'Meta': {'object_name': 'Ability'},
            'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'help_text': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.actiontype': {
            'Meta': {'object_name': 'ActionType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.alignment': {
            'Meta': {'object_name': 'Alignment'},
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'character_builder.armorclass': {
            'Meta': {'object_name': 'ArmorClass'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.armortype': {
            'Meta': {'object_name': 'ArmorType'},
            'armor_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.ArmorClass']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.character': {
            'Meta': {'object_name': 'Character'},
            'age': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'alignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Alignment']"}),
            'class_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.ClassType']"}),
            'conditions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Condition']", 'symmetrical': 'False', 'blank': 'True'}),
            'deity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Deity']"}),
            'gender': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Gender']"}),
            'height': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'hit_points': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_hit_points': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Race']"}),
            'slug_name': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
            'weight': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'xp': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
        },
        'character_builder.classtype': {
            'Meta': {'ordering': "['name']", 'object_name': 'ClassType'},
            'armor_proficiencies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.ArmorType']", 'symmetrical': 'False'}),
            'base_hit_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'favored_abilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Ability']", 'symmetrical': 'False'}),
            'hit_points_per_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Modifier']", 'symmetrical': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Role']"}),
            'role_flavor': ('django.db.models.fields.TextField', [], {}),
            'skill_choices': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Source']"}),
            'trained_skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['character_builder.Skill']", 'null': 'True', 'blank': 'True'}),
            'weapon_proficiencies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.WeaponProficiencyGroup']", 'symmetrical': 'False'})
        },
        'character_builder.condition': {
            'Meta': {'object_name': 'Condition'},
            'effect': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'character_builder.defense': {
            'Meta': {'object_name': 'Defense'},
            'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'abilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Ability']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'character_builder.deity': {
            'Meta': {'object_name': 'Deity'},
            'alignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Alignment']"}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'character_builder.gender': {
            'Meta': {'object_name': 'Gender'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '25'})
        },
        'character_builder.language': {
            'Meta': {'object_name': 'Language'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'script': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.modifier': {
            'Meta': {'object_name': 'Modifier'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'character_builder.powerkeyword': {
            'Meta': {'object_name': 'PowerKeyword'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.powerrange': {
            'Meta': {'object_name': 'PowerRange'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.powerusage': {
            'Meta': {'object_name': 'PowerUsage'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.race': {
            'Meta': {'ordering': "['name']", 'object_name': 'Race'},
            'average_height_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'average_weight_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Language']", 'symmetrical': 'False'}),
            'modifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Modifier']", 'symmetrical': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'playable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Size']"}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Source']"}),
            'speed': ('django.db.models.fields.IntegerField', [], {}),
            'vision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Vision']"})
        },
        'character_builder.role': {
            'Meta': {'object_name': 'Role'},
            'flavor': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.size': {
            'Meta': {'object_name': 'Size'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'reach': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'space': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.skill': {
            'Meta': {'object_name': 'Skill'},
            'ability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Ability']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.source': {
            'Meta': {'ordering': "['name']", 'object_name': 'Source'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.vision': {
            'Meta': {'object_name': 'Vision'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.weaponcategory': {
            'Meta': {'object_name': 'WeaponCategory'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'character_builder.weaponproficiencygroup': {
            'Meta': {'object_name': 'WeaponProficiencyGroup'},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.WeaponCategory']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'dm.basicstorynpc': {
            'Meta': {'object_name': 'BasicStoryNPC', '_ormbases': ['dm.NPC']},
            'description': ('django.db.models.fields.TextField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'npc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.NPC']", 'unique': 'True', 'primary_key': 'True'})
        },
        'dm.campaign': {
            'Meta': {'object_name': 'Campaign'},
            'dm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Party']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'dm.encounter': {
            'Meta': {'object_name': 'Encounter'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Party']"}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.EncounterTemplate']"})
        },
        'dm.encounterparticipant': {
            'Meta': {'object_name': 'EncounterParticipant'},
            'encounter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Encounter']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initiative': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'symbol': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'})
        },
        'dm.encountertemplate': {
            'Meta': {'object_name': 'EncounterTemplate'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'npcs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dm.NPC']", 'symmetrical': 'False'}),
            'setup': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'tactics': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'dm.historyline': {
            'Meta': {'object_name': 'HistoryLine'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logged_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'session': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Session']"}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'dm.monsternpc': {
            'Meta': {'object_name': 'MonsterNPC', '_ormbases': ['dm.NPC']},
            'hit_points': ('django.db.models.fields.IntegerField', [], {}),
            u'npc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.NPC']", 'unique': 'True', 'primary_key': 'True'}),
            'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.NPCType']"})
        },
        'dm.npc': {
            'Meta': {'object_name': 'NPC'},
            'conditions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Condition']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_alive': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'dm.npcencounterparticipant': {
            'Meta': {'object_name': 'NPCEncounterParticipant', '_ormbases': ['dm.EncounterParticipant']},
            u'encounterparticipant_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.EncounterParticipant']", 'unique': 'True', 'primary_key': 'True'}),
            'npc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.NPC']"})
        },
        'dm.npctype': {
            'Meta': {'object_name': 'NPCType'},
            'alignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Alignment']", 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.IntegerField', [], {}),
            'max_hit_points': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Race']"}),
            'roles': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Role']", 'symmetrical': 'False'}),
            'vision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Vision']"}),
            'xp_reward': ('django.db.models.fields.IntegerField', [], {})
        },
        'dm.npctypeability': {
            'Meta': {'object_name': 'NPCTypeAbility'},
            'ability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Ability']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'abilities'", 'to': "orm['dm.NPCType']"}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'dm.npctypedefense': {
            'Meta': {'object_name': 'NPCTypeDefense'},
            'defense': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Defense']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'defenses'", 'to': "orm['dm.NPCType']"}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'dm.npctypepower': {
            'Meta': {'object_name': 'NPCTypePower'},
            'action_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.ActionType']", 'null': 'True', 'blank': 'True'}),
            'attack_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.PowerRange']", 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keywords': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['character_builder.PowerKeyword']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'powers'", 'to': "orm['dm.NPCType']"}),
            'recharge_text': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
            'usage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.PowerUsage']", 'null': 'True', 'blank': 'True'})
        },
        'dm.npctypeskill': {
            'Meta': {'object_name': 'NPCTypeSkill'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skills'", 'to': "orm['dm.NPCType']"}),
            'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Skill']"}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'dm.party': {
            'Meta': {'object_name': 'Party'},
            'background': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'characters': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['character_builder.Character']", 'symmetrical': 'False'}),
            'formed_on': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'dm.pcencounterparticipant': {
            'Meta': {'object_name': 'PCEncounterParticipant', '_ormbases': ['dm.EncounterParticipant']},
            'character': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['character_builder.Character']"}),
            u'encounterparticipant_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.EncounterParticipant']", 'unique': 'True', 'primary_key': 'True'})
        },
        'dm.session': {
            'Meta': {'object_name': 'Session'},
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.Campaign']"}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'})
        },
        'dm.storynpc': {
            'Meta': {'object_name': 'StoryNPC', '_ormbases': ['dm.BasicStoryNPC']},
            u'basicstorynpc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['dm.BasicStoryNPC']", 'unique': 'True', 'primary_key': 'True'}),
            'hit_points': ('django.db.models.fields.IntegerField', [], {}),
            'npc_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dm.NPCType']"})
        }
    }

    # Limit this migration's frozen ORM to the 'dm' app.
    complete_apps = ['dm']
| |
""" @package antlr3.tree
@brief ANTLR3 runtime package, treewizard module
A utility module to create ASTs at runtime.
See <http://www.antlr.org/wiki/display/~admin/2007/07/02/Exploring+Concept+of+TreeWizard> for an overview. Note that the API of the Python implementation is slightly different.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2008 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
from google.appengine._internal.antlr3.constants import INVALID_TOKEN_TYPE
from google.appengine._internal.antlr3.tokens import CommonToken
from google.appengine._internal.antlr3.tree import CommonTree, CommonTreeAdaptor
def computeTokenTypes(tokenNames):
    """
    Compute a dict that is an inverted index of
    tokenNames (which maps int token types to names).
    """
    if tokenNames is None:
        return {}
    # Position in the list is the token type; invert to name -> type.
    return {name: ttype for ttype, name in enumerate(tokenNames)}
## token types for pattern parser
EOF = -1      # end of pattern input
BEGIN = 1     # "(" -- start of a subtree
END = 2       # ")" -- end of a subtree
ID = 3        # identifier / token name
ARG = 4       # bracketed text argument, e.g. [foo]
PERCENT = 5   # "%" -- introduces a node label
COLON = 6     # ":" -- separates label from node
DOT = 7       # "." -- wildcard node
class TreePatternLexer(object):
    """Tokenizer for tree pattern strings such as "(A B C)"."""

    # Characters that may start an identifier.
    __idStartChar = frozenset(
        'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
    )
    # Characters that may continue an identifier.
    __idChar = __idStartChar | frozenset('0123456789')

    def __init__(self, pattern):
        ## The tree pattern to lex like "(A B C)"
        self.pattern = pattern
        ## Index into input string
        self.p = -1
        ## Current char
        self.c = None
        ## How long is the pattern in char?
        self.n = len(pattern)
        ## Set when token type is ID or ARG
        self.sval = None
        ## Set when an unexpected character is encountered
        self.error = False
        # Load the first character.
        self.consume()

    def nextToken(self):
        """Return the next token type; sval holds ID/ARG text."""
        self.sval = ""
        while self.c != EOF:
            if self.c in (' ', '\n', '\r', '\t'):
                # Whitespace separates tokens; skip it.
                self.consume()
            elif self.c in self.__idStartChar:
                # Identifier: a start char followed by id chars.
                self.sval += self.c
                self.consume()
                while self.c in self.__idChar:
                    self.sval += self.c
                    self.consume()
                return ID
            elif self.c == '(':
                self.consume()
                return BEGIN
            elif self.c == ')':
                self.consume()
                return END
            elif self.c == '%':
                self.consume()
                return PERCENT
            elif self.c == ':':
                self.consume()
                return COLON
            elif self.c == '.':
                self.consume()
                return DOT
            elif self.c == '[':
                # Grab [x] as a string, returning x; '\' escapes ']'.
                self.consume()
                while self.c != ']':
                    if self.c == '\\':
                        self.consume()
                        if self.c != ']':
                            self.sval += '\\'
                        self.sval += self.c
                    else:
                        self.sval += self.c
                    self.consume()
                self.consume()
                return ARG
            else:
                # Unexpected character: flag the error and stop lexing.
                self.consume()
                self.error = True
                return EOF
        return EOF

    def consume(self):
        # Advance one character; EOF once past the end of the pattern.
        self.p += 1
        self.c = self.pattern[self.p] if self.p < self.n else EOF
class TreePatternParser(object):
    """
    Recursive-descent parser for lexed tree patterns.

    Pulls tokens from a TreePatternLexer, resolves token names via the
    wizard, and builds nodes through the supplied adaptor.  All parse
    methods return None on a syntax error.
    """

    def __init__(self, tokenizer, wizard, adaptor):
        self.tokenizer = tokenizer
        self.wizard = wizard
        self.adaptor = adaptor
        self.ttype = tokenizer.nextToken()  # kickstart

    def pattern(self):
        # A pattern is either a parenthesized subtree or a single node.
        if self.ttype == BEGIN:
            return self.parseTree()
        elif self.ttype == ID:
            node = self.parseNode()
            if self.ttype == EOF:
                return node
            return None  # extra junk on end
        return None

    def parseTree(self):
        # Parse "(root child1 ... childN)".
        if self.ttype != BEGIN:
            return None
        self.ttype = self.tokenizer.nextToken()
        root = self.parseNode()
        if root is None:
            return None
        # Children may be nested subtrees or plain/labeled/wildcard nodes.
        while self.ttype in (BEGIN, ID, PERCENT, DOT):
            if self.ttype == BEGIN:
                subtree = self.parseTree()
                self.adaptor.addChild(root, subtree)
            else:
                child = self.parseNode()
                if child is None:
                    return None
                self.adaptor.addChild(root, child)
        if self.ttype != END:
            return None
        self.ttype = self.tokenizer.nextToken()
        return root

    def parseNode(self):
        # "%label:" prefix
        label = None
        if self.ttype == PERCENT:
            self.ttype = self.tokenizer.nextToken()
            if self.ttype != ID:
                return None
            label = self.tokenizer.sval
            self.ttype = self.tokenizer.nextToken()
            if self.ttype != COLON:
                return None
            self.ttype = self.tokenizer.nextToken()  # move to ID following colon

        # Wildcard?
        if self.ttype == DOT:
            self.ttype = self.tokenizer.nextToken()
            wildcardPayload = CommonToken(0, ".")
            node = WildcardTreePattern(wildcardPayload)
            if label is not None:
                node.label = label
            return node

        # "ID" or "ID[arg]"
        if self.ttype != ID:
            return None
        tokenName = self.tokenizer.sval
        self.ttype = self.tokenizer.nextToken()
        # "nil" denotes the flat/nil root node.
        if tokenName == "nil":
            return self.adaptor.nil()
        text = tokenName

        # check for arg
        arg = None
        if self.ttype == ARG:
            arg = self.tokenizer.sval
            text = arg
            self.ttype = self.tokenizer.nextToken()

        # create node
        treeNodeType = self.wizard.getTokenType(tokenName)
        if treeNodeType == INVALID_TOKEN_TYPE:
            return None
        node = self.adaptor.createFromType(treeNodeType, text)

        # Only TreePattern nodes can carry label / hasTextArg metadata.
        if label is not None and isinstance(node, TreePattern):
            node.label = label
        if arg is not None and isinstance(node, TreePattern):
            node.hasTextArg = True
        return node
class TreePattern(CommonTree):
    """
    When using %label:TOKENNAME in a tree for parse(), we must
    track the label.
    """

    def __init__(self, payload):
        CommonTree.__init__(self, payload)
        # Label from a "%label:" prefix, or None when unlabeled.
        self.label = None
        # Set to True when the pattern supplied a [text] argument.
        self.hasTextArg = None

    def toString(self):
        text = CommonTree.toString(self)
        if self.label is None:
            return text
        return '%' + self.label + ':' + text
class WildcardTreePattern(TreePattern):
    # Pattern node created for the '.' wildcard; its type distinguishes it
    # during matching in TreeWizard._parse.
    pass
class TreePatternTreeAdaptor(CommonTreeAdaptor):
    """This adaptor creates TreePattern objects for use during scan()"""

    def createWithPayload(self, payload):
        # Build TreePattern nodes (which carry label/hasTextArg) instead of
        # the plain nodes the base adaptor would create.
        return TreePattern(payload)
class TreeWizard(object):
    """
    Build and navigate trees with this object. Must know about the names
    of tokens so you have to pass in a map or array of token names (from which
    this class can build the map). I.e., Token DECL means nothing unless the
    class can translate it to a token type.

    In order to create nodes and navigate, this class needs a TreeAdaptor.

    This class can build a token type -> node index for repeated use or for
    iterating over the various nodes with a particular type.

    This class works in conjunction with the TreeAdaptor rather than moving
    all this functionality into the adaptor. An adaptor helps build and
    navigate trees using methods. This class helps you do it with string
    patterns like "(A B C)". You can create a tree from that pattern or
    match subtrees against it.
    """

    def __init__(self, adaptor=None, tokenNames=None, typeMap=None):
        """Create a wizard.

        Pass either tokenNames (a sequence from which the name->type map
        is computed) or typeMap (a ready-made name->type dict), not both.
        """
        self.adaptor = adaptor
        if typeMap is None:
            self.tokenNameToTypeMap = computeTokenTypes(tokenNames)
        else:
            if tokenNames is not None:
                raise ValueError("Can't have both tokenNames and typeMap")
            self.tokenNameToTypeMap = typeMap

    def getTokenType(self, tokenName):
        """Using the map of token names to token types, return the type."""
        try:
            return self.tokenNameToTypeMap[tokenName]
        except KeyError:
            return INVALID_TOKEN_TYPE

    def create(self, pattern):
        """
        Create a tree or node from the indicated tree pattern that closely
        follows ANTLR tree grammar tree element syntax:

        (root child1 ... child2).

        You can also just pass in a node: ID

        Any node can have a text argument: ID[foo]
        (notice there are no quotes around foo--it's clear it's a string).

        nil is a special name meaning "give me a nil node". Useful for
        making lists: (nil A B C) is a list of A B C.
        """
        tokenizer = TreePatternLexer(pattern)
        parser = TreePatternParser(tokenizer, self, self.adaptor)
        return parser.pattern()

    def index(self, tree):
        """Walk the entire tree and make a node name to nodes mapping.

        For now, use recursion but later nonrecursive version may be
        more efficient. Returns a dict int -> list where the list is
        of your AST node type. The int is the token type of the node.
        """
        m = {}
        self._index(tree, m)
        return m

    def _index(self, t, m):
        """Do the work for index."""
        if t is None:
            return

        ttype = self.adaptor.getType(t)
        elements = m.get(ttype)
        if elements is None:
            # first node of this type: start its bucket
            m[ttype] = elements = []
        elements.append(t)
        for i in range(self.adaptor.getChildCount(t)):
            child = self.adaptor.getChild(t, i)
            self._index(child, m)

    def find(self, tree, what):
        """Return a list of matching token.

        what may either be an integer specifying the token type to find or
        a string with a pattern that must be matched.
        """
        if isinstance(what, (int, long)):
            return self._findTokenType(tree, what)
        elif isinstance(what, basestring):
            return self._findPattern(tree, what)
        else:
            raise TypeError("'what' must be string or integer")

    def _findTokenType(self, t, ttype):
        """Return a List of tree nodes with token type ttype."""
        nodes = []

        def visitor(tree, parent, childIndex, labels):
            nodes.append(tree)

        self.visit(t, ttype, visitor)
        return nodes

    def _findPattern(self, t, pattern):
        """Return a List of subtrees matching pattern."""
        subtrees = []

        # Create a TreePattern from the pattern
        tokenizer = TreePatternLexer(pattern)
        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
        tpattern = parser.pattern()

        # don't allow invalid patterns
        if (tpattern is None or tpattern.isNil()
            or isinstance(tpattern, WildcardTreePattern)):
            return None

        rootTokenType = tpattern.getType()

        def visitor(tree, parent, childIndex, label):
            # only attempt a full structural match where the root type matches
            if self._parse(tree, tpattern, None):
                subtrees.append(tree)

        self.visit(t, rootTokenType, visitor)
        return subtrees

    def visit(self, tree, what, visitor):
        """Visit every node in tree matching what, invoking the visitor.

        If what is a string, it is parsed as a pattern and only matching
        subtrees will be visited.

        The implementation uses the root node of the pattern in combination
        with visit(t, ttype, visitor) so nil-rooted patterns are not allowed.
        Patterns with wildcard roots are also not allowed.

        If what is an integer, it is used as a token type and visit will match
        all nodes of that type (this is faster than the pattern match).

        The labels arg of the visitor action method is never set (it's None)
        since using a token type rather than a pattern doesn't let us set a
        label.
        """
        if isinstance(what, (int, long)):
            self._visitType(tree, None, 0, what, visitor)
        elif isinstance(what, basestring):
            self._visitPattern(tree, what, visitor)
        else:
            raise TypeError("'what' must be string or integer")

    def _visitType(self, t, parent, childIndex, ttype, visitor):
        """Do the recursive work for visit."""
        if t is None:
            return

        if self.adaptor.getType(t) == ttype:
            visitor(t, parent, childIndex, None)

        for i in range(self.adaptor.getChildCount(t)):
            child = self.adaptor.getChild(t, i)
            self._visitType(child, t, i, ttype, visitor)

    def _visitPattern(self, tree, pattern, visitor):
        """
        For all subtrees that match the pattern, execute the visit action.
        """
        # Create a TreePattern from the pattern
        tokenizer = TreePatternLexer(pattern)
        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
        tpattern = parser.pattern()

        # don't allow invalid patterns
        if (tpattern is None or tpattern.isNil()
            or isinstance(tpattern, WildcardTreePattern)):
            return

        rootTokenType = tpattern.getType()

        def rootvisitor(tree, parent, childIndex, labels):
            # fresh labels dict per candidate so matches don't leak into
            # each other
            labels = {}
            if self._parse(tree, tpattern, labels):
                visitor(tree, parent, childIndex, labels)

        self.visit(tree, rootTokenType, rootvisitor)

    def parse(self, t, pattern, labels=None):
        """
        Given a pattern like (ASSIGN %lhs:ID %rhs:.) with optional labels
        on the various nodes and '.' (dot) as the node/subtree wildcard,
        return true if the pattern matches and fill the labels Map with
        the labels pointing at the appropriate nodes. Return false if
        the pattern is malformed or the tree does not match.

        If a node specifies a text arg in pattern, then that must match
        for that node in t.
        """
        tokenizer = TreePatternLexer(pattern)
        parser = TreePatternParser(tokenizer, self, TreePatternTreeAdaptor())
        tpattern = parser.pattern()
        return self._parse(t, tpattern, labels)

    def _parse(self, t1, t2, labels):
        """
        Do the work for parse. Check to see if the t2 pattern fits the
        structure and token types in t1. Check text if the pattern has
        text arguments on nodes. Fill labels map with pointers to nodes
        in tree matched against nodes in pattern with labels.
        """
        # make sure both are non-null
        if t1 is None or t2 is None:
            return False

        # check roots (wildcard matches anything)
        if not isinstance(t2, WildcardTreePattern):
            if self.adaptor.getType(t1) != t2.getType():
                return False

            if t2.hasTextArg and self.adaptor.getText(t1) != t2.getText():
                return False

        # map label in pattern to node in t1. This must happen for wildcard
        # nodes too; otherwise "%rhs:." (documented in parse()) would match
        # but never record the captured node in labels.
        if t2.label is not None and labels is not None:
            labels[t2.label] = t1

        # check children
        n1 = self.adaptor.getChildCount(t1)
        n2 = t2.getChildCount()
        if n1 != n2:
            return False

        for i in range(n1):
            child1 = self.adaptor.getChild(t1, i)
            child2 = t2.getChild(i)
            if not self._parse(child1, child2, labels):
                return False

        return True

    def equals(self, t1, t2, adaptor=None):
        """
        Compare t1 and t2; return true if token types/text, structure match
        exactly.

        The trees are examined in their entirety so that (A B) does not match
        (A B C) nor (A (B C)).
        """
        if adaptor is None:
            adaptor = self.adaptor
        return self._equals(t1, t2, adaptor)

    def _equals(self, t1, t2, adaptor):
        """Recursive worker for equals()."""
        # make sure both are non-null
        if t1 is None or t2 is None:
            return False

        # check roots
        if adaptor.getType(t1) != adaptor.getType(t2):
            return False
        if adaptor.getText(t1) != adaptor.getText(t2):
            return False

        # check children
        n1 = adaptor.getChildCount(t1)
        n2 = adaptor.getChildCount(t2)
        if n1 != n2:
            return False

        for i in range(n1):
            child1 = adaptor.getChild(t1, i)
            child2 = adaptor.getChild(t2, i)
            if not self._equals(child1, child2, adaptor):
                return False

        return True
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Decorator that produces a callable object that executes a TensorFlow graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import tape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _default_initializer(name, shape, dtype):
    """Return the variable store's default initializer for a variable."""
    # pylint: disable=protected-access
    var_store = variable_scope._get_default_variable_store()
    result = var_store._get_default_initializer(name, shape=shape, dtype=dtype)
    # pylint: enable=protected-access
    # _get_default_initializer returns a tuple; the initializer comes first.
    return result[0]
class _CapturedVariable(object):
    """Variable captured by graph_callable.

    Internal to the implementation of graph_callable. Created only by
    _VariableCapturingScope and used only to read the variable values when
    calling the function after the variables are initialized.
    """

    def __init__(self, name, initializer, shape, dtype, trainable):
        self.name = name
        if initializer is None:
            initializer = _default_initializer(name, shape, dtype)
        # Defer computing the initial value so it runs inside the eager
        # context entered below.
        initial_value = lambda: initializer(shape, dtype=dtype)

        with context.eager_mode():
            self.variable = resource_variable_ops.ResourceVariable(
                initial_value=initial_value, name=name, dtype=dtype,
                trainable=trainable)
        self.shape = shape
        self.dtype = dtype
        # Placeholder slot; left unset here (assigned externally if used).
        self.placeholder = None
        self.trainable = trainable

    def read(self, want_gradients=True):
        """Read the variable's value; watch trainables on the tape."""
        if want_gradients and self.trainable:
            v = tape.watch_variable(self.variable)
        else:
            v = self.variable
        return v.read_value()
class _VariableCapturingScope(object):
    """Variable-scope-like object which captures tf.get_variable calls.

    This is responsible for the main difference between the initialization
    version of a function object and the calling version of a function object.

    capturing_scope replaces calls to tf.get_variable with placeholder tensors
    to be fed the variable's current value. TODO(apassos): these placeholders
    should instead be objects implementing a similar API to tf.Variable, for
    full compatibility.

    initializing_scope replaces calls to tf.get_variable with creation of
    variables and initialization of their values. This allows eventual support
    of initialized_value and friends.

    TODO(apassos): once the eager mode layers API is implemented support eager
    func-to-object as well.
    """

    def __init__(self):
        # name -> _CapturedVariable, populated by initializing_scope.
        self.variables = {}
        # name -> variable, used by initializing_scope to handle reuse.
        self.tf_variables = {}

    @contextlib.contextmanager
    def capturing_scope(self):
        """Context manager to capture variable creations.

        Replaces variable accesses with placeholders.

        Yields:
          nothing
        """
        # TODO(apassos) ignoring the regularizer and partitioner here; figure out
        # how to deal with these.
        def _custom_getter(getter=None, name=None, shape=None, dtype=dtypes.float32,  # pylint: disable=missing-docstring
                           initializer=None, regularizer=None, reuse=None,
                           trainable=True, collections=None, caching_device=None,  # pylint: disable=redefined-outer-name
                           partitioner=None, validate_shape=True,
                           use_resource=None):
            # All creation arguments are ignored here: the variable must
            # already have been created under initializing_scope.
            del getter, regularizer, partitioner, validate_shape, use_resource, dtype
            del collections, initializer, trainable, reuse, caching_device, shape,
            assert name in self.variables
            v = self.variables[name]
            return v.variable

        scope = variable_scope.get_variable_scope()
        with variable_scope.variable_scope(scope, custom_getter=_custom_getter):
            yield

    @contextlib.contextmanager
    def initializing_scope(self):
        """Context manager to capture variable creations.

        Forcibly initializes all created variables.

        Yields:
          nothing
        """
        # TODO(apassos) ignoring the regularizer and partitioner here; figure out
        # how to deal with these.
        def _custom_getter(getter=None, name=None, shape=None, dtype=dtypes.float32,  # pylint: disable=missing-docstring
                           initializer=None, regularizer=None, reuse=None,
                           trainable=True, collections=None, caching_device=None,  # pylint: disable=redefined-outer-name
                           partitioner=None, validate_shape=True,
                           use_resource=None):
            del getter, regularizer, collections, caching_device, partitioner
            del use_resource, validate_shape
            if name in self.tf_variables:
                if reuse:
                    return self.tf_variables[name].initialized_value()
                else:
                    raise ValueError("Specified reuse=%s but tried to reuse variables."
                                     % reuse)
            # TODO(apassos): ensure this is on the same device as above
            v = _CapturedVariable(name, initializer, shape, dtype, trainable)
            self.variables[name] = v

            graph_mode_resource = v.variable.handle
            if initializer is None:
                initializer = _default_initializer(name, shape, dtype)
            # Assign the initial value to the graph-mode handle so the
            # initialization graph actually initializes the variable.
            resource_variable_ops.shape_safe_assign_variable_handle(
                graph_mode_resource, v.variable.shape, initializer(shape, dtype))
            return v.variable

        scope = variable_scope.get_variable_scope()
        with variable_scope.variable_scope(scope, custom_getter=_custom_getter):
            yield
class _FunctionObject(function._GraphModeFunction):  # pylint: disable=protected-access
    """Captured graph-mode function with read-only variables.

    Calling this function object will read the current values of the variables
    and pass them to the graph mode function, which will use them as constants.
    """

    def __init__(self, variables, placeholder_inputs, extra_inputs, fdef,
                 graph, operations, outputs, func_outputs_to_fdef_outputs,
                 output_shapes):
        # _CapturedVariable wrappers; kept so `variables` can expose the
        # underlying ResourceVariables.
        self._variables = variables
        super(_FunctionObject, self).__init__(
            placeholder_inputs,
            extra_inputs,
            fdef,
            graph,
            operations,
            outputs,
            func_outputs_to_fdef_outputs,
            output_shapes)

    @property
    def variables(self):
        # Unwrap the ResourceVariables from the captured wrappers.
        return [x.variable for x in self._variables]
class _InitializingFunctionObject(object):
    """Responsible for deciding which version of func-to-object to call.

    call_fn is the version which calls the function with the current values of
    the variables and init_fn is the version which calls the function to
    initialize all variables.

    TODO(apassos): figure out a way to support initializing only _some_
    variables. This requires a way to pull out a variable's initialization code
    from the graph, which might not be possible in general.
    """

    def __init__(self, call_fn, init_fn, shape_and_dtypes):
        self._init_fn = init_fn
        self._call_fn = call_fn
        self.shape_and_dtypes = shape_and_dtypes
        # Pre-compute declared shapes for the validation done in __call__.
        self.flattened_shapes = [tensor_shape.as_shape(sd.shape) for sd in
                                 nest.flatten(self.shape_and_dtypes)]

    @property
    def variables(self):
        return self._call_fn.variables

    def __call__(self, *args):
        """Invoke the wrapped function.

        Validates argument shapes against the declared ShapeAndDtype specs,
        then dispatches to the initializing graph if no variable has been
        initialized yet, or to the calling graph once all have.

        Raises:
          ValueError: if argument shapes don't match the declaration, or if
            only some of the variables are initialized.
        """
        nest.assert_same_structure(self.shape_and_dtypes, args, check_types=False)
        # Use a generator (not a materialized list) for the shape check.
        if not all(shape.is_compatible_with(arg.shape)
                   for shape, arg in zip(self.flattened_shapes,
                                         nest.flatten(args))):
            raise ValueError(
                "Declared shapes do not match argument shapes: Expected %s, found %s."
                % (self.flattened_shapes, [arg.shape for arg in nest.flatten(args)]))

        initialized = [resource_variable_ops.var_is_initialized_op(
            v.handle).numpy() for v in self._call_fn.variables]
        if all(initialized):
            # All variables ready: watch trainables for gradients and run
            # the calling version of the graph.
            for v in self._call_fn.variables:
                if v._trainable:  # pylint: disable=protected-access
                    tape.watch_variable(v)
            return self._call_fn(*args)
        elif not any(initialized):
            # First call: run the initializing version of the graph.
            return self._init_fn(*args)
        else:
            raise ValueError("Some, but not all, variables are initialized.")
def _get_graph_callable_inputs(shape_and_dtypes):
    """Maps specified shape_and_dtypes to graph inputs (placeholders)."""
    placeholders = []
    for spec in shape_and_dtypes:
        if isinstance(spec, ShapeAndDtype):
            placeholders.append(array_ops.placeholder(spec.dtype, spec.shape))
        elif isinstance(spec, (tuple, list)):
            # recurse into nested structures, preserving their nesting
            placeholders.append(_get_graph_callable_inputs(spec))
        else:
            raise errors.InvalidArgumentError(
                None, None, "Expected the argument to @graph_callable to be a "
                "(possibly nested) list or tuple of ShapeAndDtype objects, "
                "but got an object of type: %s" % type(spec))

    # mirror the container type of the input structure
    if isinstance(shape_and_dtypes, tuple):
        return tuple(placeholders)
    return placeholders
def _graph_callable_internal(func, shape_and_dtypes):
    """Defines and returns a template version of func.

    Under the hood we make two function objects, each wrapping a different
    version of the graph-mode code. One version immediately runs variable
    initialization before making the variable's Tensors available for use,
    while the other version replaces the Variables with placeholders which
    become function arguments and get the current variable's value.

    Limitations in (2) and (4) are because this does not implement a graph-mode
    Variable class which has a convert_to_tensor(as_ref=True) method and a
    initialized_value method. This is fixable.

    Args:
      func: The tfe Python function to compile.
      shape_and_dtypes: A possibly nested list or tuple of ShapeAndDtype
        objects.

    Raises:
      ValueError: If any one of func's outputs is not a Tensor.

    Returns:
      Callable graph object.
    """
    container = tf_ops.get_default_graph()._container  # pylint: disable=protected-access
    container_prefix = tf_ops.get_default_graph()._container_prefix  # pylint: disable=protected-access
    with context.graph_mode():
        # This graph will store both the initialization and the call version of
        # the wrapped function. It will later be used by the backprop code to
        # build the backprop graph, if necessary.
        captures = {}
        tmp_graph = function.CapturingGraph(captures)
        # Inherit the container from the original graph to create resources at
        # user expected containers. Also inherits the container prefix, since
        # this is used for error checking when isolating Eager execution (the
        # container prefix at creation must match the container prefix when
        # used, and variables returned from the graph callable will be used in
        # the outside context).
        tmp_graph._container = container  # pylint: disable=protected-access
        tmp_graph._container_prefix = container_prefix  # pylint: disable=protected-access
        with tmp_graph.as_default():
            # Placeholders for the non-variable inputs.
            func_inputs = _get_graph_callable_inputs(shape_and_dtypes)
            func_num_args = len(tf_inspect.getargspec(func).args)
            if len(func_inputs) != func_num_args:
                raise TypeError("The number of arguments accepted by the decorated "
                                "function `%s` (%d) must match the number of "
                                "ShapeAndDtype objects passed to the graph_callable() "
                                "decorator (%d)." %
                                (func.__name__, func_num_args, len(func_inputs)))

            # First call the function to generate a graph which can initialize
            # all variables. As a side-effect this will populate the variable
            # capturing scope's view of which variables exist.
            variable_captures = _VariableCapturingScope()
            with variable_captures.initializing_scope(), function.capture_tensors(
                captures):
                func_outputs = func(*func_inputs)
            outputs_list = nest.flatten(func_outputs)
            # A bare `return None` flattens to [None]; treat it as no outputs.
            if len(outputs_list) == 1 and outputs_list[0] is None:
                outputs_list = []
            output_shapes = [x.shape for x in outputs_list]
            if not all(isinstance(x, tf_ops.Tensor) for x in outputs_list):
                raise ValueError("Found non-tensor output in %s" % str(outputs_list))
            initializing_operations = tmp_graph.get_operations()

            # Call the function again, now replacing usages of variables with
            # placeholders. This assumes the variable capturing scope created
            # above knows about all variables.
            with variable_captures.capturing_scope(), function.capture_tensors(
                captures):
                captured_outputs = func(*func_inputs)
            captured_outlist = nest.flatten(captured_outputs)
            # The second trace appended its ops after the first; slice them off.
            capturing_operations = tmp_graph.get_operations()[
                len(initializing_operations):]

    # Sort for a deterministic variable order independent of creation order.
    sorted_variables = sorted(variable_captures.variables.values(),
                              key=lambda x: x.name)
    ids = list(sorted(captures.keys()))
    if ids:
        extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
    else:
        extra_inputs = []
        extra_placeholders = []
    flat_inputs = [x for x in nest.flatten(func_inputs)
                   if isinstance(x, tf_ops.Tensor)]
    placeholder_inputs = flat_inputs + list(extra_placeholders)

    func_def_outputs = [x for x in outputs_list if isinstance(x, tf_ops.Tensor)]
    initializer_function_def = function.make_function_def(
        tmp_graph,
        initializing_operations,
        placeholder_inputs,
        func_def_outputs)
    # TODO(ashankar): Oh lord, forgive me for this lint travesty.
    # Also, what about the gradient registry of these functions? Those need to
    # be addressed as well.
    for f in tmp_graph._functions.values():  # pylint: disable=protected-access
        function._register_with_name(f.name, f.definition)  # pylint: disable=protected-access
    function._register_with_name(function._inference_name(func.__name__),  # pylint: disable=protected-access
                                 initializer_function_def)
    initializer_function = function._GraphModeFunction(  # pylint: disable=protected-access
        placeholder_inputs,
        extra_inputs,
        initializer_function_def,
        tmp_graph,
        initializing_operations,
        func_outputs,
        function._map_sequence_obj_to_idx(func_def_outputs),  # pylint: disable=protected-access
        output_shapes)

    capture_func_def_outputs = [
        x for x in captured_outlist if isinstance(x, tf_ops.Tensor)]
    captured_function_def = function.make_function_def(
        tmp_graph,
        capturing_operations,
        placeholder_inputs,
        capture_func_def_outputs)
    function._register_with_name(function._inference_name(func.__name__),  # pylint: disable=protected-access
                                 captured_function_def)
    captured_function = _FunctionObject(
        sorted_variables,
        placeholder_inputs,
        extra_inputs,
        captured_function_def,
        tmp_graph,
        capturing_operations,
        captured_outputs,
        function._map_sequence_obj_to_idx(capture_func_def_outputs),  # pylint: disable=protected-access
        output_shapes)

    return _InitializingFunctionObject(captured_function, initializer_function,
                                       shape_and_dtypes)
class ShapeAndDtype(object):
    """Data type that packages together shape and type information.

    Used for arguments to graph callables. See graph_callable() for an example.
    """

    def __init__(self, shape, dtype):
        # Stored as given; interpretation happens where placeholders are built.
        self.shape = shape
        self.dtype = dtype
def graph_callable(shape_and_dtypes):
    """Decorator that produces a callable that executes a TensorFlow graph.

    When applied on a function that constructs a TensorFlow graph, this
    decorator produces a callable object that:

    1. Executes the graph when invoked. The first call will initialize any
       variables defined in the graph.

    2. Provides a .variables() method to return the list of TensorFlow
       variables defined in the graph.

    Note that the wrapped function is not allowed to change the values of the
    variables, just use them.

    The return value of the wrapped function must be one of the following:
    (1) None, (2) a Tensor, or (3) a possibly nested sequence of Tensors.

    Example:

    ```python
    @tfe.graph_callable([tfe.ShapeAndDtype(shape=(), dtype=dtypes.float32)])
    def foo(x):
      v = tf.get_variable('v', initializer=tf.ones_initializer(), shape=())
      return v + x

    ret = foo(tfe.Tensor(2.0))  # `ret` here is a Tensor with value 3.0.

    foo.variables[0].assign(7.0)  # Modify the value of variable `v`.
    ret = foo(tfe.Tensor(2.0))  # `ret` here now is a Tensor with value 9.0.
    ```

    Args:
      shape_and_dtypes: A possibly nested list or tuple of ShapeAndDtype
        objects that specifies shape and type information for each of the
        callable's arguments. The length of this list must be equal to the
        number of arguments accepted by the wrapped function.

    Returns:
      A callable graph object.
    """
    # TODO(alive,apassos): support initialized_value and friends from
    # tf.Variable.
    assert context.in_eager_mode(), (
        "graph_callable can only be used when Eager execution is enabled.")
    def decorator(func):
        return tf_decorator.make_decorator(func,
                                           _graph_callable_internal(
                                               func, shape_and_dtypes))

    return decorator
| |
"""
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD-requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* This middleware expects that a HEAD request is answered with exactly the same
  response headers as the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.utils.cache import (
get_cache_key, get_max_age, has_vary_header, learn_cache_key,
patch_response_headers,
)
from django.utils.deprecation import MiddlewareMixin
class UpdateCacheMiddleware(MiddlewareMixin):
    """
    Response-phase cache middleware that updates the cache if the response is
    cacheable.

    Must be used as part of the two-part update/fetch cache middleware.
    UpdateCacheMiddleware must be the first piece of middleware in MIDDLEWARE
    so that it'll get called last during the response phase.
    """
    # RemovedInDjango40Warning: when the deprecation ends, replace with:
    #   def __init__(self, get_response):
    def __init__(self, get_response=None):
        self._get_response_none_deprecation(get_response)
        self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
        # May be overridden per-view (e.g. by the cache_page decorator via
        # CacheMiddleware).
        self.page_timeout = None
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = caches[self.cache_alias]
        self.get_response = get_response

    def _should_update_cache(self, request, response):
        # The flag is set during the request phase by FetchFromCacheMiddleware.
        return hasattr(request, '_cache_update_cache') and request._cache_update_cache

    def process_response(self, request, response):
        """Set the cache, if needed."""
        if not self._should_update_cache(request, response):
            # We don't need to update the cache, just return.
            return response

        if response.streaming or response.status_code not in (200, 304):
            return response

        # Don't cache responses that set a user-specific (and maybe security
        # sensitive) cookie in response to a cookie-less request.
        if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
            return response

        # Don't cache a response with 'Cache-Control: private'
        if 'private' in response.get('Cache-Control', ()):
            return response

        # Page timeout takes precedence over the "max-age" and the default
        # cache timeout.
        timeout = self.page_timeout
        if timeout is None:
            # The timeout from the "max-age" section of the "Cache-Control"
            # header takes precedence over the default cache timeout.
            timeout = get_max_age(response)
            if timeout is None:
                timeout = self.cache_timeout
            elif timeout == 0:
                # max-age was set to 0, don't cache.
                return response
        patch_response_headers(response, timeout)
        if timeout and response.status_code == 200:
            cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)
            if hasattr(response, 'render') and callable(response.render):
                # TemplateResponse: content isn't rendered yet, so store the
                # response only after rendering completes.
                response.add_post_render_callback(
                    lambda r: self.cache.set(cache_key, r, timeout)
                )
            else:
                self.cache.set(cache_key, response, timeout)
        return response
class FetchFromCacheMiddleware(MiddlewareMixin):
    """
    Request-phase cache middleware that fetches a page from the cache.

    Must be used as part of the two-part update/fetch cache middleware.
    FetchFromCacheMiddleware must be the last piece of middleware in MIDDLEWARE
    so that it'll get called last during the request phase.
    """
    # RemovedInDjango40Warning: when the deprecation ends, replace with:
    #   def __init__(self, get_response):
    def __init__(self, get_response=None):
        self._get_response_none_deprecation(get_response)
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = caches[self.cache_alias]
        self.get_response = get_response

    def process_request(self, request):
        """
        Check whether the page is already cached and return the cached
        version if available.
        """
        if request.method not in ('GET', 'HEAD'):
            # Uncacheable method; also tell UpdateCacheMiddleware not to store.
            request._cache_update_cache = False
            return None  # Don't bother checking the cache.

        # try and get the cached GET response
        cache_key = get_cache_key(request, self.key_prefix, 'GET', cache=self.cache)
        if cache_key is None:
            request._cache_update_cache = True
            return None  # No cache information available, need to rebuild.
        response = self.cache.get(cache_key)
        # if it wasn't found and we are looking for a HEAD, try looking just for that
        if response is None and request.method == 'HEAD':
            cache_key = get_cache_key(request, self.key_prefix, 'HEAD', cache=self.cache)
            response = self.cache.get(cache_key)

        if response is None:
            request._cache_update_cache = True
            return None  # No cache information available, need to rebuild.

        # hit, return cached response
        request._cache_update_cache = False
        return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
    """
    Cache middleware that provides basic behavior for many simple sites.

    Also used as the hook point for the cache decorator, which is generated
    using the decorator-from-middleware utility.
    """
    # RemovedInDjango40Warning: when the deprecation ends, replace with:
    #   def __init__(self, get_response, cache_timeout=None, page_timeout=None, **kwargs):
    def __init__(self, get_response=None, cache_timeout=None, page_timeout=None, **kwargs):
        self._get_response_none_deprecation(get_response)
        self.get_response = get_response
        # We need to differentiate between "provided, but using default value",
        # and "not provided". If the value is provided using a default, then
        # we fall back to system defaults. If it is not provided at all,
        # we need to use middleware defaults.
        if 'key_prefix' in kwargs:
            key_prefix = kwargs['key_prefix']
            if key_prefix is None:
                key_prefix = ''
        else:
            key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.key_prefix = key_prefix

        if 'cache_alias' in kwargs:
            cache_alias = kwargs['cache_alias']
            if cache_alias is None:
                cache_alias = DEFAULT_CACHE_ALIAS
        else:
            cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache_alias = cache_alias

        if cache_timeout is None:
            cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
        self.cache_timeout = cache_timeout
        self.page_timeout = page_timeout
        self.cache = caches[self.cache_alias]
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import logging
import json
from django import http
from django.conf import settings
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard import fiware_api
from openstack_dashboard.dashboards.idm_admin.user_accounts \
import forms as user_accounts_forms
from openstack_dashboard.dashboards.idm_admin \
import utils as idm_admin_utils
from openstack_dashboard.fiware_auth import views as fiware_auth
from openstack_dashboard.utils import email as email_utils
LOG = logging.getLogger('idm_logger')
# Account-type settings. getattr() is used without a default on purpose so
# that a missing setting fails loudly (AttributeError) at import time.
# FIWARE_DEFAULT_DURATION maps an account type to its duration in days
# (it is used below with datetime.timedelta(days=...)).
FIWARE_DEFAULT_DURATION = getattr(settings, 'FIWARE_DEFAULT_DURATION')
KEYSTONE_TRIAL_ROLE = getattr(settings, 'KEYSTONE_TRIAL_ROLE')
KEYSTONE_BASIC_ROLE = getattr(settings, 'KEYSTONE_BASIC_ROLE')
KEYSTONE_COMMUNITY_ROLE = getattr(settings, 'KEYSTONE_COMMUNITY_ROLE')
def _current_account(request, user_id):
    """Return the (role_id, role_name) pair of the user's current FIWARE
    account role, or (None, None) if the user holds none of them.
    """
    # TODO(garcianavalon) find a better solution to this
    assignments = fiware_api.keystone.role_assignments_list(
        request, user=user_id, domain='default')
    assigned_role_ids = [assignment.role['id'] for assignment in assignments]
    # First matching account choice wins, mirroring the choices' ordering.
    for account_choice in user_accounts_forms.get_account_choices():
        if account_choice[0] in assigned_role_ids:
            return account_choice
    return (None, None)
def _current_regions(request, cloud_project_id):
    """Return the region ids selected by the endpoint groups of the given
    cloud project (only groups whose filters carry a 'region_id' count).
    """
    endpoint_groups = fiware_api.keystone.list_endpoint_groups_for_project(
        request, cloud_project_id)
    return [group.filters['region_id']
            for group in endpoint_groups
            if 'region_id' in group.filters]
class FindUserView(forms.ModalFormView):
    """Modal form to look up a user account by email (IdM admins only)."""
    form_class = user_accounts_forms.FindUserByEmailForm
    template_name = 'idm_admin/user_accounts/index.html'
    def dispatch(self, request, *args, **kwargs):
        # Guard clause: non-administrators are sent back to their home page.
        if not idm_admin_utils.is_current_user_administrator(request):
            return redirect('horizon:user_home')
        return super(FindUserView, self).dispatch(request, *args, **kwargs)
class UpdateAccountView(forms.ModalFormView):
    """Admin-only modal form to change a user's FIWARE account status
    (account type, duration and allowed regions).
    """
    form_class = user_accounts_forms.UpdateAccountForm
    template_name = 'idm_admin/user_accounts/update.html'
    success_url = 'horizon:idm_admin:user_accounts:update'
    def dispatch(self, request, *args, **kwargs):
        # Only IdM administrators may use this view; everyone else is
        # redirected to their home page. The target user is resolved once
        # here and reused by get_context_data/get_initial.
        if idm_admin_utils.is_current_user_administrator(request):
            self.user = fiware_api.keystone.user_get(request,
                kwargs['user_id'])
            return super(UpdateAccountView, self).dispatch(request, *args, **kwargs)
        else:
            return redirect('horizon:user_home')
    def get_context_data(self, **kwargs):
        """Expose the user, allowed regions and current account info
        (type, start date, duration and computed end date) to the template.
        """
        context = super(UpdateAccountView, self).get_context_data(**kwargs)
        user = self.user
        context['user'] = user
        context['allowed_regions'] = json.dumps(
            getattr(settings, 'FIWARE_ALLOWED_REGIONS', None))
        context['default_durations'] = json.dumps(FIWARE_DEFAULT_DURATION)
        account_type = _current_account(self.request, user.id)[1]
        # str() guards against account_type being None (user without an
        # account role): the 'None_started_at' attribute lookup then just
        # falls back to the getattr default.
        account_info = {
            'account_type': account_type,
            'started_at': getattr(user, str(account_type) + '_started_at', None),
            'duration': getattr(user, str(account_type) + '_duration',
                FIWARE_DEFAULT_DURATION.get(account_type)),
            'regions': _current_regions(self.request, user.cloud_project_id)
        }
        # End date can only be computed when both a start date ('%Y-%m-%d'
        # string) and a duration (days) are known.
        if account_info['started_at'] and account_info['duration']:
            start_date = datetime.datetime.strptime(account_info['started_at'], '%Y-%m-%d')
            end_date = start_date + datetime.timedelta(days=account_info['duration'])
            account_info['end_date'] = end_date.strftime('%Y-%m-%d')
        context['account_info'] = account_info
        return context
    def get_initial(self):
        """Preload the form with the user's current account type and regions."""
        initial = super(UpdateAccountView, self).get_initial()
        user_id = self.user.id
        current_account = _current_account(self.request, user_id)
        current_regions = _current_regions(self.request, self.user.cloud_project_id)
        initial.update({
            'user_id': user_id,
            'regions': [(region_id, region_id) for region_id in current_regions],
            'account_type': current_account[0],
        })
        return initial
class UpdateAccountEndpointView(View, user_accounts_forms.UserAccountsLogicMixin):
    """Upgrade account logic without the form.

    JSON POST endpoint authenticated through an administrator Keystone
    token in the ``X-Auth-Token`` header. The body must carry ``user_id``
    and ``role_id``; every account type other than basic also requires a
    non-empty ``regions`` list. When ``notify`` is set, the user is
    emailed about the account status change.
    """
    http_method_names = ['post']
    use_idm_account = True
    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        # Check there is a valid keystone token in the header.
        token = request.META.get('HTTP_X_AUTH_TOKEN', None)
        if not token:
            return http.HttpResponse('Unauthorized', status=401)
        try:
            idm_admin_utils.is_user_administrator_from_token(request, token=token)
        except Exception:
            return http.HttpResponse('Unauthorized', status=401)
        return super(UpdateAccountEndpointView, self).dispatch(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        try:
            data = json.loads(request.body)
            user = fiware_api.keystone.user_get(request, data['user_id'])
            role_id = data['role_id']
            if (role_id == fiware_api.keystone.get_trial_role(
                    request).id):
                # NOTE(review): despite its name, _max_trial_users_reached
                # appears to return the number of trial slots left — confirm.
                trial_left = self._max_trial_users_reached(request)
                if not trial_left:
                    # BUG FIX: HttpResponseNotAllowed requires the list of
                    # permitted methods as its first argument; calling it
                    # with no arguments raised a TypeError that the broad
                    # except below turned into a 500 instead of a 405.
                    return http.HttpResponseNotAllowed(self.http_method_names)
            regions = data.get('regions', [])
            # Every account type except basic needs at least one region.
            if (role_id != fiware_api.keystone.get_basic_role(
                    request).id
                    and not regions):
                return http.HttpResponseBadRequest()
            self.update_account(request, user, role_id, regions)
            if data.get('notify'):
                account_type = _current_account(self.request, user.id)[1]
                content = {
                    'regions': _current_regions(self.request, user.cloud_project_id),
                    'user': user,
                    'account_type': account_type,
                    'started_at': getattr(user, account_type + '_started_at', None),
                    'duration': getattr(user, account_type + '_duration',
                                        FIWARE_DEFAULT_DURATION.get(account_type)),
                    'show_cloud_info': account_type in ['trial', 'community'],
                }
                # End date is only known when both start date and duration are.
                if content['started_at'] and content['duration']:
                    start_date = datetime.datetime.strptime(content['started_at'], '%Y-%m-%d')
                    end_date = start_date + datetime.timedelta(days=content['duration'])
                    content['end_date'] = end_date.strftime('%Y-%m-%d')
                email_utils.send_account_status_change_email(
                    user=user,
                    content=content)
            return http.HttpResponse()
        except Exception as exception:
            # Best-effort error reporting: surface the exception text as a 500.
            return http.HttpResponseServerError(str(exception), content_type="text/plain")
class NotifyUsersEndpointView(View):
    """Notify a list of users that their resources are about to be deleted."""
    http_method_names = ['post']
    @csrf_exempt
    def dispatch(self, request, *args, **kwargs):
        # Check there is a valid keystone token in the header
        token = request.META.get('HTTP_X_AUTH_TOKEN', None)
        if not token:
            return http.HttpResponse('Unauthorized', status=401)
        try:
            idm_admin_utils.is_user_administrator_from_token(request, token=token)
        except Exception:
            return http.HttpResponse('Unauthorized', status=401)
        return super(NotifyUsersEndpointView, self).dispatch(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        """Send an expiration-warning email to each user id listed under
        ``users`` in the JSON body. Failures never abort the batch: each
        failing user is collected in ``errors`` and the full list is
        returned as JSON under ``error_users``.
        """
        data = json.loads(request.body)
        errors = []
        for user_id in data['users']:
            try:
                user = fiware_api.keystone.user_get(request, user_id)
                account_type = _current_account(self.request, user.id)[1]
                # Skip users without an account role or with a basic one;
                # both cases are reported back instead of emailed.
                if not account_type:
                    errors.append((user_id, 'Has no basic, trial or community rol assigned.'))
                    continue
                elif account_type == 'basic':
                    errors.append((user_id, 'User is basic.'))
                    continue
                content = {
                    'regions': _current_regions(self.request, user.cloud_project_id),
                    'user':user,
                    'account_type': account_type,
                    # No getattr default here on purpose: a missing
                    # '<type>_started_at' raises and lands in `errors`.
                    'started_at': getattr(user, account_type + '_started_at'),
                    'duration': getattr(user, account_type + '_duration',
                        FIWARE_DEFAULT_DURATION.get(account_type)),
                    'show_cloud_info': account_type in ['trial', 'community'],
                }
                # NOTE(garcianavalon) there should always be an end date in this email
                # if content.get('started_at') and content.get('duration'):
                start_date = datetime.datetime.strptime(content['started_at'], '%Y-%m-%d')
                end_date = start_date + datetime.timedelta(days=content['duration'])
                content['end_date'] = end_date.strftime('%Y-%m-%d')
                email_utils.send_account_status_expire_email(
                    user=user,
                    content=content)
            except Exception as exception:
                # Record the failure for this user and keep going.
                errors.append((user_id, str(exception)))
                continue
        return http.HttpResponse(json.dumps({'error_users': errors}),
                                 content_type="application/json")
| |
from __future__ import unicode_literals
import re
import unittest
from unittest import skipUnless
from django.db import connection
from django.contrib.gis import gdal
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import (
HAS_SPATIAL_DB, no_mysql, no_oracle, no_spatialite,
mysql, oracle, postgis, spatialite)
from django.test import TestCase
from django.utils import six
if HAS_GEOS:
from django.contrib.gis.geos import (fromstr, GEOSGeometry,
Point, LineString, LinearRing, Polygon, GeometryCollection)
from .models import Country, City, PennsylvaniaCity, State, Track
if HAS_GEOS and not spatialite:
from .models import Feature, MinusOneSRID
def postgis_bug_version():
    """Return a truthy value when the backend is PostGIS 2.0.0-2.0.1,
    versions with known left/right lookup failures
    (http://trac.osgeo.org/postgis/ticket/2035).
    """
    # Backends without a spatial_version attribute get a harmless default.
    spatial_version = getattr(connection.ops, "spatial_version", (0, 0, 0))
    return spatial_version and (2, 0, 0) <= spatial_version <= (2, 0, 1)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoModelTest(TestCase):
    """Test basic ORM behavior of geographic models: fixture loading, the
    lazy GeometryProxy, automatic SRID transforms, NULL geometries,
    inherited geometry fields and raw SQL queries.
    """
    def test_fixtures(self):
        "Testing geographic model initialization from fixtures."
        # Ensuring that data was loaded from initial data fixtures.
        self.assertEqual(2, Country.objects.count())
        self.assertEqual(8, City.objects.count())
        self.assertEqual(2, State.objects.count())
    def test_proxy(self):
        "Testing Lazy-Geometry support (using the GeometryProxy)."
        ## Testing on a Point
        pnt = Point(0, 0)
        nullcity = City(name='NullCity', point=pnt)
        nullcity.save()
        # Making sure TypeError is thrown when trying to set with an
        # incompatible type.
        for bad in [5, 2.0, LineString((0, 0), (1, 1))]:
            try:
                nullcity.point = bad
            except TypeError:
                pass
            else:
                self.fail('Should throw a TypeError')
        # Now setting with a compatible GEOS Geometry, saving, and ensuring
        # the save took, notice no SRID is explicitly set.
        new = Point(5, 23)
        nullcity.point = new
        # Ensuring that the SRID is automatically set to that of the
        # field after assignment, but before saving.
        self.assertEqual(4326, nullcity.point.srid)
        nullcity.save()
        # Ensuring the point was saved correctly after saving
        self.assertEqual(new, City.objects.get(name='NullCity').point)
        # Setting the X and Y of the Point
        nullcity.point.x = 23
        nullcity.point.y = 5
        # Checking assignments pre & post-save.
        self.assertNotEqual(Point(23, 5), City.objects.get(name='NullCity').point)
        nullcity.save()
        self.assertEqual(Point(23, 5), City.objects.get(name='NullCity').point)
        nullcity.delete()
        ## Testing on a Polygon
        shell = LinearRing((0, 0), (0, 100), (100, 100), (100, 0), (0, 0))
        inner = LinearRing((40, 40), (40, 60), (60, 60), (60, 40), (40, 40))
        # Creating a State object using a built Polygon
        ply = Polygon(shell, inner)
        nullstate = State(name='NullState', poly=ply)
        self.assertEqual(4326, nullstate.poly.srid) # SRID auto-set from None
        nullstate.save()
        ns = State.objects.get(name='NullState')
        self.assertEqual(ply, ns.poly)
        # Testing the `ogr` and `srs` lazy-geometry properties.
        if gdal.HAS_GDAL:
            self.assertEqual(True, isinstance(ns.poly.ogr, gdal.OGRGeometry))
            self.assertEqual(ns.poly.wkb, ns.poly.ogr.wkb)
            self.assertEqual(True, isinstance(ns.poly.srs, gdal.SpatialReference))
            self.assertEqual('WGS 84', ns.poly.srs.name)
        # Changing the interior ring on the poly attribute.
        new_inner = LinearRing((30, 30), (30, 70), (70, 70), (70, 30), (30, 30))
        ns.poly[1] = new_inner
        ply[1] = new_inner
        self.assertEqual(4326, ns.poly.srid)
        ns.save()
        self.assertEqual(ply, State.objects.get(name='NullState').poly)
        ns.delete()
    @no_mysql
    def test_lookup_insert_transform(self):
        "Testing automatic transform for lookups and inserts."
        # San Antonio in 'WGS84' (SRID 4326)
        sa_4326 = 'POINT (-98.493183 29.424170)'
        wgs_pnt = fromstr(sa_4326, srid=4326) # Our reference point in WGS84
        # Oracle doesn't have SRID 3084, using 41157.
        if oracle:
            # San Antonio in 'Texas 4205, Southern Zone (1983, meters)' (SRID 41157)
            # Used the following Oracle SQL to get this value:
            # SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_CS.TRANSFORM(SDO_GEOMETRY('POINT (-98.493183 29.424170)', 4326), 41157)) FROM DUAL;
            nad_wkt = 'POINT (300662.034646583 5416427.45974934)'
            nad_srid = 41157
        else:
            # San Antonio in 'NAD83(HARN) / Texas Centric Lambert Conformal' (SRID 3084)
            nad_wkt = 'POINT (1645978.362408288754523 6276356.025927528738976)' # Used ogr.py in gdal 1.4.1 for this transform
            nad_srid = 3084
        # Constructing & querying with a point from a different SRID. Oracle
        # `SDO_OVERLAPBDYINTERSECT` operates differently from
        # `ST_Intersects`, so contains is used instead.
        nad_pnt = fromstr(nad_wkt, srid=nad_srid)
        if oracle:
            tx = Country.objects.get(mpoly__contains=nad_pnt)
        else:
            tx = Country.objects.get(mpoly__intersects=nad_pnt)
        self.assertEqual('Texas', tx.name)
        # Creating San Antonio. Remember the Alamo.
        sa = City.objects.create(name='San Antonio', point=nad_pnt)
        # Now verifying that San Antonio was transformed correctly
        sa = City.objects.get(name='San Antonio')
        self.assertAlmostEqual(wgs_pnt.x, sa.point.x, 6)
        self.assertAlmostEqual(wgs_pnt.y, sa.point.y, 6)
        # If the GeometryField SRID is -1, then we shouldn't perform any
        # transformation if the SRID of the input geometry is different.
        # SpatiaLite does not support missing SRID values.
        if not spatialite:
            m1 = MinusOneSRID(geom=Point(17, 23, srid=4326))
            m1.save()
            self.assertEqual(-1, m1.geom.srid)
    def test_createnull(self):
        "Testing creating a model instance and the geometry being None"
        c = City()
        self.assertEqual(c.point, None)
    @no_spatialite # SpatiaLite does not support abstract geometry columns
    def test_geometryfield(self):
        "Testing the general GeometryField."
        Feature(name='Point', geom=Point(1, 1)).save()
        Feature(name='LineString', geom=LineString((0, 0), (1, 1), (5, 5))).save()
        Feature(name='Polygon', geom=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)))).save()
        Feature(name='GeometryCollection',
                geom=GeometryCollection(Point(2, 2), LineString((0, 0), (2, 2)),
                                        Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))).save()
        f_1 = Feature.objects.get(name='Point')
        self.assertEqual(True, isinstance(f_1.geom, Point))
        self.assertEqual((1.0, 1.0), f_1.geom.tuple)
        f_2 = Feature.objects.get(name='LineString')
        self.assertEqual(True, isinstance(f_2.geom, LineString))
        self.assertEqual(((0.0, 0.0), (1.0, 1.0), (5.0, 5.0)), f_2.geom.tuple)
        f_3 = Feature.objects.get(name='Polygon')
        self.assertEqual(True, isinstance(f_3.geom, Polygon))
        f_4 = Feature.objects.get(name='GeometryCollection')
        self.assertEqual(True, isinstance(f_4.geom, GeometryCollection))
        # The polygon saved standalone equals the one inside the collection.
        self.assertEqual(f_3.geom, f_4.geom[2])
    @no_mysql
    def test_inherited_geofields(self):
        "Test GeoQuerySet methods on inherited Geometry fields."
        # Creating a Pennsylvanian city.
        PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
        # All transformation SQL will need to be performed on the
        # _parent_ table.
        qs = PennsylvaniaCity.objects.transform(32128)
        self.assertEqual(1, qs.count())
        for pc in qs:
            self.assertEqual(32128, pc.point.srid)
    def test_raw_sql_query(self):
        "Testing raw SQL query."
        cities1 = City.objects.all()
        # Only PostGIS would support a 'select *' query because of its recognized
        # HEXEWKB format for geometry fields
        as_text = 'ST_AsText' if postgis else 'asText'
        cities2 = City.objects.raw('select id, name, %s(point) from geoapp_city' % as_text)
        self.assertEqual(len(cities1), len(list(cities2)))
        self.assertIsInstance(cities2[0].point, Point)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoLookupTest(TestCase):
    """Test the spatial lookup types (disjoint, contains/contained,
    left/right, same_as/equals, isnull and relate) across the supported
    spatial backends.
    """
    @no_mysql
    def test_disjoint_lookup(self):
        "Testing the `disjoint` lookup type."
        ptown = City.objects.get(name='Pueblo')
        qs1 = City.objects.filter(point__disjoint=ptown.point)
        self.assertEqual(7, qs1.count())
        qs2 = State.objects.filter(poly__disjoint=ptown.point)
        self.assertEqual(1, qs2.count())
        self.assertEqual('Kansas', qs2[0].name)
    def test_contains_contained_lookups(self):
        "Testing the 'contained', 'contains', and 'bbcontains' lookup types."
        # Getting Texas, yes we were a country -- once ;)
        texas = Country.objects.get(name='Texas')
        # Seeing what cities are in Texas, should get Houston and Dallas,
        # and Oklahoma City because 'contained' only checks on the
        # _bounding box_ of the Geometries.
        if not oracle:
            qs = City.objects.filter(point__contained=texas.mpoly)
            self.assertEqual(3, qs.count())
            cities = ['Houston', 'Dallas', 'Oklahoma City']
            for c in qs:
                self.assertEqual(True, c.name in cities)
        # Pulling out some cities.
        houston = City.objects.get(name='Houston')
        wellington = City.objects.get(name='Wellington')
        pueblo = City.objects.get(name='Pueblo')
        okcity = City.objects.get(name='Oklahoma City')
        lawrence = City.objects.get(name='Lawrence')
        # Now testing contains on the countries using the points for
        # Houston and Wellington.
        tx = Country.objects.get(mpoly__contains=houston.point) # Query w/GEOSGeometry
        nz = Country.objects.get(mpoly__contains=wellington.point.hex) # Query w/EWKBHEX
        self.assertEqual('Texas', tx.name)
        self.assertEqual('New Zealand', nz.name)
        # Spatialite 2.3 thinks that Lawrence is in Puerto Rico (a NULL geometry).
        if not spatialite:
            ks = State.objects.get(poly__contains=lawrence.point)
            self.assertEqual('Kansas', ks.name)
        # Pueblo and Oklahoma City (even though OK City is within the bounding box of Texas)
        # are not contained in Texas or New Zealand.
        self.assertEqual(0, len(Country.objects.filter(mpoly__contains=pueblo.point))) # Query w/GEOSGeometry object
        self.assertEqual((mysql and 1) or 0,
                         len(Country.objects.filter(mpoly__contains=okcity.point.wkt))) # Query w/WKT
        # OK City is contained w/in bounding box of Texas.
        if not oracle:
            qs = Country.objects.filter(mpoly__bbcontains=okcity.point)
            self.assertEqual(1, len(qs))
            self.assertEqual('Texas', qs[0].name)
    # Only PostGIS has `left` and `right` lookup types.
    @no_mysql
    @no_oracle
    @no_spatialite
    def test_left_right_lookups(self):
        "Testing the 'left' and 'right' lookup types."
        # Left: A << B => true if xmax(A) < xmin(B)
        # Right: A >> B => true if xmin(A) > xmax(B)
        # See: BOX2D_left() and BOX2D_right() in lwgeom_box2dfloat4.c in PostGIS source.
        # Getting the borders for Colorado & Kansas
        co_border = State.objects.get(name='Colorado').poly
        ks_border = State.objects.get(name='Kansas').poly
        # Note: Wellington has an 'X' value of 174, so it will not be considered
        # to the left of CO.
        # These cities should be strictly to the right of the CO border.
        cities = ['Houston', 'Dallas', 'Oklahoma City',
                  'Lawrence', 'Chicago', 'Wellington']
        qs = City.objects.filter(point__right=co_border)
        self.assertEqual(6, len(qs))
        for c in qs:
            self.assertEqual(True, c.name in cities)
        # These cities should be strictly to the right of the KS border.
        cities = ['Chicago', 'Wellington']
        qs = City.objects.filter(point__right=ks_border)
        self.assertEqual(2, len(qs))
        for c in qs:
            self.assertEqual(True, c.name in cities)
        # Note: Wellington has an 'X' value of 174, so it will not be considered
        # to the left of CO.
        vic = City.objects.get(point__left=co_border)
        self.assertEqual('Victoria', vic.name)
        cities = ['Pueblo', 'Victoria']
        qs = City.objects.filter(point__left=ks_border)
        self.assertEqual(2, len(qs))
        for c in qs:
            self.assertEqual(True, c.name in cities)
    # The left/right lookup tests are known failures on PostGIS 2.0/2.0.1
    # http://trac.osgeo.org/postgis/ticket/2035
    if postgis_bug_version():
        test_left_right_lookups = unittest.expectedFailure(test_left_right_lookups)
    def test_equals_lookups(self):
        "Testing the 'same_as' and 'equals' lookup types."
        pnt = fromstr('POINT (-95.363151 29.763374)', srid=4326)
        c1 = City.objects.get(point=pnt)
        c2 = City.objects.get(point__same_as=pnt)
        c3 = City.objects.get(point__equals=pnt)
        for c in [c1, c2, c3]:
            self.assertEqual('Houston', c.name)
    @no_mysql
    def test_null_geometries(self):
        "Testing NULL geometry support, and the `isnull` lookup type."
        # Creating a state with a NULL boundary.
        State.objects.create(name='Puerto Rico')
        # Querying for both NULL and Non-NULL values.
        nullqs = State.objects.filter(poly__isnull=True)
        validqs = State.objects.filter(poly__isnull=False)
        # Puerto Rico should be NULL (it's a commonwealth unincorporated territory)
        self.assertEqual(1, len(nullqs))
        self.assertEqual('Puerto Rico', nullqs[0].name)
        # The valid states should be Colorado & Kansas
        self.assertEqual(2, len(validqs))
        state_names = [s.name for s in validqs]
        self.assertEqual(True, 'Colorado' in state_names)
        self.assertEqual(True, 'Kansas' in state_names)
        # Saving another commonwealth w/a NULL geometry.
        nmi = State.objects.create(name='Northern Mariana Islands', poly=None)
        self.assertEqual(nmi.poly, None)
        # Assigning a geometry and saving -- then UPDATE back to NULL.
        nmi.poly = 'POLYGON((0 0,1 0,1 1,1 0,0 0))'
        nmi.save()
        State.objects.filter(name='Northern Mariana Islands').update(poly=None)
        self.assertEqual(None, State.objects.get(name='Northern Mariana Islands').poly)
    @no_mysql
    def test_relate_lookup(self):
        "Testing the 'relate' lookup type."
        # To make things more interesting, we will have our Texas reference point in
        # different SRIDs.
        pnt1 = fromstr('POINT (649287.0363174 4177429.4494686)', srid=2847)
        pnt2 = fromstr('POINT(-98.4919715741052 29.4333344025053)', srid=4326)
        # Not passing in a geometry as first param should
        # raise a type error when initializing the GeoQuerySet
        self.assertRaises(ValueError, Country.objects.filter, mpoly__relate=(23, 'foo'))
        # Making sure the right exception is raised for the given
        # bad arguments.
        for bad_args, e in [((pnt1, 0), ValueError), ((pnt2, 'T*T***FF*', 0), ValueError)]:
            qs = Country.objects.filter(mpoly__relate=bad_args)
            self.assertRaises(e, qs.count)
        # Relate works differently for the different backends.
        if postgis or spatialite:
            contains_mask = 'T*T***FF*'
            within_mask = 'T*F**F***'
            intersects_mask = 'T********'
        elif oracle:
            contains_mask = 'contains'
            within_mask = 'inside'
            # TODO: This is not quite the same as the PostGIS mask above
            intersects_mask = 'overlapbdyintersect'
        # Testing contains relation mask.
        self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, contains_mask)).name)
        self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, contains_mask)).name)
        # Testing within relation mask.
        ks = State.objects.get(name='Kansas')
        self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, within_mask)).name)
        # Testing intersection relation mask.
        if not oracle:
            self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt1, intersects_mask)).name)
            self.assertEqual('Texas', Country.objects.get(mpoly__relate=(pnt2, intersects_mask)).name)
            self.assertEqual('Lawrence', City.objects.get(point__relate=(ks.poly, intersects_mask)).name)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoQuerySetTest(TestCase):
# Please keep the tests in GeoQuerySet method's alphabetic order
@no_mysql
def test_centroid(self):
"Testing the `centroid` GeoQuerySet method."
qs = State.objects.exclude(poly__isnull=True).centroid()
if oracle:
tol = 0.1
elif spatialite:
tol = 0.000001
else:
tol = 0.000000001
for s in qs:
self.assertEqual(True, s.poly.centroid.equals_exact(s.centroid, tol))
@no_mysql
def test_diff_intersection_union(self):
"Testing the `difference`, `intersection`, `sym_difference`, and `union` GeoQuerySet methods."
geom = Point(5, 23)
qs = Country.objects.all().difference(geom).sym_difference(geom).union(geom)
# XXX For some reason SpatiaLite does something screwey with the Texas geometry here. Also,
# XXX it doesn't like the null intersection.
if spatialite:
qs = qs.exclude(name='Texas')
else:
qs = qs.intersection(geom)
for c in qs:
if oracle:
# Should be able to execute the queries; however, they won't be the same
# as GEOS (because Oracle doesn't use GEOS internally like PostGIS or
# SpatiaLite).
pass
else:
self.assertEqual(c.mpoly.difference(geom), c.difference)
if not spatialite:
self.assertEqual(c.mpoly.intersection(geom), c.intersection)
# Ordering might differ in collections
self.assertSetEqual(set(g.wkt for g in c.mpoly.sym_difference(geom)),
set(g.wkt for g in c.sym_difference))
self.assertSetEqual(set(g.wkt for g in c.mpoly.union(geom)),
set(g.wkt for g in c.union))
@skipUnless(getattr(connection.ops, 'envelope', False), 'Database does not support envelope operation')
def test_envelope(self):
"Testing the `envelope` GeoQuerySet method."
countries = Country.objects.all().envelope()
for country in countries:
self.assertIsInstance(country.envelope, Polygon)
@no_mysql
@no_spatialite # SpatiaLite does not have an Extent function
def test_extent(self):
"Testing the `extent` GeoQuerySet method."
# Reference query:
# `SELECT ST_extent(point) FROM geoapp_city WHERE (name='Houston' or name='Dallas');`
# => BOX(-96.8016128540039 29.7633724212646,-95.3631439208984 32.7820587158203)
expected = (-96.8016128540039, 29.7633724212646, -95.3631439208984, 32.782058715820)
qs = City.objects.filter(name__in=('Houston', 'Dallas'))
extent = qs.extent()
for val, exp in zip(extent, expected):
self.assertAlmostEqual(exp, val, 4)
@no_mysql
@no_oracle
@no_spatialite
def test_force_rhr(self):
"Testing GeoQuerySet.force_rhr()."
rings = (
((0, 0), (5, 0), (0, 5), (0, 0)),
((1, 1), (1, 3), (3, 1), (1, 1)),
)
rhr_rings = (
((0, 0), (0, 5), (5, 0), (0, 0)),
((1, 1), (3, 1), (1, 3), (1, 1)),
)
State.objects.create(name='Foo', poly=Polygon(*rings))
s = State.objects.force_rhr().get(name='Foo')
self.assertEqual(rhr_rings, s.force_rhr.coords)
@no_mysql
@no_oracle
@no_spatialite
def test_geohash(self):
"Testing GeoQuerySet.geohash()."
if not connection.ops.geohash:
return
# Reference query:
# SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston';
# SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston';
ref_hash = '9vk1mfq8jx0c8e0386z6'
h1 = City.objects.geohash().get(name='Houston')
h2 = City.objects.geohash(precision=5).get(name='Houston')
self.assertEqual(ref_hash, h1.geohash)
self.assertEqual(ref_hash[:5], h2.geohash)
def test_geojson(self):
"Testing GeoJSON output from the database using GeoQuerySet.geojson()."
# Only PostGIS 1.3.4+ and SpatiaLite 3.0+ support GeoJSON.
if not connection.ops.geojson:
self.assertRaises(NotImplementedError, Country.objects.all().geojson, field_name='mpoly')
return
pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}'
houston_json = '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}'
victoria_json = '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],"coordinates":[-123.305196,48.462611]}'
chicago_json = '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}'
if postgis and connection.ops.spatial_version < (1, 4, 0):
pueblo_json = '{"type":"Point","coordinates":[-104.60925200,38.25500100]}'
houston_json = '{"type":"Point","crs":{"type":"EPSG","properties":{"EPSG":4326}},"coordinates":[-95.36315100,29.76337400]}'
victoria_json = '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],"coordinates":[-123.30519600,48.46261100]}'
elif spatialite:
victoria_json = '{"type":"Point","bbox":[-123.305196,48.462611,-123.305196,48.462611],"coordinates":[-123.305196,48.462611]}'
# Precision argument should only be an integer
self.assertRaises(TypeError, City.objects.geojson, precision='foo')
# Reference queries and values.
# SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo';
self.assertEqual(pueblo_json, City.objects.geojson().get(name='Pueblo').geojson)
# 1.3.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston';
# 1.4.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston';
# This time we want to include the CRS by using the `crs` keyword.
self.assertEqual(houston_json, City.objects.geojson(crs=True, model_att='json').get(name='Houston').json)
# 1.3.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Victoria';
# 1.4.x: SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Houston';
# This time we include the bounding box by using the `bbox` keyword.
self.assertEqual(victoria_json, City.objects.geojson(bbox=True).get(name='Victoria').geojson)
# 1.(3|4).x: SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Chicago';
# Finally, we set every available keyword.
self.assertEqual(chicago_json, City.objects.geojson(bbox=True, crs=True, precision=5).get(name='Chicago').geojson)
def test_gml(self):
"Testing GML output from the database using GeoQuerySet.gml()."
if mysql or (spatialite and not connection.ops.gml):
self.assertRaises(NotImplementedError, Country.objects.all().gml, field_name='mpoly')
return
# Should throw a TypeError when tyring to obtain GML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.gml, field_name='name')
ptown1 = City.objects.gml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.gml(precision=9).get(name='Pueblo')
if oracle:
# No precision parameter for Oracle :-/
gml_regex = re.compile(r'^<gml:Point srsName="SDO:4326" xmlns:gml="http://www.opengis.net/gml"><gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ </gml:coordinates></gml:Point>')
elif spatialite and connection.ops.spatial_version < (3, 0, 0):
# Spatialite before 3.0 has extra colon in SrsName
gml_regex = re.compile(r'^<gml:Point SrsName="EPSG::4326"><gml:coordinates decimal="\." cs="," ts=" ">-104.609251\d+,38.255001</gml:coordinates></gml:Point>')
else:
gml_regex = re.compile(r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>')
for ptown in [ptown1, ptown2]:
self.assertTrue(gml_regex.match(ptown.gml))
# PostGIS < 1.5 doesn't include dimension im GMLv3 output.
if postgis and connection.ops.spatial_version >= (1, 5, 0):
self.assertIn('<gml:pos srsDimension="2">',
City.objects.gml(version=3).get(name='Pueblo').gml)
def test_kml(self):
"Testing KML output from the database using GeoQuerySet.kml()."
# Only PostGIS and Spatialite (>=2.4.0-RC4) support KML serialization
if not (postgis or (spatialite and connection.ops.kml)):
self.assertRaises(NotImplementedError, State.objects.all().kml, field_name='poly')
return
# Should throw a TypeError when trying to obtain KML from a
# non-geometry field.
qs = City.objects.all()
self.assertRaises(TypeError, qs.kml, 'name')
# The reference KML depends on the version of PostGIS used
# (the output stopped including altitude in 1.3.3).
if connection.ops.spatial_version >= (1, 3, 3):
ref_kml = '<Point><coordinates>-104.609252,38.255001</coordinates></Point>'
else:
ref_kml = '<Point><coordinates>-104.609252,38.255001,0</coordinates></Point>'
# Ensuring the KML is as expected.
ptown1 = City.objects.kml(field_name='point', precision=9).get(name='Pueblo')
ptown2 = City.objects.kml(precision=9).get(name='Pueblo')
for ptown in [ptown1, ptown2]:
self.assertEqual(ref_kml, ptown.kml)
# Only PostGIS has support for the MakeLine aggregate.
@no_mysql
@no_oracle
@no_spatialite
def test_make_line(self):
"Testing the `make_line` GeoQuerySet method."
# Ensuring that a `TypeError` is raised on models without PointFields.
self.assertRaises(TypeError, State.objects.make_line)
self.assertRaises(TypeError, Country.objects.make_line)
# Reference query:
# SELECT AsText(ST_MakeLine(geoapp_city.point)) FROM geoapp_city;
ref_line = GEOSGeometry('LINESTRING(-95.363151 29.763374,-96.801611 32.782057,-97.521157 34.464642,174.783117 -41.315268,-104.609252 38.255001,-95.23506 38.971823,-87.650175 41.850385,-123.305196 48.462611)', srid=4326)
self.assertEqual(ref_line, City.objects.make_line())
@no_mysql
def test_num_geom(self):
"Testing the `num_geom` GeoQuerySet method."
# Both 'countries' only have two geometries.
for c in Country.objects.num_geom():
self.assertEqual(2, c.num_geom)
for c in City.objects.filter(point__isnull=False).num_geom():
# Oracle and PostGIS 2.0+ will return 1 for the number of
# geometries on non-collections, whereas PostGIS < 2.0.0
# will return None.
if postgis and connection.ops.spatial_version < (2, 0, 0):
self.assertIsNone(c.num_geom)
else:
self.assertEqual(1, c.num_geom)
@no_mysql
@no_spatialite # SpatiaLite can only count vertices in LineStrings
def test_num_points(self):
"Testing the `num_points` GeoQuerySet method."
for c in Country.objects.num_points():
self.assertEqual(c.mpoly.num_points, c.num_points)
if not oracle:
# Oracle cannot count vertices in Point geometries.
for c in City.objects.num_points():
self.assertEqual(1, c.num_points)
    @no_mysql
    def test_point_on_surface(self):
        "Testing the `point_on_surface` GeoQuerySet method."
        # Reference values are backend-specific because each backend computes
        # the point-on-surface with a different library.
        if oracle:
            # SELECT SDO_UTIL.TO_WKTGEOMETRY(SDO_GEOM.SDO_POINTONSURFACE(GEOAPP_COUNTRY.MPOLY, 0.05)) FROM GEOAPP_COUNTRY;
            ref = {'New Zealand': fromstr('POINT (174.616364 -36.100861)', srid=4326),
                   'Texas': fromstr('POINT (-103.002434 36.500397)', srid=4326),
                   }
        elif postgis or spatialite:
            # Using GEOSGeometry to compute the reference point on surface values
            # -- since PostGIS also uses GEOS these should be the same.
            ref = {'New Zealand': Country.objects.get(name='New Zealand').mpoly.point_on_surface,
                   'Texas': Country.objects.get(name='Texas').mpoly.point_on_surface
                   }
        for c in Country.objects.point_on_surface():
            if spatialite:
                # XXX This seems to be a WKT-translation-related precision issue?
                tol = 0.00001
            else:
                tol = 0.000000001
            # Each computed point must match the reference within tolerance.
            self.assertEqual(True, ref[c.name].equals_exact(c.point_on_surface, tol))
@no_mysql
@no_spatialite
def test_reverse_geom(self):
"Testing GeoQuerySet.reverse_geom()."
coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)]
Track.objects.create(name='Foo', line=LineString(coords))
t = Track.objects.reverse_geom().get(name='Foo')
coords.reverse()
self.assertEqual(tuple(coords), t.reverse_geom.coords)
if oracle:
self.assertRaises(TypeError, State.objects.reverse_geom)
    @no_mysql
    @no_oracle
    def test_scale(self):
        "Testing the `scale` GeoQuerySet method."
        xfac, yfac = 2, 3
        tol = 5  # XXX The low precision tolerance is for SpatiaLite
        qs = Country.objects.scale(xfac, yfac, model_att='scaled')
        for c in qs:
            # Walk polygons -> rings -> coordinates, verifying each point of
            # the scaled geometry is the original multiplied by the factors.
            for p1, p2 in zip(c.mpoly, c.scaled):
                for r1, r2 in zip(p1, p2):
                    for c1, c2 in zip(r1.coords, r2.coords):
                        self.assertAlmostEqual(c1[0] * xfac, c2[0], tol)
                        self.assertAlmostEqual(c1[1] * yfac, c2[1], tol)
    @no_mysql
    @no_oracle
    @no_spatialite
    def test_snap_to_grid(self):
        "Testing GeoQuerySet.snap_to_grid()."
        # Let's try and break snap_to_grid() with bad combinations of arguments.
        # Wrong *number* of arguments -> ValueError.
        for bad_args in ((), range(3), range(5)):
            self.assertRaises(ValueError, Country.objects.snap_to_grid, *bad_args)
        # Wrong *types* of arguments -> TypeError.
        for bad_args in (('1.0',), (1.0, None), tuple(map(six.text_type, range(4)))):
            self.assertRaises(TypeError, Country.objects.snap_to_grid, *bad_args)
        # Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org
        # from the world borders dataset he provides.
        wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,'
               '12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,'
               '12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,'
               '12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,'
               '12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,'
               '12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,'
               '12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,'
               '12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))')
        Country.objects.create(name='San Marino', mpoly=fromstr(wkt))
        # Because floating-point arithmetic isn't exact, we set a tolerance
        # to pass into GEOS `equals_exact`.
        tol = 0.000000001
        # One arg: uniform grid size.
        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))')
        self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.1).get(name='San Marino').snap_to_grid, tol))
        # Two args: separate x/y grid sizes.
        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))')
        self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23).get(name='San Marino').snap_to_grid, tol))
        # Four args: grid origin plus sizes.
        # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country" WHERE "geoapp_country"."name" = 'San Marino';
        ref = fromstr('MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))')
        self.assertTrue(ref.equals_exact(Country.objects.snap_to_grid(0.05, 0.23, 0.5, 0.17).get(name='San Marino').snap_to_grid, tol))
def test_svg(self):
"Testing SVG output using GeoQuerySet.svg()."
if mysql or oracle:
self.assertRaises(NotImplementedError, City.objects.svg)
return
self.assertRaises(TypeError, City.objects.svg, precision='foo')
# SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo';
svg1 = 'cx="-104.609252" cy="-38.255001"'
# Even though relative, only one point so it's practically the same except for
# the 'c' letter prefix on the x,y values.
svg2 = svg1.replace('c', '')
self.assertEqual(svg1, City.objects.svg().get(name='Pueblo').svg)
self.assertEqual(svg2, City.objects.svg(relative=5).get(name='Pueblo').svg)
    @no_mysql
    def test_transform(self):
        "Testing the transform() GeoQuerySet method."
        # Pre-transformed points for Houston and Pueblo.
        htown = fromstr('POINT(1947516.83115183 6322297.06040572)', srid=3084)
        ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774)
        prec = 3  # Precision is low due to version variations in PROJ and GDAL.
        # Asserting the result of the transform operation with the values in
        # the pre-transformed points. Oracle does not have the 3084 SRID.
        if not oracle:
            h = City.objects.transform(htown.srid).get(name='Houston')
            self.assertEqual(3084, h.point.srid)
            self.assertAlmostEqual(htown.x, h.point.x, prec)
            self.assertAlmostEqual(htown.y, h.point.y, prec)
        # The target SRID may be given positionally (with field_name) or as
        # the `srid` keyword; both must yield the same transformed point.
        p1 = City.objects.transform(ptown.srid, field_name='point').get(name='Pueblo')
        p2 = City.objects.transform(srid=ptown.srid).get(name='Pueblo')
        for p in [p1, p2]:
            self.assertEqual(2774, p.point.srid)
            self.assertAlmostEqual(ptown.x, p.point.x, prec)
            self.assertAlmostEqual(ptown.y, p.point.y, prec)
    @no_mysql
    @no_oracle
    def test_translate(self):
        "Testing the `translate` GeoQuerySet method."
        xfac, yfac = 5, -23
        qs = Country.objects.translate(xfac, yfac, model_att='translated')
        for c in qs:
            # Walk polygons -> rings -> coordinates, verifying each point of
            # the translated geometry is the original shifted by the offsets.
            for p1, p2 in zip(c.mpoly, c.translated):
                for r1, r2 in zip(p1, p2):
                    for c1, c2 in zip(r1.coords, r2.coords):
                        # XXX The low precision is for SpatiaLite
                        self.assertAlmostEqual(c1[0] + xfac, c2[0], 5)
                        self.assertAlmostEqual(c1[1] + yfac, c2[1], 5)
    @no_mysql
    def test_unionagg(self):
        "Testing the `unionagg` (aggregate union) GeoQuerySet method."
        tx = Country.objects.get(name='Texas').mpoly
        # Houston, Dallas -- Ordering may differ depending on backend or GEOS version.
        union1 = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)')
        union2 = fromstr('MULTIPOINT(-95.363151 29.763374,-96.801611 32.782057)')
        qs = City.objects.filter(point__within=tx)
        # Aggregating over a non-geometry field must fail loudly.
        self.assertRaises(TypeError, qs.unionagg, 'name')
        # Using `field_name` keyword argument in one query and specifying an
        # order in the other (which should not be used because this is
        # an aggregate method on a spatial column)
        u1 = qs.unionagg(field_name='point')
        u2 = qs.order_by('name').unionagg()
        tol = 0.00001
        # Either point ordering is acceptable in the union result.
        self.assertEqual(True, union1.equals_exact(u1, tol) or union2.equals_exact(u1, tol))
        self.assertEqual(True, union1.equals_exact(u2, tol) or union2.equals_exact(u2, tol))
        # An empty queryset aggregates to None.
        qs = City.objects.filter(name='NotACity')
        self.assertEqual(None, qs.unionagg(field_name='point'))
def test_non_concrete_field(self):
pkfield = City._meta.get_field_by_name('id')[0]
orig_pkfield_col = pkfield.column
pkfield.column = None
try:
list(City.objects.all())
finally:
pkfield.column = orig_pkfield_col
| |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import io
import os
import platform
import re
import subprocess
import sys
class TestFailedError(Exception):
    """Raised when an incremental parse test cannot be completed."""
def escapeCmdArg(arg):
    """Quote *arg* for display when it contains spaces or double quotes."""
    needs_quoting = ' ' in arg or '"' in arg
    if not needs_quoting:
        return arg
    return '"%s"' % arg.replace('"', '\\"')
def run_command(cmd):
    # Echo *cmd* and run it, returning its combined stdout+stderr output.
    # Raises subprocess.CalledProcessError on a non-zero exit status.
    if sys.version_info[0] < 3:
        # Python 2: encode each argument up front so non-ASCII paths survive.
        cmd = list(map(lambda s: s.encode('utf-8'), cmd))
    print(' '.join([escapeCmdArg(arg) for arg in cmd]))
    if sys.version_info[0] < 3 or platform.system() == 'Windows':
        # Windows (and py2, already encoded above) take the list as-is.
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    else:
        # Python 3 on POSIX: pass the arguments as UTF-8 bytes.
        return subprocess.check_output(list(map(lambda s: s.encode('utf-8'), cmd)),
                                       stderr=subprocess.STDOUT)
# Template markers recognized in test files, compiled once here (previously
# they were re-compiled on every iteration of parseLine's inner loop):
#   <<test_case<pre|||post>>>            substitution marker
#   <reparse name> ... </reparse name>   expected reparse region delimiters
_SUBST_RE = re.compile(r'^(.*?)<<(.*?)<(.*?)\|\|\|(.*?)>>>(.*\n?)')
_REPARSE_RE = re.compile(r'^(.*?)<(/?)reparse ?(.*?)>(.*\n?)')


def parseLine(line, line_no, test_case, incremental_edit_args, reparse_args,
              current_reparse_start):
    """Parse one line of an incremental-parse test template.

    Substitution markers contribute their pre-edit text to the pre-edit line
    and, for the matching *test_case*, their post-edit text to the post-edit
    line, appending a '-incremental-edit' argument pair to
    *incremental_edit_args*.  Reparse tags delimit regions appended to
    *reparse_args* as '-reparse-region' argument pairs.

    Returns (pre_edit_line, post_edit_line, current_reparse_start), where the
    two line values are UTF-8 encoded bytes and current_reparse_start is the
    [line, column] of a still-open reparse tag, or None.

    Raises TestFailedError on unbalanced reparse tags.
    """
    pre_edit_line = ""
    post_edit_line = ""
    # We parse one tag at a time in the line while eating away a prefix of
    # the line.
    while line:
        subst_match = _SUBST_RE.match(line)
        reparse_match = _REPARSE_RE.match(line)
        if subst_match and reparse_match:
            # If both regexes match, handle whichever tag occurs first.
            if len(subst_match.group(1)) < len(reparse_match.group(1)):
                reparse_match = None
            else:
                subst_match = None
        if subst_match:
            prefix = subst_match.group(1)
            match_test_case = subst_match.group(2)
            pre_edit = subst_match.group(3)
            post_edit = subst_match.group(4)
            suffix = subst_match.group(5)
            if match_test_case == test_case:
                # Compute the -incremental-edit argument for
                # swift-syntax-test.  Columns are 1-based and the replaced
                # length is measured in UTF-8 bytes.
                column = len(pre_edit_line) + len(prefix) + 1
                edit_arg = '%d:%d-%d:%d=%s' % \
                    (line_no, column, line_no,
                     column + len(pre_edit.encode('utf-8')), post_edit)
                incremental_edit_args.append('-incremental-edit')
                incremental_edit_args.append(edit_arg)
                pre_edit_line += prefix + pre_edit
                post_edit_line += prefix + post_edit
            else:
                # For different test cases just take the pre-edit text
                pre_edit_line += prefix + pre_edit
                post_edit_line += prefix + pre_edit
            line = suffix
        elif reparse_match:
            prefix = reparse_match.group(1)
            is_closing = len(reparse_match.group(2)) > 0
            match_test_case = reparse_match.group(3)
            suffix = reparse_match.group(4)
            if match_test_case == test_case:
                # Reparse regions are located in the *post*-edit file.
                column = len(post_edit_line) + len(prefix) + 1
                if is_closing:
                    if not current_reparse_start:
                        raise TestFailedError('Closing unopened reparse tag '
                                              'in line %d' % line_no)
                    reparse_args.append('-reparse-region')
                    reparse_args.append(
                        '%d:%d-%d:%d' % (current_reparse_start[0],
                                         current_reparse_start[1],
                                         line_no, column))
                    current_reparse_start = None
                else:
                    if current_reparse_start:
                        raise TestFailedError('Opening nested reparse tags '
                                              'for the same test case in '
                                              'line %d' % line_no)
                    current_reparse_start = [line_no, column]
            # The tag itself is stripped from both output files.
            pre_edit_line += prefix
            post_edit_line += prefix
            line = suffix
        else:
            # No more markers; copy the remainder verbatim.
            pre_edit_line += line
            post_edit_line += line
            # Nothing more to do
            line = ''
    return (pre_edit_line.encode('utf-8'),
            post_edit_line.encode('utf-8'),
            current_reparse_start)
def prepareForIncrParse(test_file, test_case, pre_edit_file, post_edit_file,
                        incremental_edit_args, reparse_args):
    """Expand the markup in *test_file* for *test_case*.

    Writes the pre-edit and post-edit Swift sources to *pre_edit_file* and
    *post_edit_file* and fills *incremental_edit_args* / *reparse_args* with
    the corresponding swift-syntax-test command-line arguments.

    Raises TestFailedError if a reparse tag is left unclosed.
    """
    with io.open(test_file, mode='r', encoding='utf-8',
                 newline='\n') as test_file_handle, \
            io.open(pre_edit_file, mode='w+', encoding='utf-8',
                    newline='\n') as pre_edit_file_handle, \
            io.open(post_edit_file, mode='w+', encoding='utf-8',
                    newline='\n') as post_edit_file_handle:
        current_reparse_start = None
        line_no = 1
        for line in test_file_handle.readlines():
            # Thread the open-reparse-tag state from one line to the next.
            parseLineRes = parseLine(line, line_no, test_case,
                                     incremental_edit_args,
                                     reparse_args, current_reparse_start)
            (pre_edit_line, post_edit_line, current_reparse_start) = \
                parseLineRes
            pre_edit_file_handle.write(pre_edit_line.decode('utf-8'))
            post_edit_file_handle.write(post_edit_line.decode('utf-8'))
            line_no += 1
        if current_reparse_start:
            raise TestFailedError('Unclosed reparse tag for test case %s' %
                                  test_case)
def serializeIncrParseMarkupFile(test_file, test_case, mode,
                                 serialization_mode, serialization_format,
                                 omit_node_ids, output_file, diags_output_file,
                                 temp_dir, swift_syntax_test,
                                 print_visual_reuse_info):
    """Run swift-syntax-test over the expanded template and serialize a tree.

    *mode* selects what gets parsed: the pre-edit file, the post-edit file,
    or the incremental edits between the two.  Raises TestFailedError if
    swift-syntax-test exits with an error, ValueError on unknown mode or
    serialization options.
    """
    test_file_name = os.path.basename(test_file)
    pre_edit_file = temp_dir + '/' + test_file_name + '.' + test_case + \
        '.pre.swift'
    post_edit_file = temp_dir + '/' + test_file_name + '.' + test_case + \
        '.post.swift'
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    # =========================================================================
    # First generate the pre-edit and post-edit Swift file and gather the edits
    # and expected reparse regions. This is the parser for the special edit
    # markup for testing incremental parsing
    # =========================================================================
    # Gather command line arguments for swift-syntax-test specifying the
    # performed edits in this list
    incremental_edit_args = []
    reparse_args = []
    prepareForIncrParse(test_file, test_case, pre_edit_file, post_edit_file,
                        incremental_edit_args, reparse_args)
    # =========================================================================
    # Now generate the requested serialized file
    # =========================================================================
    # Build the command to serialize the tree depending on the command line
    # arguments
    try:
        command = [
            swift_syntax_test,
            '-serialize-raw-tree',
            '-output-filename', output_file
        ]
        if diags_output_file:
            command.extend(['-diags-output-filename', diags_output_file])
        if omit_node_ids:
            command.extend(['-omit-node-ids'])
        if serialization_mode == 'full':
            # Nothing to do. This is the default behaviour of swift-syntax-test
            pass
        elif serialization_mode == 'incremental':
            command.extend(['-incremental-serialization'])
        else:
            raise ValueError('Unknown serialization mode "%s"' %
                             serialization_mode)
        if serialization_format == 'json':
            # Nothing to do. This is the default behaviour of swift-syntax-test
            pass
        elif serialization_format == 'byteTree':
            command.extend(['-serialize-byte-tree'])
        else:
            raise ValueError('Unknown serialization format "%s"' %
                             serialization_format)
        if mode == 'pre-edit':
            command.extend(['-input-source-filename', pre_edit_file])
        elif mode == 'post-edit':
            command.extend(['-input-source-filename', post_edit_file])
        elif mode == 'incremental':
            # We need to build the syntax tree of the pre-edit file first so
            # that we can pass it to swift-syntax-test to perform incremental
            # parsing
            pre_edit_tree_file = pre_edit_file + '.serialized.json'
            run_command([swift_syntax_test] +
                        ['-serialize-raw-tree'] +
                        ['-input-source-filename', pre_edit_file] +
                        ['-output-filename', pre_edit_tree_file])
            # Then perform incremental parsing with the old syntax tree on the
            # post-edit file
            command.extend(['-input-source-filename', post_edit_file])
            command.extend(['-old-syntax-tree-filename',
                            pre_edit_tree_file])
            # NOTE(review): double-dash here while all other flags use a
            # single dash -- presumably both are accepted; confirm.
            command.extend(['--old-source-filename', pre_edit_file])
            command.extend(incremental_edit_args)
            command.extend(reparse_args)
            if print_visual_reuse_info:
                command.extend([
                    '-print-visual-reuse-info',
                    '-force-colored-output'
                ])
        else:
            raise ValueError('Unknown mode "%s"' % mode)
        output = run_command(command)
        if print_visual_reuse_info:
            print(output)
    except subprocess.CalledProcessError as e:
        raise TestFailedError(e.output.decode('utf-8'))
def main():
    """Command-line entry point: parse arguments and run the serialization.

    Exits with status 1 (after printing the failure to stderr) when the
    incremental parse test fails.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Utility for testing incremental syntax parsing',
        epilog='''
    This utility can parse a special markup to dedicate a pre-edit and a
    post-edit version of a file simulateously and generate a serialized version
    of the libSyntax tree by parsing either the pre-edit file, the post-edit
    file or the edits that are required to retrieve the post-edit file from the
    pre-edit file incrementally.
    To generate the pre-edit and the post-edit file from the template, it
    operates on markers of the form:
    <<test_case<pre|||post>>>
    These placeholders are replaced by:
    - 'pre' if a different test case than 'test_case' is run
    - 'pre' for the pre-edit version of 'test_case'
    - 'post' for the post-edit version of 'test_case''')
    parser.add_argument(
        'file', type=argparse.FileType(),
        help='The template file to test')
    parser.add_argument(
        '--test-case', default='',
        help='The test case to execute. If no test case is specified all \
              unnamed substitutions are applied')
    parser.add_argument(
        '--mode', choices=['pre-edit', 'incremental', 'post-edit'],
        required=True, help='''
        The type of parsing to perform:
        - pre-edit: Serialize the syntax tree when parsing the pre-edit file \
             from scratch
        - incremental: Serialize the syntax tree that results from parsing the \
             edits between the pre-edit and post-edit file incrementally
        - post-edit: Serialize the syntax tree that results from parsing the \
             post-edit file from scratch
        ''')
    parser.add_argument(
        '--serialization-mode', choices=['full', 'incremental'],
        default='full', help='''
        Only applicable if `--mode` is `incremental`. Whether to serialize the
        entire tree or use the incremental transfer mode. Default is `full`.
        ''')
    parser.add_argument(
        '--serialization-format', choices=['json', 'byteTree'],
        default='json', help='''
        The format in which the syntax tree shall be serialized.
        ''')
    parser.add_argument(
        '--omit-node-ids', default=False, action='store_true',
        help='Don\'t include the ids of the nodes in the serialized syntax \
              tree')
    parser.add_argument(
        '--output-file', required=True,
        help='The file to which the serialized tree shall be written.')
    parser.add_argument(
        '--temp-dir', required=True,
        help='A temporary directory where pre-edit and post-edit files can be \
              saved')
    parser.add_argument(
        '--swift-syntax-test', required=True,
        help='The path to swift-syntax-test')
    parser.add_argument(
        '--print-visual-reuse-info', default=False, action='store_true',
        help='Print visual reuse information about the incremental parse \
              instead of diffing the syntax trees. This option is intended \
              for debug purposes only.')

    args = parser.parse_args(sys.argv[1:])

    test_file = args.file.name
    test_case = args.test_case
    mode = args.mode
    serialization_mode = args.serialization_mode
    serialization_format = args.serialization_format
    omit_node_ids = args.omit_node_ids
    output_file = args.output_file
    temp_dir = args.temp_dir
    swift_syntax_test = args.swift_syntax_test
    visual_reuse_info = args.print_visual_reuse_info

    try:
        serializeIncrParseMarkupFile(test_file=test_file,
                                     test_case=test_case,
                                     mode=mode,
                                     serialization_mode=serialization_mode,
                                     serialization_format=serialization_format,
                                     omit_node_ids=omit_node_ids,
                                     output_file=output_file,
                                     diags_output_file=None,
                                     temp_dir=temp_dir,
                                     swift_syntax_test=swift_syntax_test,
                                     print_visual_reuse_info=visual_reuse_info)
    except TestFailedError as e:
        # BUG FIX: exceptions have no `.message` attribute on Python 3
        # (it was removed after Python 2.6); use str(e) instead.
        print(str(e), file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
"""
Class for "reading" fake data from an imaginary file.
For the user, it generates a :class:`Segment` or a :class:`Block` with a
sinusoidal :class:`AnalogSignal`, a :class:`SpikeTrain` and an
:class:`EventArray`.
For a developer, it is just an example showing guidelines for someone who wants
to develop a new IO module.
Depends on: scipy
Supported: Read
Author: sgarcia
"""
# needed for python 3 compatibility
from __future__ import absolute_import
# note neo.core needs only numpy and quantities
import numpy as np
import quantities as pq
# but my specific IO can depend on many other packages
try:
from scipy import stats
except ImportError as err:
HAVE_SCIPY = False
SCIPY_ERR = err
else:
HAVE_SCIPY = True
SCIPY_ERR = None
# I need to subclass BaseIO
from neo.io.baseio import BaseIO
# to import from core
from neo.core import Segment, AnalogSignal, SpikeTrain, EventArray
# I need to subclass BaseIO
class ExampleIO(BaseIO):
    """
    Class for "reading" fake data from an imaginary file.

    For the user, it generates a :class:`Segment` or a :class:`Block` with a
    sinusoidal :class:`AnalogSignal`, a :class:`SpikeTrain` and an
    :class:`EventArray`.

    For a developer, it is just an example showing guidelines for someone who wants
    to develop a new IO module.

    Two rules for developers:
      * Respect the Neo IO API (:ref:`neo_io_API`)
      * Follow :ref:`io_guiline`

    Usage:
        >>> from neo import io
        >>> r = io.ExampleIO(filename='itisafake.nof')
        >>> seg = r.read_segment(lazy=False, cascade=True)
        >>> print(seg.analogsignals)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<AnalogSignal(array([ 0.19151945, 0.62399373, 0.44149764, ..., 0.96678374,
        ...
        >>> print(seg.spiketrains)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<SpikeTrain(array([ -0.83799524, 6.24017951, 7.76366686, 4.45573701,
        12.60644415, 10.68328994, 8.07765735, 4.89967804,
        ...
        >>> print(seg.eventarrays)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<EventArray: TriggerB@9.6976 s, TriggerA@10.2612 s, TriggerB@2.2777 s, TriggerA@6.8607 s, ...
        >>> anasig = r.read_analogsignal(lazy=True, cascade=False)
        >>> print(anasig._data_description)
        {'shape': (150000,)}
        >>> anasig = r.read_analogsignal(lazy=False, cascade=False)
    """

    is_readable = True   # This class can only read data
    is_writable = False  # write is not supported

    # This class is able to directly or indirectly handle the following objects
    # You can notice that this greatly simplifies the full Neo object hierarchy
    supported_objects = [ Segment , AnalogSignal, SpikeTrain, EventArray ]

    # This class can return either a Block or a Segment
    # The first one is the default ( self.read )
    # These lists should go from highest object to lowest object because
    # common_io_test assumes it.
    readable_objects = [ Segment , AnalogSignal, SpikeTrain ]

    # This class is not able to write objects
    writeable_objects = [ ]

    has_header = False
    is_streameable = False

    # This is for GUI stuff: a definition for parameters when reading.
    # This dict should be keyed by object (`Block`). Each entry is a list
    # of tuple. The first entry in each tuple is the parameter name. The
    # second entry is a dict with keys 'value' (for default value),
    # and 'label' (for a descriptive name).
    # Note that if the highest-level object requires parameters,
    # common_io_test will be skipped.
    read_params = {
        Segment : [
            ('segment_duration',
                {'value' : 15., 'label' : 'Segment size (s.)'}),
            ('num_analogsignal',
                {'value' : 8, 'label' : 'Number of recording points'}),
            ('num_spiketrain_by_channel',
                {'value' : 3, 'label' : 'Num of spiketrains'}),
            ],
        }

    # Writing is not supported, so there is no GUI stuff for it
    write_params = None

    name = 'example'
    extensions = [ 'nof' ]

    # mode can be 'file' or 'dir' or 'fake' or 'database'
    # the main case is 'file' but some readers are based on a directory or a
    # database; this info is for GUI stuff also
    mode = 'fake'

    def __init__(self , filename = None) :
        """
        Arguments:
            filename : the filename

        Note:
            - filename is here just for example because it will not be taken
              into account
            - if mode=='dir' the argument should be dirname (See TdtIO)
        """
        BaseIO.__init__(self)
        self.filename = filename
        # Seed so all instances can return the same values
        np.random.seed(1234)

    # Segment reading is supported so I define this :
    def read_segment(self,
                     # the 2 first keyword arguments are imposed by neo.io API
                     lazy = False,
                     cascade = True,
                     # all following arguments are decided by this IO and are free
                     segment_duration = 15.,
                     num_analogsignal = 4,
                     num_spiketrain_by_channel = 3,
                    ):
        """
        Return a fake Segment.

        The self.filename does not matter.

        In this IO read by default a Segment.

        This is just an example to be adapted to each ClassIO.
        In this case these 3 parameters are taken into account because this
        function returns a generated segment with fake AnalogSignal and fake
        SpikeTrain.

        Parameters:
            segment_duration : the size in seconds of the segment.
            num_analogsignal : number of AnalogSignal in this segment
            num_spiketrain : number of SpikeTrain in this segment
        """
        sampling_rate = 10000. #Hz
        t_start = -1.

        #time vector for generated signal
        timevect = np.arange(t_start, t_start+ segment_duration , 1./sampling_rate)

        # create an empty segment
        seg = Segment( name = 'it is a seg from exampleio')

        if cascade:
            # read nested analogsignals
            for i in range(num_analogsignal):
                ana = self.read_analogsignal( lazy = lazy , cascade = cascade ,
                                        channel_index = i ,segment_duration = segment_duration, t_start = t_start)
                seg.analogsignals += [ ana ]

            # read nested spiketrains
            for i in range(num_analogsignal):
                for _ in range(num_spiketrain_by_channel):
                    sptr = self.read_spiketrain(lazy = lazy , cascade = cascade ,
                                            segment_duration = segment_duration, t_start = t_start , channel_index = i)
                    seg.spiketrains += [ sptr ]

            # create an EventArray that mimics triggers.
            # note that ExampleIO does not allow direct access to EventArray;
            # for that you need read_segment(cascade = True)
            eva = EventArray()
            if lazy:
                # in the lazy case no data are read
                # eva is empty
                pass
            else:
                # otherwise it really contains data
                n = 1000

                # neo.io supports quantities; this vector uses seconds as unit
                eva.times = timevect[(np.random.rand(n)*timevect.size).astype('i')]* pq.s
                # all durations are the same
                eva.durations = np.ones(n)*500*pq.ms
                # label
                l = [ ]
                for i in range(n):
                    if np.random.rand()>.6: l.append( 'TriggerA' )
                    else : l.append( 'TriggerB' )
                eva.labels = np.array( l )

            seg.eventarrays += [ eva ]

        seg.create_many_to_one_relationship()
        return seg

    def read_analogsignal(self ,
                          # the 2 first key arguments are imposed by neo.io API
                          lazy = False,
                          cascade = True,
                          channel_index = 0,
                          segment_duration = 15.,
                          t_start = -1,
                          ):
        """
        With this IO an AnalogSignal can be accessed directly by its channel number
        """
        sr = 10000.
        sinus_freq = 3. # Hz
        #time vector for generated signal:
        tvect = np.arange(t_start, t_start+ segment_duration , 1./sr)

        if lazy:
            anasig = AnalogSignal([], units='V', sampling_rate=sr * pq.Hz,
                                  t_start=t_start * pq.s,
                                  channel_index=channel_index)
            # we add the attribute lazy_shape with the size if loaded
            anasig.lazy_shape = tvect.shape
        else:
            # create analogsignal (sinus of 3 Hz)
            sig = np.sin(2*np.pi*tvect*sinus_freq + channel_index/5.*2*np.pi)+np.random.rand(tvect.size)
            anasig = AnalogSignal(sig, units= 'V', sampling_rate=sr * pq.Hz,
                                  t_start=t_start * pq.s,
                                  channel_index=channel_index)

        # for attributes out of neo you can annotate
        anasig.annotate(info = 'it is a sinus of %f Hz' %sinus_freq )

        return anasig

    def read_spiketrain(self ,
                        # the 2 first key arguments are imposed by neo.io API
                        lazy = False,
                        cascade = True,
                        segment_duration = 15.,
                        t_start = -1,
                        channel_index = 0,
                        ):
        """
        With this IO a SpikeTrain can be accessed directly by its channel number
        """
        # There are 2 possible behaviours for a SpikeTrain:
        # holding many Spike instances or directly holding spike times.
        # We choose the first here:
        if not HAVE_SCIPY:
            raise SCIPY_ERR
        num_spike_by_spiketrain = 40
        sr = 10000.

        if lazy:
            times = [ ]
        else:
            times = (np.random.rand(num_spike_by_spiketrain)*segment_duration +
                     t_start)

        # create a spiketrain
        spiketr = SpikeTrain(times, t_start = t_start*pq.s, t_stop = (t_start+segment_duration)*pq.s ,
                             units = pq.s,
                             name = 'it is a spiketrain from exampleio',
                             )

        if lazy:
            # we add the attribute lazy_shape with the size if loaded
            spiketr.lazy_shape = (num_spike_by_spiketrain,)

        # our spiketrains also hold the waveforms:

        # 1 generate a fake spike shape (2d array if trodness >1)
        w1 = -stats.nct.pdf(np.arange(11,60,4), 5,20)[::-1]/3.
        w2 = stats.nct.pdf(np.arange(11,60,2), 5,20)
        w = np.r_[ w1 , w2 ]
        w = -w/max(w)

        if not lazy:
            # in the neo API the waveforms attr is 3D in case of tetrodes;
            # in our case it is a mono electrode so dim 1 is size 1
            waveforms = np.tile( w[np.newaxis,np.newaxis,:], ( num_spike_by_spiketrain ,1, 1) )
            waveforms *= np.random.randn(*waveforms.shape)/6+1
            spiketr.waveforms = waveforms*pq.mV
            spiketr.sampling_rate = sr * pq.Hz
            spiketr.left_sweep = 1.5* pq.s

        # for attributes out of neo you can annotate
        spiketr.annotate(channel_index = channel_index)

        return spiketr
| |
#!/usr/bin/env python
#
# Copyright 2012 eNovance <licensing@enovance.com>
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import fixture as fixture_config
from oslotest import mockpatch
import six
from ceilometer.objectstore import swift_middleware
from ceilometer import pipeline
from ceilometer.tests import base as tests_base
class FakeApp(object):
    """Minimal WSGI generator application wrapped by the middleware under test."""

    def __init__(self, body=None):
        # Default body is one chunk whose length (28) the tests assert on.
        self.body = body or ['This string is 28 bytes long']

    def __call__(self, env, start_response):
        # NOTE(review): the bare `yield` turns this into a generator and
        # emits an initial None chunk before start_response runs; the
        # middleware under test is apparently expected to cope with that --
        # confirm intentional before changing.
        yield
        start_response('200 OK', [
            ('Content-Type', 'text/plain'),
            ('Content-Length', str(sum(map(len, self.body))))
        ])
        # Drain the request body in small chunks before emitting the reply.
        while env['wsgi.input'].read(5):
            pass
        for line in self.body:
            yield line
class FakeRequest(object):
    """A bare bones request object

    The middleware will inspect this for request method,
    wsgi.input and headers.
    """

    def __init__(self, path, environ=None, headers=None):
        environ = environ or {}
        headers = headers or {}

        environ['PATH_INFO'] = path

        # Provide an empty request body unless the caller supplied one.
        if 'wsgi.input' not in environ:
            environ['wsgi.input'] = six.moves.cStringIO('')

        # BUG FIX: dict.iteritems() does not exist on Python 3; .items()
        # behaves the same on both interpreters for this use.
        for header, value in headers.items():
            environ['HTTP_%s' % header.upper()] = value
        self.environ = environ
class TestSwiftMiddleware(tests_base.BaseTestCase):
    class _faux_pipeline_manager(pipeline.PipelineManager):
        """Stand-in pipeline manager that records published samples."""

        class _faux_pipeline(object):
            def __init__(self, pipeline_manager):
                self.pipeline_manager = pipeline_manager
                self.samples = []

            def publish_samples(self, ctxt, samples):
                # Collect instead of publishing so tests can inspect them.
                self.samples.extend(samples)

            def flush(self, context):
                pass

        def __init__(self):
            # NOTE(review): deliberately skips PipelineManager.__init__ and
            # installs a single recording pipeline instead.
            self.pipelines = [self._faux_pipeline(self)]
    def _fake_setup_pipeline(self, transformer_manager=None):
        # Substituted for pipeline.setup_pipeline in setUp; always returns
        # the recording fake so tests can inspect published samples.
        return self.pipeline_manager
    def setUp(self):
        super(TestSwiftMiddleware, self).setUp()
        # Replace the real pipeline machinery with the recording fake so each
        # test can inspect the samples the middleware publishes.
        self.pipeline_manager = self._faux_pipeline_manager()
        self.useFixture(mockpatch.PatchObject(
            pipeline, 'setup_pipeline',
            side_effect=self._fake_setup_pipeline))
        self.CONF = self.useFixture(fixture_config.Config()).conf
        self.setup_messaging(self.CONF)
@staticmethod
def start_response(*args):
pass
    def test_get(self):
        """A GET must emit a bytes sample plus a request sample."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(), {})
        req = FakeRequest('/1.0/account/container/obj',
                          environ={'REQUEST_METHOD': 'GET'})
        resp = app(req.environ, self.start_response)
        self.assertEqual(["This string is 28 bytes long"], list(resp))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(2, len(samples))
        # First sample: the 28 response bytes transferred.
        data = samples[0]
        self.assertEqual(28, data.volume)
        self.assertEqual('1.0', data.resource_metadata['version'])
        self.assertEqual('container', data.resource_metadata['container'])
        self.assertEqual('obj', data.resource_metadata['object'])
        # test the # of request and the request method
        data = samples[1]
        self.assertEqual('storage.api.request', data.name)
        self.assertEqual(1, data.volume)
        self.assertEqual('get', data.resource_metadata['method'])
    def test_put(self):
        """A PUT must meter the uploaded request-body bytes."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(body=['']), {})
        req = FakeRequest(
            '/1.0/account/container/obj',
            environ={'REQUEST_METHOD': 'PUT',
                     'wsgi.input':
                     six.moves.cStringIO('some stuff')})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(2, len(samples))
        # First sample: the 10 bytes of 'some stuff' uploaded.
        data = samples[0]
        self.assertEqual(10, data.volume)
        self.assertEqual('1.0', data.resource_metadata['version'])
        self.assertEqual('container', data.resource_metadata['container'])
        self.assertEqual('obj', data.resource_metadata['object'])
        # test the # of request and the request method
        data = samples[1]
        self.assertEqual('storage.api.request', data.name)
        self.assertEqual(1, data.volume)
        self.assertEqual('put', data.resource_metadata['method'])
    def test_post(self):
        """A POST must meter the posted request-body bytes."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(body=['']), {})
        req = FakeRequest(
            '/1.0/account/container/obj',
            environ={'REQUEST_METHOD': 'POST',
                     'wsgi.input': six.moves.cStringIO('some other stuff')})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(2, len(samples))
        # First sample: the 16 bytes of 'some other stuff' posted.
        data = samples[0]
        self.assertEqual(16, data.volume)
        self.assertEqual('1.0', data.resource_metadata['version'])
        self.assertEqual('container', data.resource_metadata['container'])
        self.assertEqual('obj', data.resource_metadata['object'])
        # test the # of request and the request method
        data = samples[1]
        self.assertEqual('storage.api.request', data.name)
        self.assertEqual(1, data.volume)
        self.assertEqual('post', data.resource_metadata['method'])
    def test_head(self):
        """A bodyless HEAD produces only the single request-count sample."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(body=['']), {})
        req = FakeRequest('/1.0/account/container/obj',
                          environ={'REQUEST_METHOD': 'HEAD'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        # No payload, so no bytes sample — just the api.request one.
        self.assertEqual(1, len(samples))
        data = samples[0]
        self.assertEqual('1.0', data.resource_metadata['version'])
        self.assertEqual('container', data.resource_metadata['container'])
        self.assertEqual('obj', data.resource_metadata['object'])
        self.assertEqual('head', data.resource_metadata['method'])
        self.assertEqual('storage.api.request', data.name)
        self.assertEqual(1, data.volume)
    def test_bogus_request(self):
        """Test even for arbitrary request method, this will still work."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(body=['']), {})
        req = FakeRequest('/1.0/account/container/obj',
                          environ={'REQUEST_METHOD': 'BOGUS'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(1, len(samples))
        data = samples[0]
        self.assertEqual('1.0', data.resource_metadata['version'])
        self.assertEqual('container', data.resource_metadata['container'])
        self.assertEqual('obj', data.resource_metadata['object'])
        # The method is recorded lowercased even for unknown verbs.
        self.assertEqual('bogus', data.resource_metadata['method'])
        self.assertEqual('storage.api.request', data.name)
        self.assertEqual(1, data.volume)
    def test_get_container(self):
        """Container-level GET records container metadata with no object."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(), {})
        req = FakeRequest('/1.0/account/container',
                          environ={'REQUEST_METHOD': 'GET'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(2, len(samples))
        data = samples[0]
        # 28 bytes is the default FakeApp response body size.
        self.assertEqual(28, data.volume)
        self.assertEqual('1.0', data.resource_metadata['version'])
        self.assertEqual('container', data.resource_metadata['container'])
        self.assertIsNone(data.resource_metadata['object'])
    def test_no_metadata_headers(self):
        """Without metadata_headers config, no http_header_* metadata appears."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(), {})
        req = FakeRequest('/1.0/account/container',
                          environ={'REQUEST_METHOD': 'GET'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(2, len(samples))
        data = samples[0]
        http_headers = [k for k in data.resource_metadata.keys()
                        if k.startswith('http_header_')]
        self.assertEqual(0, len(http_headers))
        self.assertEqual('1.0', data.resource_metadata['version'])
        self.assertEqual('container', data.resource_metadata['container'])
        self.assertIsNone(data.resource_metadata['object'])
    def test_metadata_headers(self):
        """Configured headers present on the request become http_header_* metadata.

        Header names are matched case-insensitively (X_VAR1 vs x-var2) and
        configured-but-absent headers (x-var3) are simply omitted.
        """
        app = swift_middleware.CeilometerMiddleware(FakeApp(), {
            'metadata_headers': 'X_VAR1, x-var2, x-var3'
        })
        req = FakeRequest('/1.0/account/container',
                          environ={'REQUEST_METHOD': 'GET'},
                          headers={'X_VAR1': 'value1',
                                   'X_VAR2': 'value2'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(2, len(samples))
        data = samples[0]
        http_headers = [k for k in data.resource_metadata.keys()
                        if k.startswith('http_header_')]
        self.assertEqual(2, len(http_headers))
        self.assertEqual('1.0', data.resource_metadata['version'])
        self.assertEqual('container', data.resource_metadata['container'])
        self.assertIsNone(data.resource_metadata['object'])
        self.assertEqual('value1',
                         data.resource_metadata['http_header_x_var1'])
        self.assertEqual('value2',
                         data.resource_metadata['http_header_x_var2'])
        self.assertFalse('http_header_x_var3' in data.resource_metadata)
    def test_metadata_headers_unicode(self):
        """Non-ASCII header values are stored UTF-8 encoded in the metadata."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(), {
            'metadata_headers': 'unicode'
        })
        uni = u'\xef\xbd\xa1\xef\xbd\xa5'
        req = FakeRequest('/1.0/account/container',
                          environ={'REQUEST_METHOD': 'GET'},
                          headers={'UNICODE': uni})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(2, len(samples))
        data = samples[0]
        http_headers = [k for k in data.resource_metadata.keys()
                        if k.startswith('http_header_')]
        self.assertEqual(1, len(http_headers))
        self.assertEqual('1.0', data.resource_metadata['version'])
        self.assertEqual('container', data.resource_metadata['container'])
        self.assertIsNone(data.resource_metadata['object'])
        # Stored value is the UTF-8 byte string, not the unicode object.
        self.assertEqual(uni.encode('utf-8'),
                         data.resource_metadata['http_header_unicode'])
    def test_metadata_headers_on_not_existing_header(self):
        """A configured header absent from the request adds no metadata."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(), {
            'metadata_headers': 'x-var3'
        })
        req = FakeRequest('/1.0/account/container',
                          environ={'REQUEST_METHOD': 'GET'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(2, len(samples))
        data = samples[0]
        http_headers = [k for k in data.resource_metadata.keys()
                        if k.startswith('http_header_')]
        self.assertEqual(0, len(http_headers))
        self.assertEqual('1.0', data.resource_metadata['version'])
        self.assertEqual('container', data.resource_metadata['container'])
        self.assertIsNone(data.resource_metadata['object'])
    def test_bogus_path(self):
        """An unparseable path (empty account/container) publishes nothing."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(), {})
        req = FakeRequest('/5.0//',
                          environ={'REQUEST_METHOD': 'GET'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(0, len(samples))
    def test_missing_resource_id(self):
        """A path without an account segment publishes no samples."""
        app = swift_middleware.CeilometerMiddleware(FakeApp(), {})
        req = FakeRequest('/v1/', environ={'REQUEST_METHOD': 'GET'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        self.assertEqual(0, len(samples))
    @mock.patch.object(swift_middleware.CeilometerMiddleware,
                       'publish_sample')
    def test_publish_sample_fail(self, mocked_publish_sample):
        """A publish_sample failure must not break the proxied response."""
        mocked_publish_sample.side_effect = Exception("a exception")
        app = swift_middleware.CeilometerMiddleware(FakeApp(body=["test"]), {})
        req = FakeRequest('/1.0/account/container',
                          environ={'REQUEST_METHOD': 'GET'})
        resp = list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples
        # Nothing published, but the original body still flows through.
        self.assertEqual(0, len(samples))
        self.assertEqual(["test"], resp)
        # Called with (env, 0 bytes received, 4 bytes sent for "test").
        mocked_publish_sample.assert_called_once_with(mock.ANY, 0, 4)
    def test_reseller_prefix(self):
        """The reseller prefix is stripped from the resource id."""
        # No reseller prefix set: ensure middleware uses AUTH_
        app = swift_middleware.CeilometerMiddleware(FakeApp(), {})
        req = FakeRequest('/1.0/AUTH_account/container/obj',
                          environ={'REQUEST_METHOD': 'GET'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples[0]
        self.assertEqual("account", samples.resource_id)
        # Custom reseller prefix set
        app = swift_middleware.CeilometerMiddleware(
            FakeApp(), {'reseller_prefix': 'CUSTOM_'})
        req = FakeRequest('/1.0/CUSTOM_account/container/obj',
                          environ={'REQUEST_METHOD': 'GET'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples[0]
        self.assertEqual("account", samples.resource_id)
    def test_invalid_reseller_prefix(self):
        """A prefix missing its trailing underscore still strips correctly."""
        # Custom reseller prefix set, but without trailing underscore
        app = swift_middleware.CeilometerMiddleware(
            FakeApp(), {'reseller_prefix': 'CUSTOM'})
        req = FakeRequest('/1.0/CUSTOM_account/container/obj',
                          environ={'REQUEST_METHOD': 'GET'})
        list(app(req.environ, self.start_response))
        samples = self.pipeline_manager.pipelines[0].samples[0]
        self.assertEqual("account", samples.resource_id)
| |
"""The tests the for Locative device tracker platform."""
from unittest.mock import patch, Mock
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import locative
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.locative import DOMAIN, TRACKER_UPDATE
from homeassistant.const import HTTP_OK, HTTP_UNPROCESSABLE_ENTITY
from homeassistant.helpers.dispatcher import DATA_DISPATCHER
from homeassistant.setup import async_setup_component
# pylint: disable=redefined-outer-name
@pytest.fixture(autouse=True)
def mock_dev_track(mock_device_tracker_conf):
    """Mock device tracker config loading.

    The docstring is the entire body: requesting the
    ``mock_device_tracker_conf`` fixture is the fixture's whole effect, so
    the redundant ``pass`` statement has been removed.
    """
@pytest.fixture
async def locative_client(loop, hass, hass_client):
    """Locative mock client.

    Sets up the locative integration and returns an authenticated test
    HTTP client, with legacy device-tracker config writes patched out.
    """
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
    await hass.async_block_till_done()

    with patch("homeassistant.components.device_tracker.legacy.update_config"):
        return await hass_client()
@pytest.fixture
async def webhook_id(hass, locative_client):
    """Initialize the Locative component and get the webhook_id.

    Walks the config flow (user step, then empty confirm) and returns the
    webhook id from the created config entry.
    """
    hass.config.api = Mock(base_url="http://example.com")
    result = await hass.config_entries.flow.async_init(
        "locative", context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result

    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    await hass.async_block_till_done()

    return result["result"].data["webhook_id"]
async def test_missing_data(locative_client, webhook_id):
    """Test missing data.

    Each required field is removed in turn; the webhook must reply 422.
    'test' triggers are accepted (with or without a location), while an
    unknown trigger is rejected.
    """
    url = "/api/webhook/{}".format(webhook_id)
    data = {
        "latitude": 1.0,
        "longitude": 1.1,
        "device": "123",
        "id": "Home",
        "trigger": "enter",
    }

    # No data
    req = await locative_client.post(url)
    assert req.status == HTTP_UNPROCESSABLE_ENTITY

    # No latitude
    copy = data.copy()
    del copy["latitude"]
    req = await locative_client.post(url, data=copy)
    assert req.status == HTTP_UNPROCESSABLE_ENTITY

    # No device
    copy = data.copy()
    del copy["device"]
    req = await locative_client.post(url, data=copy)
    assert req.status == HTTP_UNPROCESSABLE_ENTITY

    # No location
    copy = data.copy()
    del copy["id"]
    req = await locative_client.post(url, data=copy)
    assert req.status == HTTP_UNPROCESSABLE_ENTITY

    # No trigger
    copy = data.copy()
    del copy["trigger"]
    req = await locative_client.post(url, data=copy)
    assert req.status == HTTP_UNPROCESSABLE_ENTITY

    # Test message
    copy = data.copy()
    copy["trigger"] = "test"
    req = await locative_client.post(url, data=copy)
    assert req.status == HTTP_OK

    # Test message, no location
    copy = data.copy()
    copy["trigger"] = "test"
    del copy["id"]
    req = await locative_client.post(url, data=copy)
    assert req.status == HTTP_OK

    # Unknown trigger
    copy = data.copy()
    copy["trigger"] = "foobar"
    req = await locative_client.post(url, data=copy)
    assert req.status == HTTP_UNPROCESSABLE_ENTITY
async def test_enter_and_exit(hass, locative_client, webhook_id):
    """Test when there is a known zone.

    Zone ids are matched case-insensitively ('Home', 'HOME', 'hOmE'), and
    the tracker state follows enter/exit transitions across zones.
    """
    url = "/api/webhook/{}".format(webhook_id)
    data = {
        "latitude": 40.7855,
        "longitude": -111.7367,
        "device": "123",
        "id": "Home",
        "trigger": "enter",
    }

    # Enter the Home
    req = await locative_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state_name = hass.states.get(
        "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
    ).state
    assert state_name == "home"

    data["id"] = "HOME"
    data["trigger"] = "exit"

    # Exit Home
    req = await locative_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state_name = hass.states.get(
        "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
    ).state
    assert state_name == "not_home"

    data["id"] = "hOmE"
    data["trigger"] = "enter"

    # Enter Home again
    req = await locative_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state_name = hass.states.get(
        "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
    ).state
    assert state_name == "home"

    data["trigger"] = "exit"

    # Exit Home
    req = await locative_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state_name = hass.states.get(
        "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
    ).state
    assert state_name == "not_home"

    data["id"] = "work"
    data["trigger"] = "enter"

    # Enter Work
    req = await locative_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK
    state_name = hass.states.get(
        "{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"])
    ).state
    assert state_name == "work"
async def test_exit_after_enter(hass, locative_client, webhook_id):
    """Test when an exit message comes after an enter message.

    Exiting 'Home' while the device is currently in 'Work' must not
    override the newer 'work' state.
    """
    url = "/api/webhook/{}".format(webhook_id)
    data = {
        "latitude": 40.7855,
        "longitude": -111.7367,
        "device": "123",
        "id": "Home",
        "trigger": "enter",
    }

    # Enter Home
    req = await locative_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK

    state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
    assert state.state == "home"

    data["id"] = "Work"

    # Enter Work
    req = await locative_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK

    state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
    assert state.state == "work"

    data["id"] = "Home"
    data["trigger"] = "exit"

    # Exit Home
    req = await locative_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK

    # Still at work: the stale 'exit Home' is ignored.
    state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
    assert state.state == "work"
async def test_exit_first(hass, locative_client, webhook_id):
    """Test when an exit message is sent first on a new device."""
    url = "/api/webhook/{}".format(webhook_id)
    data = {
        "latitude": 40.7855,
        "longitude": -111.7367,
        "device": "new_device",
        "id": "Home",
        "trigger": "exit",
    }

    # Exit Home
    req = await locative_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK

    # A previously-unknown device exiting a zone lands in 'not_home'.
    state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
    assert state.state == "not_home"
async def test_two_devices(hass, locative_client, webhook_id):
    """Test updating two different devices.

    Updates for one device must not leak into the state of the other.
    """
    url = "/api/webhook/{}".format(webhook_id)
    data_device_1 = {
        "latitude": 40.7855,
        "longitude": -111.7367,
        "device": "device_1",
        "id": "Home",
        "trigger": "exit",
    }

    # Exit Home
    req = await locative_client.post(url, data=data_device_1)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK

    state = hass.states.get(
        "{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_1["device"])
    )
    assert state.state == "not_home"

    # Enter Home
    data_device_2 = dict(data_device_1)
    data_device_2["device"] = "device_2"
    data_device_2["trigger"] = "enter"
    req = await locative_client.post(url, data=data_device_2)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK

    state = hass.states.get(
        "{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_2["device"])
    )
    assert state.state == "home"
    # Device 1 remains unaffected by device 2's update.
    state = hass.states.get(
        "{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_1["device"])
    )
    assert state.state == "not_home"
@pytest.mark.xfail(
    reason="The device_tracker component does not support unloading yet."
)
async def test_load_unload_entry(hass, locative_client, webhook_id):
    """Test that the appropriate dispatch signals are added and removed."""
    url = "/api/webhook/{}".format(webhook_id)
    data = {
        "latitude": 40.7855,
        "longitude": -111.7367,
        "device": "new_device",
        "id": "Home",
        "trigger": "exit",
    }

    # Exit Home
    req = await locative_client.post(url, data=data)
    await hass.async_block_till_done()
    assert req.status == HTTP_OK

    state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["device"]))
    assert state.state == "not_home"
    # One TRACKER_UPDATE listener registered while the entry is loaded.
    assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1

    entry = hass.config_entries.async_entries(DOMAIN)[0]

    await locative.async_unload_entry(hass, entry)
    await hass.async_block_till_done()
    # Unloading must remove the dispatcher connection.
    assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
| |
from __future__ import absolute_import
import errno
from datetime import datetime, timedelta
from mock import Mock, call, patch
from nose import SkipTest
from pickle import dumps, loads
from celery import beat
from celery.five import keys, string_t
from celery.schedules import schedule
from celery.utils import uuid
from celery.tests.case import AppCase
# Bare attribute container; tests assign arbitrary attributes onto
# instances (e.g. a fake `persistence` with an `open` callable).
class Object(object):
    pass
class MockShelve(dict):
    """In-memory stand-in for a ``shelve`` store.

    Behaves like a plain dict for item access while recording whether
    ``close()`` and ``sync()`` were ever invoked.
    """

    # Flags flipped to True by close()/sync(); class-level defaults keep
    # fresh instances in the "untouched" state.
    closed = False
    synced = False

    def close(self):
        """Record the close instead of touching any file."""
        self.closed = True

    def sync(self):
        """Record the flush instead of touching any file."""
        self.synced = True
class MockService(object):
    """Service double that only records start/stop invocations."""

    started = False
    stopped = False

    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor signature.
        pass

    def start(self, **kwargs):
        """Mark the service as started."""
        self.started = True

    def stop(self, **kwargs):
        """Mark the service as stopped."""
        self.stopped = True
class test_ScheduleEntry(AppCase):
    """Unit tests for :class:`celery.beat.ScheduleEntry`."""

    Entry = beat.ScheduleEntry

    def create_entry(self, **kwargs):
        """Build a ScheduleEntry with sane defaults, overridable via kwargs."""
        entry = dict(
            name='celery.unittest.add',
            schedule=timedelta(seconds=10),
            args=(2, 2),
            options={'routing_key': 'cpu'},
            app=self.app,
        )
        return self.Entry(**dict(entry, **kwargs))

    def test_next(self):
        """next() advances last_run_at and bumps total_run_count."""
        entry = self.create_entry(schedule=10)
        self.assertTrue(entry.last_run_at)
        self.assertIsInstance(entry.last_run_at, datetime)
        self.assertEqual(entry.total_run_count, 0)

        next_run_at = entry.last_run_at + timedelta(seconds=10)
        next_entry = entry.next(next_run_at)
        self.assertGreaterEqual(next_entry.last_run_at, next_run_at)
        self.assertEqual(next_entry.total_run_count, 1)

    def test_is_due(self):
        """is_due() reflects whether last_run_at is past the interval."""
        entry = self.create_entry(schedule=timedelta(seconds=10))
        self.assertIs(entry.app, self.app)
        self.assertIs(entry.schedule.app, self.app)
        due1, next_time_to_run1 = entry.is_due()
        self.assertFalse(due1)
        self.assertGreater(next_time_to_run1, 9)

        # Pretend the entry last ran 10s in the past: now it is due.
        next_run_at = entry.last_run_at - timedelta(seconds=10)
        next_entry = entry.next(next_run_at)
        due2, next_time_to_run2 = next_entry.is_due()
        self.assertTrue(due2)
        self.assertGreater(next_time_to_run2, 9)

    def test_repr(self):
        entry = self.create_entry()
        self.assertIn('<Entry:', repr(entry))

    def test_update(self):
        """update() overwrites schedule, args, kwargs and options."""
        entry = self.create_entry()
        self.assertEqual(entry.schedule, timedelta(seconds=10))
        self.assertTupleEqual(entry.args, (2, 2))
        self.assertDictEqual(entry.kwargs, {})
        self.assertDictEqual(entry.options, {'routing_key': 'cpu'})

        entry2 = self.create_entry(schedule=timedelta(minutes=20),
                                   args=(16, 16),
                                   kwargs={'callback': 'foo.bar.baz'},
                                   options={'routing_key': 'urgent'})
        entry.update(entry2)
        self.assertEqual(entry.schedule, schedule(timedelta(minutes=20)))
        self.assertTupleEqual(entry.args, (16, 16))
        self.assertDictEqual(entry.kwargs, {'callback': 'foo.bar.baz'})
        self.assertDictEqual(entry.options, {'routing_key': 'urgent'})
class mScheduler(beat.Scheduler):
    """Scheduler double that records sent tasks instead of dispatching them."""

    def __init__(self, *args, **kwargs):
        # Every send_task call appends a record here for later inspection.
        self.sent = []
        beat.Scheduler.__init__(self, *args, **kwargs)

    def send_task(self, name=None, args=None, kwargs=None, **options):
        record = {
            'name': name,
            'args': args,
            'kwargs': kwargs,
            'options': options,
        }
        self.sent.append(record)
        return self.app.AsyncResult(uuid())
class mSchedulerSchedulingError(mScheduler):
    """Scheduler double whose send_task always fails with SchedulingError."""

    def send_task(self, *args, **kwargs):
        raise beat.SchedulingError('Could not apply task')
class mSchedulerRuntimeError(mScheduler):
    """Scheduler double whose maybe_due raises RuntimeError mid-tick."""

    def maybe_due(self, *args, **kwargs):
        raise RuntimeError('dict modified while itervalues')
class mocked_schedule(schedule):
    """schedule whose due-ness and next run time are fixed at construction.

    NOTE(review): intentionally does not call schedule.__init__ — it sets
    only the attributes the tests rely on (run_every, nowfun).
    """

    def __init__(self, is_due, next_run_at):
        self._is_due = is_due
        self._next_run_at = next_run_at
        self.run_every = timedelta(seconds=1)
        self.nowfun = datetime.utcnow

    def is_due(self, last_run_at):
        # last_run_at is ignored; always return the canned pair.
        return self._is_due, self._next_run_at
# Shared fixtures: a schedule that is always due, and one that never is,
# both reporting a 1-second next-run interval.
always_due = mocked_schedule(True, 1)
always_pending = mocked_schedule(False, 1)
class test_Scheduler(AppCase):
    """Unit tests for :class:`celery.beat.Scheduler`."""

    def test_custom_schedule_dict(self):
        """A custom schedule mapping is used as-is (same object)."""
        custom = {'foo': 'bar'}
        scheduler = mScheduler(app=self.app, schedule=custom, lazy=True)
        self.assertIs(scheduler.data, custom)

    def test_apply_async_uses_registered_task_instances(self):
        """apply_async dispatches via the registered task's apply_async."""
        @self.app.task(shared=False)
        def foo():
            pass
        foo.apply_async = Mock(name='foo.apply_async')
        assert foo.name in foo._get_app().tasks
        scheduler = mScheduler(app=self.app)
        scheduler.apply_async(scheduler.Entry(task=foo.name, app=self.app))
        self.assertTrue(foo.apply_async.called)

    def test_apply_async_should_not_sync(self):
        """_do_sync runs only when should_sync() says so."""
        @self.app.task(shared=False)
        def not_sync():
            pass
        not_sync.apply_async = Mock()

        s = mScheduler(app=self.app)
        s._do_sync = Mock()
        s.should_sync = Mock()
        s.should_sync.return_value = True
        s.apply_async(s.Entry(task=not_sync.name, app=self.app))
        s._do_sync.assert_called_with()

        s._do_sync = Mock()
        s.should_sync.return_value = False
        s.apply_async(s.Entry(task=not_sync.name, app=self.app))
        self.assertFalse(s._do_sync.called)

    @patch('celery.app.base.Celery.send_task')
    def test_send_task(self, send_task):
        """Scheduler.send_task delegates to the app's send_task."""
        b = beat.Scheduler(app=self.app)
        b.send_task('tasks.add', countdown=10)
        send_task.assert_called_with('tasks.add', countdown=10)

    def test_info(self):
        scheduler = mScheduler(app=self.app)
        self.assertIsInstance(scheduler.info, string_t)

    def test_maybe_entry(self):
        """_maybe_entry passes entries through and coerces dicts."""
        s = mScheduler(app=self.app)
        entry = s.Entry(name='add every', task='tasks.add', app=self.app)
        self.assertIs(s._maybe_entry(entry.name, entry), entry)
        self.assertTrue(s._maybe_entry('add every', {
            'task': 'tasks.add',
        }))

    def test_set_schedule(self):
        s = mScheduler(app=self.app)
        s.schedule = {'foo': 'bar'}
        self.assertEqual(s.data, {'foo': 'bar'})

    @patch('kombu.connection.Connection.ensure_connection')
    def test_ensure_connection_error_handler(self, ensure):
        """The connection error callback must be callable without raising."""
        s = mScheduler(app=self.app)
        self.assertTrue(s._ensure_connected())
        self.assertTrue(ensure.called)
        callback = ensure.call_args[0][0]

        callback(KeyError(), 5)

    def test_install_default_entries(self):
        """backend_cleanup is installed only when expiry is set and the
        backend does not auto-expire results."""
        self.app.conf.CELERY_TASK_RESULT_EXPIRES = None
        self.app.conf.CELERYBEAT_SCHEDULE = {}
        s = mScheduler(app=self.app)
        s.install_default_entries({})
        self.assertNotIn('celery.backend_cleanup', s.data)
        self.app.backend.supports_autoexpire = False

        self.app.conf.CELERY_TASK_RESULT_EXPIRES = 30
        s = mScheduler(app=self.app)
        s.install_default_entries({})
        self.assertIn('celery.backend_cleanup', s.data)

        self.app.backend.supports_autoexpire = True
        self.app.conf.CELERY_TASK_RESULT_EXPIRES = 31
        s = mScheduler(app=self.app)
        s.install_default_entries({})
        self.assertNotIn('celery.backend_cleanup', s.data)

    def test_due_tick(self):
        """A due entry makes tick() return the schedule's next interval."""
        scheduler = mScheduler(app=self.app)
        scheduler.add(name='test_due_tick',
                      schedule=always_due,
                      args=(1, 2),
                      kwargs={'foo': 'bar'})
        self.assertEqual(scheduler.tick(), 1)

    @patch('celery.beat.error')
    def test_due_tick_SchedulingError(self, error):
        """SchedulingError during dispatch is logged, not propagated."""
        scheduler = mSchedulerSchedulingError(app=self.app)
        scheduler.add(name='test_due_tick_SchedulingError',
                      schedule=always_due)
        self.assertEqual(scheduler.tick(), 1)
        self.assertTrue(error.called)

    def test_due_tick_RuntimeError(self):
        """RuntimeError in maybe_due falls back to max_interval."""
        scheduler = mSchedulerRuntimeError(app=self.app)
        scheduler.add(name='test_due_tick_RuntimeError',
                      schedule=always_due)
        self.assertEqual(scheduler.tick(), scheduler.max_interval)

    def test_pending_tick(self):
        scheduler = mScheduler(app=self.app)
        scheduler.add(name='test_pending_tick',
                      schedule=always_pending)
        self.assertEqual(scheduler.tick(), 1)

    def test_honors_max_interval(self):
        """tick() never sleeps longer than max_interval."""
        scheduler = mScheduler(app=self.app)
        maxi = scheduler.max_interval
        scheduler.add(name='test_honors_max_interval',
                      schedule=mocked_schedule(False, maxi * 4))
        self.assertEqual(scheduler.tick(), maxi)

    def test_ticks(self):
        """tick() returns the minimum remaining time across entries."""
        scheduler = mScheduler(app=self.app)
        nums = [600, 300, 650, 120, 250, 36]
        s = dict(('test_ticks%s' % i,
                 {'schedule': mocked_schedule(False, j)})
                 for i, j in enumerate(nums))
        scheduler.update_from_dict(s)
        self.assertEqual(scheduler.tick(), min(nums))

    def test_schedule_no_remain(self):
        """A schedule reporting no next-run time yields max_interval."""
        scheduler = mScheduler(app=self.app)
        scheduler.add(name='test_schedule_no_remain',
                      schedule=mocked_schedule(False, None))
        self.assertEqual(scheduler.tick(), scheduler.max_interval)

    def test_interface(self):
        # Smoke-test the no-op default implementations.
        scheduler = mScheduler(app=self.app)
        scheduler.sync()
        scheduler.setup_schedule()
        scheduler.close()

    def test_merge_inplace(self):
        """merge_inplace replaces the schedule set, dropping stale keys."""
        a = mScheduler(app=self.app)
        b = mScheduler(app=self.app)
        a.update_from_dict({'foo': {'schedule': mocked_schedule(True, 10)},
                            'bar': {'schedule': mocked_schedule(True, 20)}})
        b.update_from_dict({'bar': {'schedule': mocked_schedule(True, 40)},
                            'baz': {'schedule': mocked_schedule(True, 10)}})
        a.merge_inplace(b.schedule)

        self.assertNotIn('foo', a.schedule)
        self.assertIn('baz', a.schedule)
        self.assertEqual(a.schedule['bar'].schedule._next_run_at, 40)
def create_persistent_scheduler(shelv=None):
    """Return (PersistentScheduler subclass, shelve) wired to a MockShelve.

    The returned class persists into *shelv* instead of a real shelve file
    and exposes two test hooks: ``tick_raises_exit`` (tick raises
    SystemExit) and ``shutdown_service`` (tick flags that service as shut
    down, so Service.start exits after one loop iteration).
    """
    if shelv is None:
        shelv = MockShelve()

    class MockPersistentScheduler(beat.PersistentScheduler):
        sh = shelv
        # Fake persistence backend: `open` always yields the mock shelve.
        persistence = Object()
        persistence.open = lambda *a, **kw: shelv
        tick_raises_exit = False
        shutdown_service = None

        def tick(self):
            if self.tick_raises_exit:
                raise SystemExit()
            if self.shutdown_service:
                self.shutdown_service._is_shutdown.set()
            return 0.0

    return MockPersistentScheduler, shelv
class test_PersistentScheduler(AppCase):
    """Unit tests for :class:`celery.beat.PersistentScheduler`."""

    @patch('os.remove')
    def test_remove_db(self, remove):
        """_remove_db removes every known suffix; ENOENT is tolerated,
        other OSErrors propagate."""
        s = create_persistent_scheduler()[0](app=self.app,
                                             schedule_filename='schedule')
        s._remove_db()
        remove.assert_has_calls(
            [call('schedule' + suffix) for suffix in s.known_suffixes]
        )
        err = OSError()
        err.errno = errno.ENOENT
        remove.side_effect = err
        s._remove_db()
        err.errno = errno.EPERM
        with self.assertRaises(OSError):
            s._remove_db()

    def test_setup_schedule(self):
        """setup_schedule recovers from a corrupt db and clears the store
        when the persisted tz/utc settings no longer match."""
        s = create_persistent_scheduler()[0](app=self.app,
                                             schedule_filename='schedule')
        opens = s.persistence.open = Mock()
        s._remove_db = Mock()

        def effect(*args, **kwargs):
            # First open fails (corrupt db); the retry succeeds.
            if opens.call_count > 1:
                return s.sh
            raise OSError()
        opens.side_effect = effect

        s.setup_schedule()
        s._remove_db.assert_called_with()

        s._store = {'__version__': 1}
        s.setup_schedule()

        s._store.clear = Mock()
        op = s.persistence.open = Mock()
        op.return_value = s._store
        s._store['tz'] = 'FUNKY'
        s.setup_schedule()
        op.assert_called_with(s.schedule_filename, writeback=True)
        s._store.clear.assert_called_with()
        s._store['utc_enabled'] = False
        s._store.clear = Mock()
        s.setup_schedule()
        s._store.clear.assert_called_with()

    def test_get_schedule(self):
        """The schedule property round-trips through the 'entries' key."""
        s = create_persistent_scheduler()[0](
            schedule_filename='schedule', app=self.app,
        )
        s._store = {'entries': {}}
        s.schedule = {'foo': 'bar'}
        self.assertDictEqual(s.schedule, {'foo': 'bar'})
        self.assertDictEqual(s._store['entries'], s.schedule)
class test_Service(AppCase):
    """Unit tests for :class:`celery.beat.Service`."""

    def get_service(self):
        """Return a Service wired to the mock persistent scheduler."""
        Scheduler, mock_shelve = create_persistent_scheduler()
        return beat.Service(app=self.app, scheduler_cls=Scheduler), mock_shelve

    def test_pickleable(self):
        s = beat.Service(app=self.app, scheduler_cls=Mock)
        self.assertTrue(loads(dumps(s)))

    def test_start(self):
        """sync() flushes and closes the shelve and stops the service."""
        s, sh = self.get_service()
        schedule = s.scheduler.schedule
        self.assertIsInstance(schedule, dict)
        self.assertIsInstance(s.scheduler, beat.Scheduler)
        scheduled = list(schedule.keys())
        for task_name in keys(sh['entries']):
            self.assertIn(task_name, scheduled)

        s.sync()
        self.assertTrue(sh.closed)
        self.assertTrue(sh.synced)
        self.assertTrue(s._is_stopped.isSet())
        # sync() again must be a safe no-op.
        s.sync()
        s.stop(wait=False)
        self.assertTrue(s._is_shutdown.isSet())
        s.stop(wait=True)
        self.assertTrue(s._is_shutdown.isSet())

        # Scheduler.sync must tolerate a missing store.
        p = s.scheduler._store
        s.scheduler._store = None
        try:
            s.scheduler.sync()
        finally:
            s.scheduler._store = p

    def test_start_embedded_process(self):
        s, sh = self.get_service()
        s._is_shutdown.set()
        s.start(embedded_process=True)

    def test_start_thread(self):
        s, sh = self.get_service()
        s._is_shutdown.set()
        s.start(embedded_process=False)

    def test_start_tick_raises_exit_error(self):
        """SystemExit from tick() shuts the service down cleanly."""
        s, sh = self.get_service()
        s.scheduler.tick_raises_exit = True
        s.start()
        self.assertTrue(s._is_shutdown.isSet())

    def test_start_manages_one_tick_before_shutdown(self):
        """The shutdown flag set during tick() ends the start loop."""
        s, sh = self.get_service()
        s.scheduler.shutdown_service = s
        s.start()
        self.assertTrue(s._is_shutdown.isSet())
class test_EmbeddedService(AppCase):
    """Tests for the process- and thread-based embedded beat service."""

    def test_start_stop_process(self):
        """Process-backed embedded service starts/stops and terminates
        its child process."""
        try:
            import _multiprocessing  # noqa
        except ImportError:
            raise SkipTest('multiprocessing not available')

        from billiard.process import Process

        s = beat.EmbeddedService(app=self.app)
        self.assertIsInstance(s, Process)
        self.assertIsInstance(s.service, beat.Service)
        s.service = MockService()

        class _Popen(object):
            terminated = False

            def terminate(self):
                self.terminated = True

        s.run()
        self.assertTrue(s.service.started)

        s._popen = _Popen()
        s.stop()
        self.assertTrue(s.service.stopped)
        self.assertTrue(s._popen.terminated)

    def test_start_stop_threaded(self):
        """Thread-backed embedded service starts and stops its service."""
        s = beat.EmbeddedService(thread=True, app=self.app)
        from threading import Thread
        self.assertIsInstance(s, Thread)
        self.assertIsInstance(s.service, beat.Service)
        s.service = MockService()

        s.run()
        self.assertTrue(s.service.started)

        s.stop()
        self.assertTrue(s.service.stopped)
class test_schedule(AppCase):
    """Timezone-awareness behavior of :class:`celery.schedules.schedule`."""

    def test_maybe_make_aware(self):
        """With utc_enabled, naive datetimes gain a tzinfo; otherwise not."""
        x = schedule(10, app=self.app)
        x.utc_enabled = True
        d = x.maybe_make_aware(datetime.utcnow())
        self.assertTrue(d.tzinfo)
        x.utc_enabled = False
        d2 = x.maybe_make_aware(datetime.utcnow())
        self.assertIsNone(d2.tzinfo)

    def test_to_local(self):
        """to_local strips tzinfo when utc_enabled, adds it otherwise."""
        x = schedule(10, app=self.app)
        x.utc_enabled = True
        d = x.to_local(datetime.utcnow())
        self.assertIsNone(d.tzinfo)
        x.utc_enabled = False
        d = x.to_local(datetime.utcnow())
        self.assertTrue(d.tzinfo)
| |
import logging
log = logging.getLogger(__name__)
from flask import request
from bokeh import protocol
from .bbauth import (
check_read_authentication_and_create_client,
check_write_authentication_and_create_client
)
from ..app import bokeh_app
from ..crossdomain import crossdomain
from ..serverbb import prune
from ..views import make_json
def init_bokeh(clientdoc):
    """Attach *clientdoc* to the current request with auto-persistence off."""
    # Disable automatic storing/adding before exposing the document.
    clientdoc.autostore = False
    clientdoc.autoadd = False
    request.bokeh_server_document = clientdoc
# Management Functions
@bokeh_app.route("/bokeh/bb/<docid>/reset", methods=['GET'])
@check_write_authentication_and_create_client
def reset(docid):
    ''' Reset a specified :class:`Document <bokeh.document.Document>`.

    Deletes all stored objects except for the current
    :class:`PlotContext <bokeh.objects.PlotContext>`, which has all of
    its children removed.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to reset

    :status 200: when user is authorized
    :status 401: when user is not authorized

    '''
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    prune(clientdoc)
    for m in clientdoc._models:
        if not m.typename.endswith('PlotContext'):
            # Delete everything that is not a PlotContext.
            bokeh_app.backbone_storage.del_obj(docid, m)
        else:
            # Keep the PlotContext itself but orphan its children.
            m.children = []
            bokeh_app.backbone_storage.store_objects(docid, m)
    return 'success'
@bokeh_app.route("/bokeh/bb/<docid>/rungc", methods=['GET'])
@check_write_authentication_and_create_client
def rungc(docid):
    ''' Run the Bokeh Server garbage collector for a given
    :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to collect

    :status 200: when user is authorized
    :status 401: when user is not authorized

    '''
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    # delete=True makes prune actually remove unreachable models.
    prune(clientdoc, delete=True)
    return 'success'
@bokeh_app.route("/bokeh/bb/<docid>/callbacks", methods=['POST'])
@check_write_authentication_and_create_client
def callbacks_post(docid):
    ''' Update callbacks for a given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to update callbacks for

    :status 200: when user is authorized
    :status 401: when user is not authorized

    '''
    # broken...
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    prune(clientdoc)
    # Parse the request body as JSON and persist the callbacks verbatim.
    jsondata = protocol.deserialize_json(request.data.decode('utf-8'))
    bokeh_app.backbone_storage.push_callbacks(jsondata)
    return make_json(protocol.serialize_json(jsondata))
@bokeh_app.route("/bokeh/bb/<docid>/callbacks", methods=['GET'])
@check_write_authentication_and_create_client
def callbacks_get(docid):
    ''' Retrieve callbacks for a given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to get callbacks for

    :status 200: when user is authorized
    :status 401: when user is not authorized

    '''
    # broken...
    # NOTE(review): a read-only GET guarded by the *write* auth decorator;
    # callbacks_post uses the same one — confirm this is intentional.
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    prune(clientdoc)
    jsondata = bokeh_app.backbone_storage.load_callbacks()
    return make_json(protocol.serialize_json(jsondata))
# bulk upsert
@bokeh_app.route("/bokeh/bb/<docid>/bulkupsert", methods=['POST'])
@check_write_authentication_and_create_client
def bulk_upsert(docid):
    ''' Update or insert new objects for a given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to update or insert into

    :status 200: when user is authorized
    :status 401: when user is not authorized

    '''
    # endpoint is only used by python, therefore we don't process
    # callbacks here
    client = request.headers.get('client', 'python')
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    prune(clientdoc)
    data = protocol.deserialize_json(request.data.decode('utf-8'))
    if client == 'python':
        # Python clients send complete state: fire no events.
        clientdoc.load(*data, events='none', dirty=True)
    else:
        # Other clients only get events for pre-existing models.
        clientdoc.load(*data, events='existing', dirty=True)
    changed = bokeh_app.backbone_storage.store_document(clientdoc)
    # Broadcast the changed models to websocket subscribers.
    msg = ws_update(clientdoc, changed)
    return make_json(msg)
def ws_update(clientdoc, models):
    """Serialize *models* and publish a 'modelpush' message for the document.

    Returns the serialized message so callers can also use it as an HTTP
    response body.
    """
    payload = {
        'msgtype': 'modelpush',
        'modelspecs': clientdoc.dump(*models),
    }
    msg = protocol.serialize_json(payload)
    bokeh_app.publisher.send("bokehplot:" + clientdoc.docid, msg)
    return msg
def ws_delete(clientdoc, models):
    """Serialize *models* and send a 'modeldel' message for the document.

    NOTE(review): this sends via ``bokeh_app.wsmanager`` while ``ws_update``
    sends via ``bokeh_app.publisher`` -- confirm the asymmetry is intentional.
    """
    payload = {
        'msgtype': 'modeldel',
        'modelspecs': clientdoc.dump(*models),
    }
    msg = protocol.serialize_json(payload)
    bokeh_app.wsmanager.send("bokehplot:" + clientdoc.docid, msg)
    return msg
# backbone functionality
@bokeh_app.route("/bokeh/bb/<docid>/<typename>/", methods=['POST'])
@check_write_authentication_and_create_client
def create(docid, typename):
    ''' Update or insert new objects for a given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to update or insert into
    :param typename: type of the model to create
    :status 200: when user is authorized
    :status 401: when user is not authorized
    '''
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    prune(clientdoc)
    modeldata = protocol.deserialize_json(request.data.decode('utf-8'))
    modeldata = {'type': typename,
                 'attributes': modeldata}
    clientdoc.load(modeldata, dirty=True)
    bokeh_app.backbone_storage.store_document(clientdoc)
    # NOTE(review): ws_update unpacks its second argument into clientdoc.dump;
    # passing the raw spec dict here looks suspect -- confirm dump() accepts it.
    ws_update(clientdoc, modeldata)
    # BUG FIX: modeldata is a dict, not a list -- `modeldata[0]` raised
    # KeyError; return the 'attributes' entry directly.
    return protocol.serialize_json(modeldata['attributes'])
@check_read_authentication_and_create_client
def _bulkget(docid, typename=None):
    """Dump models from the document as a JSON response.

    With no *typename*, every model is dumped in full; with a *typename*,
    only matching models are dumped and just their 'attributes' are returned.
    """
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    prune(clientdoc)
    models = clientdoc._models.values()
    if typename is None:
        attrs = clientdoc.dump(*models)
    else:
        matching = [m for m in models if m.__view_model__ == typename]
        attrs = [spec['attributes'] for spec in clientdoc.dump(*matching)]
    return make_json(protocol.serialize_json(attrs))
@bokeh_app.route("/bokeh/bb/<docid>/", methods=['GET'])
def bulkget_without_typename(docid):
    ''' Retrieve all objects for a given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to retrieve objects from
    :status 200: when user is authorized
    :status 401: when user is not authorized
    '''
    # Thin route wrapper; auth is applied by the decorator on _bulkget.
    return _bulkget(docid)
@bokeh_app.route("/bokeh/bb/<docid>/<typename>/", methods=['GET'])
def bulkget_with_typename(docid, typename):
    ''' Retrieve all objects of a specified typename for a
    given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to retrieve objects from
    :param typename: the type of objects to find and return
    :status 200: when user is authorized
    :status 401: when user is not authorized
    '''
    # BUG FIX: the route captures <typename> but the signature omitted it,
    # so Flask raised TypeError on dispatch and the bare `typename` below
    # was an undefined name.
    return _bulkget(docid, typename)
@crossdomain(origin="*", methods=['PATCH', 'GET', 'PUT'], headers=['BOKEH-API-KEY', 'Continuum-Clientid', 'Content-Type'])
def _handle_specific_model(docid, typename, id, method):
    """Dispatch a single-model request to the handler for *method*.

    PUT and PATCH are both treated as updates. Any other method falls
    through and yields None, exactly as before.
    """
    if method in ('PUT', 'PATCH'):
        return update(docid, typename, id)
    if method == 'GET':
        return getbyid(docid, typename, id)
    if method == 'DELETE':
        return delete(docid, typename, id)
# route for working with individual models
@bokeh_app.route("/bokeh/bb/<docid>/<typename>/<id>/", methods=['GET'])
def _handle_specific_model_get(docid, typename, id):
    ''' Retrieve a specific model with a given id and typename for a
    given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to retrieve from
    :param typename: the type of objects to find and return
    :param id: unique id of the object to retrieve
    :status 200: when user is authorized
    :status 401: when user is not authorized
    '''
    # Delegates to the shared dispatcher keyed on the live request method.
    return _handle_specific_model(docid, typename, id, request.method)
@bokeh_app.route("/bokeh/bb/<docid>/<typename>/<id>/", methods=['OPTIONS'])
def _handle_specific_model_options(docid, typename, id):
    ''' Retrieve crossdomain options for a specific model with a
    given id and typename for a given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
    :param typename: the type of objects to find and return
    :param id: unique id of the object to retrieve
    :status 200:
    '''
    # OPTIONS preflight; the crossdomain decorator on the dispatcher
    # supplies the CORS headers.
    return _handle_specific_model(docid, typename, id, request.method)
@bokeh_app.route("/bokeh/bb/<docid>/<typename>/<id>/", methods=['PUT'])
def _handle_specific_model_put(docid, typename, id):
    ''' Update a specific model with a given id and typename for a
    given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to update
    :param typename: the type of objects to find and return
    :param id: unique id of the object to update
    :status 200: when user is authorized
    :status 401: when user is not authorized
    '''
    # Delegates to the shared dispatcher keyed on the live request method.
    return _handle_specific_model(docid, typename, id, request.method)
@bokeh_app.route("/bokeh/bb/<docid>/<typename>/<id>/", methods=['PATCH'])
def _handle_specific_model_patch(docid, typename, id):
    ''' Update a specific model with a given id and typename for a
    given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to update
    :param typename: the type of objects to find and return
    :param id: unique id of the object to update
    :status 200: when user is authorized
    :status 401: when user is not authorized
    '''
    # PATCH and PUT are routed to the same update handler by the dispatcher.
    return _handle_specific_model(docid, typename, id, request.method)
@bokeh_app.route("/bokeh/bb/<docid>/<typename>/<id>/", methods=['DELETE'])
def _handle_specific_model_delete(docid, typename, id):
    ''' Delete a specific model with a given id and typename for a
    given :class:`Document <bokeh.document.Document>`.

    :param docid: id of the :class:`Document <bokeh.document.Document>`
        to delete from
    :param typename: the type of objects to find and return
    :param id: unique id of the object to delete
    :status 200: when user is authorized
    :status 401: when user is not authorized
    '''
    # Delegates to the shared dispatcher keyed on the live request method.
    return _handle_specific_model(docid, typename, id, request.method)
# individual model methods
@check_read_authentication_and_create_client
def getbyid(docid, typename, id):
    """Return the 'attributes' of model *id* from the document as JSON.

    The *typename* argument is accepted for route symmetry but unused here.
    """
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    prune(clientdoc)
    attr = clientdoc.dump(clientdoc._models[id])[0]['attributes']
    return make_json(protocol.serialize_json(attr))
@check_write_authentication_and_create_client
def update(docid, typename, id):
    """Apply a JSON payload to model *id* and broadcast resulting changes.

    We need to distinguish between writing and patching models -- in a
    write we shouldn't remove unspecified attrs (we currently don't
    handle this correctly).
    """
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    log.info("loading done %s", len(clientdoc._models.values()))
    prune(clientdoc)
    init_bokeh(clientdoc)
    log.info("updating")
    modeldata = protocol.deserialize_json(request.data.decode('utf-8'))
    # patch id is not passed in the payload, so re-attach it from the URL
    modeldata['id'] = id
    modeldata = {'type' : typename,
                 'attributes' : modeldata}
    # 'existing' events so the change propagates to already-loaded models
    clientdoc.load(modeldata, events='existing', dirty=True)
    log.info("done")
    log.info("saving")
    changed = bokeh_app.backbone_storage.store_document(clientdoc)
    log.debug("changed, %s", str(changed))
    ws_update(clientdoc, changed)
    log.debug("update, %s, %s", docid, typename)
    # backbone expects us to send back attrs of this model, but it doesn't
    # make sense to do so because we modify other models, and we want this to
    # all go out over the websocket channel
    return make_json(protocol.serialize_json({'noop' : True}))
@check_write_authentication_and_create_client
def delete(docid, typename, id):
    """Remove model *id* from the document, broadcast the deletion over the
    websocket channel, and return the deleted model's attributes as JSON.
    """
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    prune(clientdoc)
    model = clientdoc._models[id]
    clientdoc.del_obj(docid, model)
    ws_delete(clientdoc, [model])
    return make_json(protocol.serialize_json(clientdoc.dump(model)[0]['attributes']))
# rpc route
@bokeh_app.route("/bokeh/bb/rpc/<docid>/<typename>/<id>/<funcname>/", methods=['POST', 'OPTIONS'])
@crossdomain(origin="*", methods=['POST'], headers=['BOKEH-API-KEY', 'Continuum-Clientid', 'Content-Type'])
@check_write_authentication_and_create_client
def rpc(docid, typename, id, funcname):
    """Invoke method *funcname* on model *id* with JSON-supplied args/kwargs,
    persist any resulting model changes, broadcast them, and return the result.

    NOTE(review): *funcname* comes straight from the URL and is resolved via
    getattr on the model -- confirm upstream auth restricts what is callable.
    """
    clientdoc = bokeh_app.backbone_storage.get_document(docid)
    prune(clientdoc)
    model = clientdoc._models[id]
    data = protocol.deserialize_json(request.data.decode('utf-8'))
    args = data.get('args', [])
    kwargs = data.get('kwargs', {})
    result = getattr(model, funcname)(*args, **kwargs)
    log.debug("rpc, %s, %s", docid, typename)
    changed = bokeh_app.backbone_storage.store_document(clientdoc)
    ws_update(clientdoc, changed)
    return make_json(protocol.serialize_json(result))
| |
""" Testing DTI
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from nose.tools import (assert_true, assert_equal,
assert_almost_equal, assert_raises)
import numpy.testing as npt
from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_
import nibabel as nib
import scipy.optimize as opt
import dipy.reconst.dti as dti
from dipy.reconst.dti import (axial_diffusivity, color_fa,
fractional_anisotropy, from_lower_triangular,
lower_triangular, mean_diffusivity,
radial_diffusivity, TensorModel, trace,
linearity, planarity, sphericity)
from dipy.io.bvectxt import read_bvec_file
from dipy.data import get_data, dsi_voxels, get_sphere
from dipy.core.subdivide_octahedron import create_unit_sphere
from dipy.reconst.odf import gfa
import dipy.core.gradients as grad
import dipy.core.sphere as dps
from dipy.sims.voxel import single_tensor
def test_roll_evals():
    """_roll_evals must reject eigenvalue arrays with an invalid shape."""
    # A length-2 vector is not a valid set of 3 eigenvalues, so this
    # should never pass through silently.
    bad_evals = np.array([1, 0.5])
    npt.assert_raises(ValueError, dti._roll_evals, bad_evals)
def test_tensor_algebra():
    """
    Test that the computation of tensor determinant and norm is correct
    """
    # Random batch of 3x3 "tensors"; compare the vectorized dipy results
    # against numpy.linalg one matrix at a time.
    test_arr = np.random.rand(10, 3, 3)
    t_det = dti.determinant(test_arr)
    t_norm = dti.norm(test_arr)
    for i, x in enumerate(test_arr):
        assert_almost_equal(np.linalg.det(x), t_det[i])
        assert_almost_equal(np.linalg.norm(x), t_norm[i])
def test_TensorModel():
    """Smoke- and correctness-test TensorModel fitting on real and synthetic data."""
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    # FA and the GFA of the ODF should roughly agree (1 decimal)
    assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)
    # Check that the multivoxel case works:
    dtifit = dm.fit(data)
    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])
    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3,3)))
    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Deviatoric (trace-free) part of the tensor, used to compute mode
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(A_squiggle / np.linalg.norm(A_squiggle))
    evecs = np.linalg.eigh(tensor)[1]
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape
    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method)
        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=\
        "Calculation of tensor from Y does not compare to analytical solution")
        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)
    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')
def test_indexing_on_TensorFit():
    """Indexing a TensorFit must slice model_params and preserve the type."""
    params = np.zeros([2, 3, 4, 12])
    fit = dti.TensorFit(None, params)
    # Should return a TensorFit of appropriate shape
    assert_equal(fit.shape, (2, 3, 4))
    fit1 = fit[0]
    assert_equal(fit1.shape, (3, 4))
    assert_equal(type(fit1), dti.TensorFit)
    fit1 = fit[0, 0, 0]
    assert_equal(fit1.shape, ())
    assert_equal(type(fit1), dti.TensorFit)
    # Fancy/sliced indexing should also work
    fit1 = fit[[0], slice(None)]
    assert_equal(fit1.shape, (1, 3, 4))
    assert_equal(type(fit1), dti.TensorFit)
    # Should raise an index error if too many indices are passed
    assert_raises(IndexError, fit.__getitem__, (0, 0, 0, 0))
def test_fa_of_zero():
    """FA of all-zero eigenvalues is defined to be 0 (no NaN from 0/0)."""
    zero_evals = np.zeros((4, 3))
    assert_array_equal(fractional_anisotropy(zero_evals), 0)
def test_diffusivities():
    """Scalar diffusivity measures from a known single-tensor signal."""
    psphere = get_sphere('symmetric362')
    bvecs = np.concatenate(([[0, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = grad.gradient_table(bvals, bvecs)
    # Known eigenvalues: 0.0015 (axial), 0.0003 and 0.0001 (radial)
    mevals = np.array(([0.0015, 0.0003, 0.0001], [0.0015, 0.0003, 0.0003]))
    mevecs = [ np.array( [ [1, 0, 0], [0, 1, 0], [0, 0, 1] ] ),
               np.array( [ [0, 0, 1], [0, 1, 0], [1, 0, 0] ] ) ]
    S = single_tensor( gtab, 100, mevals[0], mevecs[0], snr=None )
    dm = dti.TensorModel(gtab, 'LS')
    dmfit = dm.fit(S)
    md = mean_diffusivity(dmfit.evals)
    Trace = trace(dmfit.evals)
    rd = radial_diffusivity(dmfit.evals)
    ad = axial_diffusivity(dmfit.evals)
    lin = linearity(dmfit.evals)
    plan = planarity(dmfit.evals)
    spher = sphericity(dmfit.evals)
    # Each measure must match its closed-form value from the input evals
    assert_almost_equal(md, (0.0015 + 0.0003 + 0.0001) / 3)
    assert_almost_equal(Trace, (0.0015 + 0.0003 + 0.0001))
    assert_almost_equal(ad, 0.0015)
    assert_almost_equal(rd, (0.0003 + 0.0001) / 2)
    assert_almost_equal(lin, (0.0015 - 0.0003)/Trace)
    assert_almost_equal(plan, 2 * (0.0003 - 0.0001)/Trace)
    assert_almost_equal(spher, (3 * 0.0001)/Trace)
def test_color_fa():
    """color_fa: shape validation plus 3D/2D/1D cases with identity evecs."""
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dmfit = dm.fit(data)
    fa = fractional_anisotropy(dmfit.evals)
    # NOTE(review): this first cfa is immediately discarded below -- it only
    # smoke-tests color_fa on real fitted data.
    cfa = color_fa(fa, dmfit.evecs)
    fa = np.ones((3, 3, 3))
    # evecs should be of shape (fa, 3, 3)
    evecs = np.zeros(fa.shape + (3,2))
    npt.assert_raises(ValueError, color_fa, fa, evecs)
    evecs = np.zeros(fa.shape + (3, 3))
    evecs[..., :, :] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    assert_equal(fa.shape, evecs[..., 0, 0].shape)
    assert_equal((3, 3), evecs.shape[-2:])
    # 3D test case
    fa = np.ones((3, 3, 3))
    evecs = np.zeros(fa.shape + (3, 3))
    evecs[..., :, :] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    cfa = color_fa(fa, evecs)
    # With FA == 1 and identity evecs, every voxel maps to pure red (1, 0, 0)
    cfa_truth = np.array([1, 0, 0])
    true_cfa = np.reshape(np.tile(cfa_truth, 27), [3, 3, 3, 3])
    assert_array_equal(cfa, true_cfa)
    # 2D test case
    fa = np.ones((3, 3))
    evecs = np.zeros(fa.shape + (3, 3))
    evecs[..., :, :] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    cfa = color_fa(fa, evecs)
    cfa_truth = np.array([1, 0, 0])
    true_cfa = np.reshape(np.tile(cfa_truth, 9), [3, 3, 3])
    assert_array_equal(cfa, true_cfa)
    # 1D test case
    fa = np.ones((3))
    evecs = np.zeros(fa.shape + (3, 3))
    evecs[..., :, :] = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    cfa = color_fa(fa, evecs)
    cfa_truth = np.array([1, 0, 0])
    true_cfa = np.reshape(np.tile(cfa_truth, 3), [3, 3])
    assert_array_equal(cfa, true_cfa)
def test_WLS_and_LS_fit():
    """
    Tests the WLS and LS fitting functions to see if they returns the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.
    """
    ### Defining Test Voxel (avoid nibabel dependency) ###
    # Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    gtab = grad.gradient_table(bval, bvec)
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape
    ### Testing WLS Fit on Single Voxel ###
    # If you do something wonky (negative min_signal), you should get an error:
    model = TensorModel(gtab, min_signal=-1, fit_method='WLS')
    npt.assert_raises(ValueError, model.fit, Y)
    # Estimate tensor from test signals
    model = TensorModel(gtab, min_signal=1e-8, fit_method='WLS')
    tensor_est = model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                              err_msg="Calculation of tensor from Y does not "
                                      "compare to analytical solution")
    assert_almost_equal(tensor_est.md[0], md)
    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
    # Test using fit_method='LS'
    model = TensorModel(gtab, min_signal=1e-8, fit_method='LS')
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
    assert_array_almost_equal(tensor_est.linearity, linearity(evals))
    assert_array_almost_equal(tensor_est.planarity, planarity(evals))
    assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
def test_masked_array_with_Tensor():
    """Fitting with a boolean mask must preserve shapes through indexing."""
    data = np.ones((2, 4, 56))
    mask = np.array([[True, False, False, True],
                     [True, False, True, False]])
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bval, bvec.T)
    tensor_model = TensorModel(gtab, min_signal=1e-9)
    tensor = tensor_model.fit(data, mask=mask)
    assert_equal(tensor.shape, (2, 4))
    assert_equal(tensor.fa.shape, (2, 4))
    assert_equal(tensor.evals.shape, (2, 4, 3))
    assert_equal(tensor.evecs.shape, (2, 4, 3, 3))
    # Indexing the fit drops the leading dimension...
    tensor = tensor[0]
    assert_equal(tensor.shape, (4,))
    assert_equal(tensor.fa.shape, (4,))
    assert_equal(tensor.evals.shape, (4, 3))
    assert_equal(tensor.evecs.shape, (4, 3, 3))
    # ...down to a single-voxel (scalar-shaped) fit
    tensor = tensor[0]
    assert_equal(tensor.shape, tuple())
    assert_equal(tensor.fa.shape, tuple())
    assert_equal(tensor.evals.shape, (3,))
    assert_equal(tensor.evecs.shape, (3, 3))
    assert_equal(type(tensor.model_params), np.ndarray)
def test_fit_method_error():
    """TensorModel must reject unknown fit_method names at construction."""
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bval, bvec.T)
    # A recognized method constructs cleanly...
    tensor_model = TensorModel(gtab, fit_method='WLS')
    # ...while an unknown one must raise ValueError.
    assert_raises(ValueError, TensorModel, gtab, min_signal=1e-9,
                  fit_method='s')
def test_lower_triangular():
    """lower_triangular: single tensor, b0 padding, bad shape, and batches."""
    tensor = np.arange(9).reshape((3, 3))
    D = lower_triangular(tensor)
    assert_array_equal(D, [0, 3, 4, 6, 7, 8])
    # Passing a b0 appends -log(b0)*b0 -- here log(1)*1 == 0
    D = lower_triangular(tensor, 1)
    assert_array_equal(D, [0, 3, 4, 6, 7, 8, 0])
    # Non-square input must be rejected
    assert_raises(ValueError, lower_triangular, np.zeros((2, 3)))
    # Batched case: same result broadcast over a (4, 5, 6) grid
    shape = (4, 5, 6)
    many_tensors = np.empty(shape + (3, 3))
    many_tensors[:] = tensor
    result = np.empty(shape + (6,))
    result[:] = [0, 3, 4, 6, 7, 8]
    D = lower_triangular(many_tensors)
    assert_array_equal(D, result)
    D = lower_triangular(many_tensors, 1)
    result = np.empty(shape + (7,))
    result[:] = [0, 3, 4, 6, 7, 8, 0]
    assert_array_equal(D, result)
def test_from_lower_triangular():
    """from_lower_triangular: reconstruct symmetric 3x3 tensors, incl. batches."""
    result = np.array([[0, 1, 3],
                       [1, 2, 4],
                       [3, 4, 5]])
    # 7th element (index 6, the log-S0 slot) is ignored by the reconstruction
    D = np.arange(7)
    tensor = from_lower_triangular(D)
    assert_array_equal(tensor, result)
    # Batched case broadcasts to a (5, 4) grid of identical tensors
    result = result * np.ones((5, 4, 1, 1))
    D = D * np.ones((5, 4, 1))
    tensor = from_lower_triangular(D)
    assert_array_equal(tensor, result)
def test_all_constant():
    """A constant signal (all zeros or all equal) must yield FA == 0 for
    every fit method."""
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # BUG FIX: the loop variable was never used -- every iteration built the
    # default model, so only one fit method was actually exercised. 'NNLS'
    # was also a typo for dipy's 'NLLS' fit method name.
    fit_methods = ['LS', 'OLS', 'NLLS']
    for fit_method in fit_methods:
        dm = dti.TensorModel(gtab, fit_method=fit_method)
        assert_almost_equal(dm.fit(np.zeros(bvals.shape[0])).fa, 0)
        assert_almost_equal(dm.fit(100 * np.ones(bvals.shape[0])).fa, 0)
def test_mask():
    """Masked fitting must zero out excluded voxels and match the unmasked
    fit on included ones."""
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    mask = np.zeros(data.shape[:-1], dtype=bool)
    mask[0, 0, 0] = True
    dtifit = dm.fit(data)
    dtifit_w_mask = dm.fit(data, mask=mask)
    # Without a mask it has some value
    assert_(not np.isnan(dtifit.fa[0, 0, 0]))
    # Where mask is False, evals, evecs and fa should all be 0
    assert_array_equal(dtifit_w_mask.evals[~mask], 0)
    assert_array_equal(dtifit_w_mask.evecs[~mask], 0)
    assert_array_equal(dtifit_w_mask.fa[~mask], 0)
    # Except for the one voxel that was selected by the mask:
    assert_almost_equal(dtifit_w_mask.fa[0, 0, 0], dtifit.fa[0, 0, 0])
def test_nnls_jacobian_fucn():
    """Analytical NLLS Jacobian must match a finite-difference approximation.

    NOTE(review): function name carries a typo ('fucn'); kept so external
    test collection/references are unaffected.
    """
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X,D))
    # Test Jacobian at D: compare each row against scipy's numerical gradient
    args = [X, Y]
    analytical = dti._nlls_jacobian_func(D, *args)
    for i in range(len(X)):
        args = [X[i], Y[i]]
        approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, *args)
        assert_true(np.allclose(approx, analytical[i]))
    # Test Jacobian at zero
    D = np.zeros_like(D)
    args = [X, Y]
    analytical = dti._nlls_jacobian_func(D, *args)
    for i in range(len(X)):
        args = [X[i], Y[i]]
        approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, *args)
        assert_true(np.allclose(approx, analytical[i]))
def test_nlls_fit_tensor():
    """
    Test the implementation of NLLS and RESTORE
    """
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X,D))
    Y.shape = (-1,) + Y.shape
    # Estimate tensor from test signals and compare against expected result
    # using non-linear least squares:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS')
    tensor_est = tensor_model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    assert_almost_equal(tensor_est.md[0], md)
    # You can also do this without the Jacobian (though it's slower):
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', jac=False)
    tensor_est = tensor_model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    assert_almost_equal(tensor_est.md[0], md)
    # Using the gmm weighting scheme:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='gmm')
    tensor_est = tensor_model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    assert_almost_equal(tensor_est.md[0], md)
    # If you use sigma weighting, you'd better provide a sigma:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='sigma')
    npt.assert_raises(ValueError, tensor_model.fit, Y)
    # Use NLLS with some actual 4D data:
    data, bvals, bvecs = get_data('small_25')
    gtab = grad.gradient_table(bvals, bvecs)
    tm1 = dti.TensorModel(gtab, fit_method='NLLS')
    dd = nib.load(data).get_data()
    tf1 = tm1.fit(dd)
    # NLLS and the default fit should roughly agree on FA (1 decimal)
    tm2 = dti.TensorModel(gtab)
    tf2 = tm2.fit(dd)
    assert_array_almost_equal(tf1.fa, tf2.fa, decimal=1)
def test_restore():
    """
    Test the implementation of the RESTORE algorithm
    """
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X,D))
    Y.shape = (-1,) + Y.shape
    # Corrupt one measurement at a time; RESTORE should down-weight the
    # outlier and still recover the true tensor.
    for drop_this in range(1, Y.shape[-1]):
        # RESTORE estimates should be robust to dropping
        this_y = Y.copy()
        this_y[:, drop_this] = 1.0
        tensor_model = dti.TensorModel(gtab, fit_method='restore',
                                       sigma=67.0)
        tensor_est = tensor_model.fit(this_y)
        assert_array_almost_equal(tensor_est.evals[0], evals, decimal=3)
        assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                  decimal=3)
def test_adc():
    """
    Test the implementation of the calculation of apparent diffusion coefficient
    """
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    mask = np.zeros(data.shape[:-1], dtype=bool)
    mask[0, 0, 0] = True
    dtifit = dm.fit(data)
    sphere = create_unit_sphere(4)
    # The ADC in the principal diffusion direction should be equal to the AD in
    # each voxel:
    pdd0 = dtifit.evecs[0,0,0,0]
    sphere_pdd0 = dps.Sphere(x=pdd0[0], y=pdd0[1], z=pdd0[2])
    assert_array_almost_equal(dtifit.adc(sphere_pdd0)[0,0,0],
                              dtifit.ad[0,0,0], decimal=5)
    # Test that it works for cases in which the data is 1D
    dtifit = dm.fit(data[0,0,0])
    sphere_pdd0 = dps.Sphere(x=pdd0[0], y=pdd0[1], z=pdd0[2])
    assert_array_almost_equal(dtifit.adc(sphere_pdd0),
                              dtifit.ad, decimal=5)
def test_predict():
    """
    Test model prediction API
    """
    psphere = get_sphere('symmetric362')
    bvecs = np.concatenate(([[1, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = grad.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001], [0.0015, 0.0003, 0.0003]))
    mevecs = [ np.array( [ [1, 0, 0], [0, 1, 0], [0, 0, 1] ] ),
               np.array( [ [0, 0, 1], [0, 1, 0], [1, 0, 0] ] ) ]
    S = single_tensor( gtab, 100, mevals[0], mevecs[0], snr=None )
    dm = dti.TensorModel(gtab, 'LS')
    dmfit = dm.fit(S)
    # Round-trip: predicting from the fitted params must reproduce the
    # noiseless input signal, via both the fit and the model API.
    assert_array_almost_equal(dmfit.predict(gtab, S0=100), S)
    assert_array_almost_equal(dm.predict(dmfit.model_params, S0=100), S)
    # Smoke-test prediction on real 4D data with a per-voxel S0
    data, gtab = dsi_voxels()
    dtim = dti.TensorModel(gtab)
    dtif = dtim.fit(data)
    S0 = np.mean(data[...,gtab.b0s_mask], -1)
    p = dtif.predict(gtab, S0)
| |
#!/usr/bin/env python
"""Tests for administrative flows."""
import os
import subprocess
import sys
import time
import psutil
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import email_alerts
from grr.lib import flags
from grr.lib import flow
from grr.lib import maintenance_utils
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import utils
class AdministrativeFlowTests(test_lib.FlowTestsBaseclass):
    """Empty base class grouping administrative-flow test cases."""
    pass
class TestAdministrativeFlows(AdministrativeFlowTests):
"""Tests the administrative flows."""
def setUp(self):
    """Redirect the client tempdir into the test sandbox when available."""
    super(TestAdministrativeFlows, self).setUp()
    test_tmp = os.environ.get("TEST_TMPDIR")
    if test_tmp:
        config_lib.CONFIG.Set("Client.tempdir", test_tmp)
def testUpdateConfig(self):
    """Ensure we can retrieve and update the config."""
    # Only mock the pieces we care about.
    client_mock = action_mocks.ActionMock("GetConfiguration",
                                          "UpdateConfiguration")
    loc = "http://www.example.com"
    new_config = rdfvalue.Dict(
        {"Client.control_urls": [loc],
         "Client.foreman_check_frequency": 3600,
         "Client.poll_min": 1})
    # Write the config.
    for _ in test_lib.TestFlowHelper("UpdateConfiguration", client_mock,
                                     client_id=self.client_id,
                                     token=self.token,
                                     config=new_config):
        pass
    # Now retrieve it again to see if it got written.
    for _ in test_lib.TestFlowHelper("Interrogate", client_mock,
                                     token=self.token,
                                     client_id=self.client_id):
        pass
    # The interrogate flow stores the client's config on the client object.
    fd = aff4.FACTORY.Open(self.client_id, token=self.token)
    config_dat = fd.Get(fd.Schema.GRR_CONFIGURATION)
    self.assertEqual(config_dat["Client.control_urls"], [loc])
    self.assertEqual(config_dat["Client.poll_min"], 1)
def CheckCrash(self, crash, expected_session_id):
    """Checks that ClientCrash object's fields are correctly filled in."""
    self.assertTrue(crash is not None)
    self.assertEqual(crash.client_id, self.client_id)
    self.assertEqual(crash.session_id, expected_session_id)
    self.assertEqual(crash.client_info.client_name, "GRR Monitor")
    self.assertEqual(crash.crash_type, "aff4:/flows/W:CrashHandler")
    self.assertEqual(crash.crash_message,
                     "Client killed during transaction")
def testClientKilled(self):
    """Test that client killed messages are handled correctly."""
    self.email_message = {}

    # Capture outgoing alert emails instead of sending them.
    def SendEmail(address, sender, title, message, **_):
        self.email_message.update(dict(address=address, sender=sender,
                                       title=title, message=message))

    with utils.Stubber(email_alerts, "SendEmail", SendEmail):
        client = test_lib.CrashClientMock(self.client_id, self.token)
        for _ in test_lib.TestFlowHelper(
            "FlowWithOneClientRequest", client, client_id=self.client_id,
            token=self.token, check_flow_errors=False):
            pass
        # We expect the email to be sent.
        self.assertEqual(self.email_message.get("address", ""),
                         config_lib.CONFIG["Monitoring.alert_email"])
        self.assertTrue(str(self.client_id) in self.email_message["title"])
        # Make sure the flow state is included in the email message.
        for s in ["Flow name", "FlowWithOneClientRequest", "current_state"]:
            self.assertTrue(s in self.email_message["message"])
        # The crashed flow must have ended up in the ERROR state.
        flow_obj = aff4.FACTORY.Open(client.flow_id, age=aff4.ALL_TIMES,
                                     token=self.token)
        self.assertEqual(flow_obj.state.context.state, rdfvalue.Flow.State.ERROR)
        # Make sure client object is updated with the last crash.
        client_obj = aff4.FACTORY.Open(self.client_id, token=self.token)
        crash = client_obj.Get(client_obj.Schema.LAST_CRASH)
        self.CheckCrash(crash, flow_obj.session_id)
        # Make sure crashes RDFValueCollections are created and written
        # into proper locations. First check the per-client crashes collection.
        client_crashes = sorted(
            list(aff4.FACTORY.Open(self.client_id.Add("crashes"),
                                   aff4_type="PackedVersionedCollection",
                                   token=self.token)),
            key=lambda x: x.timestamp)
        self.assertTrue(len(client_crashes) >= 1)
        crash = list(client_crashes)[0]
        self.CheckCrash(crash, flow_obj.session_id)
        # Check per-flow crash collection. Check that crash written there is
        # equal to per-client crash.
        flow_crashes = sorted(
            list(flow_obj.GetValuesForAttribute(flow_obj.Schema.CLIENT_CRASH)),
            key=lambda x: x.timestamp)
        self.assertEqual(len(flow_crashes), len(client_crashes))
        for a, b in zip(flow_crashes, client_crashes):
            self.assertEqual(a, b)
        # Check global crash collection. Check that crash written there is
        # equal to per-client crash.
        global_crashes = sorted(
            aff4.FACTORY.Open(aff4.ROOT_URN.Add("crashes"),
                              aff4_type="PackedVersionedCollection",
                              token=self.token),
            key=lambda x: x.timestamp)
        self.assertEqual(len(global_crashes), len(client_crashes))
        for a, b in zip(global_crashes, client_crashes):
            self.assertEqual(a, b)
def testNannyMessage(self):
    """A NannyMessage event must alert by email and record a crash."""
    nanny_message = "Oh no!"
    self.email_message = {}

    # Capture outgoing alert emails instead of sending them.
    def SendEmail(address, sender, title, message, **_):
        self.email_message.update(dict(address=address, sender=sender,
                                       title=title, message=message))

    with utils.Stubber(email_alerts, "SendEmail", SendEmail):
        msg = rdfvalue.GrrMessage(
            session_id=rdfvalue.SessionID("aff4:/flows/W:NannyMessage"),
            args=rdfvalue.DataBlob(string=nanny_message).SerializeToString(),
            source=self.client_id,
            auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED)
        # This is normally done by the FrontEnd when a CLIENT_KILLED message is
        # received.
        flow.Events.PublishEvent("NannyMessage", msg, token=self.token)
        # Now emulate a worker to process the event.
        worker = test_lib.MockWorker(token=self.token)
        while worker.Next():
            pass
        worker.pool.Join()
        # We expect the email to be sent.
        self.assertEqual(self.email_message.get("address"),
                         config_lib.CONFIG["Monitoring.alert_email"])
        self.assertTrue(str(self.client_id) in self.email_message["title"])
        # Make sure the message is included in the email message.
        self.assertTrue(nanny_message in self.email_message["message"])
        # Make sure crashes RDFValueCollections are created and written
        # into proper locations. First check the per-client crashes collection.
        client_crashes = list(aff4.FACTORY.Open(
            self.client_id.Add("crashes"),
            aff4_type="PackedVersionedCollection",
            token=self.token))
        self.assertEqual(len(client_crashes), 1)
        crash = client_crashes[0]
        self.assertEqual(crash.client_id, self.client_id)
        self.assertEqual(crash.client_info.client_name, "GRR Monitor")
        self.assertEqual(crash.crash_type, "aff4:/flows/W:NannyMessage")
        self.assertEqual(crash.crash_message, nanny_message)
        # Check global crash collection. Check that crash written there is
        # equal to per-client crash.
        global_crashes = list(aff4.FACTORY.Open(
            aff4.ROOT_URN.Add("crashes"),
            aff4_type="PackedVersionedCollection",
            token=self.token))
        self.assertEqual(len(global_crashes), 1)
        self.assertEqual(global_crashes[0], crash)
def testStartupHandler(self):
  """SendStartupInfo should only update records when boot/build info changes."""
  # Clean the client records.
  aff4.FACTORY.Delete(self.client_id, token=self.token)

  client_mock = action_mocks.ActionMock("SendStartupInfo")
  for _ in test_lib.TestFlowHelper(
      "ClientActionRunner", client_mock, client_id=self.client_id,
      action="SendStartupInfo", token=self.token):
    pass

  # Check the client's boot time and info.
  fd = aff4.FACTORY.Open(self.client_id, token=self.token)
  client_info = fd.Get(fd.Schema.CLIENT_INFO)
  boot_time = fd.Get(fd.Schema.LAST_BOOT_TIME)

  self.assertEqual(client_info.client_name,
                   config_lib.CONFIG["Client.name"])
  self.assertEqual(client_info.client_description,
                   config_lib.CONFIG["Client.description"])

  # Check that the boot time is accurate.
  self.assertAlmostEqual(psutil.boot_time(), boot_time.AsSecondsFromEpoch())

  # Run it again - this should not update any record.
  for _ in test_lib.TestFlowHelper(
      "ClientActionRunner", client_mock, client_id=self.client_id,
      action="SendStartupInfo", token=self.token):
    pass

  fd = aff4.FACTORY.Open(self.client_id, token=self.token)
  self.assertEqual(boot_time.age, fd.Get(fd.Schema.LAST_BOOT_TIME).age)
  self.assertEqual(client_info.age, fd.Get(fd.Schema.CLIENT_INFO).age)

  # Simulate a reboot in 10 minutes.
  current_boot_time = psutil.boot_time()
  # BUG FIX: psutil.boot_time was previously monkey-patched in place and
  # never restored, leaking the fake boot time into any test run after this
  # one. Use utils.Stubber (as the rest of this file does) so the original
  # function is restored when the block exits.
  with utils.Stubber(psutil, "boot_time", lambda: current_boot_time + 600):
    # Run it again - this should now update the boot time.
    for _ in test_lib.TestFlowHelper(
        "ClientActionRunner", client_mock, client_id=self.client_id,
        action="SendStartupInfo", token=self.token):
      pass

    # Ensure only this attribute is updated.
    fd = aff4.FACTORY.Open(self.client_id, token=self.token)
    self.assertNotEqual(int(boot_time.age),
                        int(fd.Get(fd.Schema.LAST_BOOT_TIME).age))
    self.assertEqual(int(client_info.age),
                     int(fd.Get(fd.Schema.CLIENT_INFO).age))

    # Now set a new client build time.
    config_lib.CONFIG.Set("Client.build_time", time.ctime())

    # Run it again - this should now update the client info.
    for _ in test_lib.TestFlowHelper(
        "ClientActionRunner", client_mock, client_id=self.client_id,
        action="SendStartupInfo", token=self.token):
      pass

    # Ensure the client info attribute is updated.
    fd = aff4.FACTORY.Open(self.client_id, token=self.token)
    self.assertNotEqual(int(client_info.age),
                        int(fd.Get(fd.Schema.CLIENT_INFO).age))
def testExecutePythonHack(self):
  """An uploaded signed python hack should execute on the client."""
  mock_client = action_mocks.ActionMock("ExecutePython")

  # The hack plants a marker attribute on sys; observing the marker proves
  # the code actually ran on the mocked client.
  sys.test_code_ran_here = False
  code = """
import sys
sys.test_code_ran_here = True
"""
  maintenance_utils.UploadSignedConfigBlob(
      code, aff4_path="aff4:/config/python_hacks/test", token=self.token)

  # Drain the flow generator to completion.
  list(test_lib.TestFlowHelper(
      "ExecutePythonHack", mock_client, client_id=self.client_id,
      hack_name="test", token=self.token))

  self.assertTrue(sys.test_code_ran_here)
def testExecutePythonHackWithArgs(self):
  """py_args passed to a python hack should be visible to the hack code."""
  mock_client = action_mocks.ActionMock("ExecutePython")

  # Seed the marker with a sentinel; the hack overwrites it with the
  # py_args value it receives.
  sys.test_code_ran_here = 1234
  code = """
import sys
sys.test_code_ran_here = py_args['value']
"""
  maintenance_utils.UploadSignedConfigBlob(
      code, aff4_path="aff4:/config/python_hacks/test", token=self.token)

  # Drain the flow generator to completion.
  list(test_lib.TestFlowHelper(
      "ExecutePythonHack", mock_client, client_id=self.client_id,
      hack_name="test", py_args=dict(value=5678), token=self.token))

  self.assertEqual(sys.test_code_ran_here, 5678)
def testExecuteBinariesWithArgs(self):
  """LaunchBinary should run an uploaded signed binary with the given args."""
  client_mock = action_mocks.ActionMock("ExecuteBinaryCommand")

  code = "I am a binary file"
  upload_path = config_lib.CONFIG["Executables.aff4_path"].Add("test.exe")

  maintenance_utils.UploadSignedConfigBlob(
      code, aff4_path=upload_path, token=self.token)

  class Popen(object):
    """A mock object for subprocess.Popen."""

    def __init__(self, run, stdout, stderr, stdin):
      # Record invocation details on the class itself so the test can
      # inspect them after the flow completes.
      Popen.running_args = run
      Popen.stdout = stdout
      Popen.stderr = stderr
      Popen.stdin = stdin
      Popen.returncode = 0

      # Store the content of the executable file.
      Popen.binary = open(run[0]).read()

    def communicate(self):  # pylint: disable=g-bad-name
      return "stdout here", "stderr here"

  # This flow has an acl, the user needs to be admin.
  user = aff4.FACTORY.Create("aff4:/users/%s" % self.token.username,
                             mode="rw", aff4_type="GRRUser", token=self.token)
  user.SetLabels("admin", owner="GRR")
  user.Close()

  with utils.Stubber(subprocess, "Popen", Popen):
    for _ in test_lib.TestFlowHelper(
        "LaunchBinary", client_mock, client_id=self.client_id,
        binary=upload_path, command_line="--value 356", token=self.token):
      pass

    # Check that the executable file contains the code string.
    self.assertEqual(Popen.binary, code)

    # At this point, the actual binary should have been cleaned up by the
    # client action so it should not exist.
    self.assertRaises(IOError, open, Popen.running_args[0])

    # Check the binary was run with the correct command line.
    self.assertEqual(Popen.running_args[1], "--value")
    self.assertEqual(Popen.running_args[2], "356")

    # Check the command was in the tmp file.
    self.assertTrue(Popen.running_args[0].startswith(
        config_lib.CONFIG["Client.tempdir"]))
def testExecuteLargeBinaries(self):
  """A binary uploaded in many parts should be reassembled and launched."""
  client_mock = action_mocks.ActionMock("ExecuteBinaryCommand")

  code = "I am a large binary file" * 100
  upload_path = config_lib.CONFIG["Executables.aff4_path"].Add("test.exe")

  # limit=100 forces the 2400-byte blob to be split across many parts.
  maintenance_utils.UploadSignedConfigBlob(
      code, aff4_path=upload_path, limit=100, token=self.token)

  # Ensure the aff4 collection has many items.
  fd = aff4.FACTORY.Open(upload_path, token=self.token)

  # There should be 24 parts to this binary.
  self.assertEqual(len(fd.collection), 24)

  # Total size is 2400.
  self.assertEqual(len(fd), 2400)

  class Popen(object):
    """A mock object for subprocess.Popen."""

    def __init__(self, run, stdout, stderr, stdin):
      # Record invocation details on the class itself so the test can
      # inspect them after the flow completes.
      Popen.running_args = run
      Popen.stdout = stdout
      Popen.stderr = stderr
      Popen.stdin = stdin
      Popen.returncode = 0

      # Store the content of the executable file.
      Popen.binary = open(run[0]).read()

    def communicate(self):  # pylint: disable=g-bad-name
      return "stdout here", "stderr here"

  # This flow has an acl, the user needs to be admin.
  user = aff4.FACTORY.Create("aff4:/users/%s" % self.token.username,
                             mode="rw", aff4_type="GRRUser", token=self.token)
  user.SetLabels("admin", owner="GRR")
  user.Close()

  with utils.Stubber(subprocess, "Popen", Popen):
    for _ in test_lib.TestFlowHelper(
        "LaunchBinary", client_mock, client_id=self.client_id,
        binary=upload_path, command_line="--value 356", token=self.token):
      pass

    # Check that the executable file contains the code string.
    self.assertEqual(Popen.binary, code)

    # At this point, the actual binary should have been cleaned up by the
    # client action so it should not exist.
    self.assertRaises(IOError, open, Popen.running_args[0])

    # Check the binary was run with the correct command line.
    self.assertEqual(Popen.running_args[1], "--value")
    self.assertEqual(Popen.running_args[2], "356")

    # Check the command was in the tmp file.
    self.assertTrue(Popen.running_args[0].startswith(
        config_lib.CONFIG["Client.tempdir"]))
def testGetClientStats(self):
  """GetClientStats should downsample client samples into time bins."""

  class ClientMock(object):

    def GetClientStats(self, _):
      """Fake get client stats method."""
      response = rdfvalue.ClientStats()
      # 12 samples, 10 seconds (10e6 microseconds) apart.
      for i in range(12):
        sample = rdfvalue.CpuSample(
            timestamp=int(i * 10 * 1e6),
            user_cpu_time=10 + i,
            system_cpu_time=20 + i,
            cpu_percent=10 + i)
        response.cpu_samples.Append(sample)

        sample = rdfvalue.IOSample(
            timestamp=int(i * 10 * 1e6),
            read_bytes=10 + i,
            write_bytes=10 + i)
        response.io_samples.Append(sample)

      return [response]

  for _ in test_lib.TestFlowHelper("GetClientStats", ClientMock(),
                                   token=self.token,
                                   client_id=self.client_id):
    pass

  urn = self.client_id.Add("stats")
  stats_fd = aff4.FACTORY.Create(urn, "ClientStats", token=self.token,
                                 mode="rw")
  sample = stats_fd.Get(stats_fd.Schema.STATS)

  # Samples are taken at the following timestamps and should be split into 2
  # bins as follows (sample_interval is 60000000):
  # 00000000, 10000000, 20000000, 30000000, 40000000, 50000000 -> Bin 1
  # 60000000, 70000000, 80000000, 90000000, 100000000, 110000000 -> Bin 2
  self.assertEqual(len(sample.cpu_samples), 2)
  self.assertEqual(len(sample.io_samples), 2)

  # Per the expected values below: byte counters and CPU times keep the
  # final sample of each bin (15/21 and 15.0/31.0), while cpu_percent is
  # the bin mean.
  self.assertAlmostEqual(sample.io_samples[0].read_bytes, 15.0)
  self.assertAlmostEqual(sample.io_samples[1].read_bytes, 21.0)

  self.assertAlmostEqual(sample.cpu_samples[0].cpu_percent,
                         sum(range(10, 16))/6.0)
  self.assertAlmostEqual(sample.cpu_samples[1].cpu_percent,
                         sum(range(16, 22))/6.0)

  self.assertAlmostEqual(sample.cpu_samples[0].user_cpu_time, 15.0)
  self.assertAlmostEqual(sample.cpu_samples[1].system_cpu_time, 31.0)
class TestApplyLabelsToClientsFlow(AdministrativeFlowTests):
  """Tests for ApplyLabelsToClientsFlow."""

  def GetClientLabels(self, client_id):
    """Return the list of labels currently attached to the given client."""
    fd = aff4.FACTORY.Open(client_id, aff4_type="VFSGRRClient",
                           token=self.token)
    return list(fd.Get(fd.Schema.LABELS,
                       rdfvalue.AFF4ObjectLabelsList()).labels)

  def testAppliesSingleLabelToSingleClient(self):
    client_id = self.SetupClients(1)[0]

    self.assertFalse(self.GetClientLabels(client_id))

    # FakeTime pins the label timestamp so it can be asserted exactly.
    with test_lib.FakeTime(42):
      flow.GRRFlow.StartFlow(flow_name="ApplyLabelsToClientsFlow",
                             clients=[client_id],
                             labels=["foo"],
                             token=self.token)

    self.assertListEqual(
        self.GetClientLabels(client_id),
        [rdfvalue.AFF4ObjectLabel(
            name="foo", owner="test",
            timestamp=rdfvalue.RDFDatetime().FromSecondsFromEpoch(42))])

  def testAppliesSingleLabelToMultipleClients(self):
    client_ids = self.SetupClients(3)

    for client_id in client_ids:
      self.assertFalse(self.GetClientLabels(client_id))

    with test_lib.FakeTime(42):
      flow.GRRFlow.StartFlow(flow_name="ApplyLabelsToClientsFlow",
                             clients=client_ids,
                             labels=["foo"],
                             token=self.token)

    # Every client should have received the same label.
    for client_id in client_ids:
      self.assertListEqual(
          self.GetClientLabels(client_id),
          [rdfvalue.AFF4ObjectLabel(
              name="foo", owner="test",
              timestamp=rdfvalue.RDFDatetime().FromSecondsFromEpoch(42))])

  def testAppliesMultipleLabelsToSingleClient(self):
    client_id = self.SetupClients(1)[0]

    self.assertFalse(self.GetClientLabels(client_id))

    with test_lib.FakeTime(42):
      flow.GRRFlow.StartFlow(flow_name="ApplyLabelsToClientsFlow",
                             clients=[client_id],
                             labels=["drei", "ein", "zwei"],
                             token=self.token)

    # Sort by name: label storage order is not asserted, only content.
    self.assertListEqual(
        sorted(self.GetClientLabels(client_id),
               key=lambda label: label.name),
        [rdfvalue.AFF4ObjectLabel(
            name="drei", owner="test",
            timestamp=rdfvalue.RDFDatetime().FromSecondsFromEpoch(42)),
         rdfvalue.AFF4ObjectLabel(
             name="ein", owner="test",
             timestamp=rdfvalue.RDFDatetime().FromSecondsFromEpoch(42)),
         rdfvalue.AFF4ObjectLabel(
             name="zwei", owner="test",
             timestamp=rdfvalue.RDFDatetime().FromSecondsFromEpoch(42))])

  def testAppliesMultipleLabelsToMultipleClients(self):
    client_ids = self.SetupClients(3)

    for client_id in client_ids:
      self.assertFalse(self.GetClientLabels(client_id))

    with test_lib.FakeTime(42):
      flow.GRRFlow.StartFlow(flow_name="ApplyLabelsToClientsFlow",
                             clients=client_ids,
                             labels=["drei", "ein", "zwei"],
                             token=self.token)

    for client_id in client_ids:
      self.assertListEqual(
          sorted(self.GetClientLabels(client_id),
                 key=lambda label: label.name),
          [rdfvalue.AFF4ObjectLabel(
              name="drei", owner="test",
              timestamp=rdfvalue.RDFDatetime().FromSecondsFromEpoch(42)),
           rdfvalue.AFF4ObjectLabel(
               name="ein", owner="test",
               timestamp=rdfvalue.RDFDatetime().FromSecondsFromEpoch(42)),
           rdfvalue.AFF4ObjectLabel(
               name="zwei", owner="test",
               timestamp=rdfvalue.RDFDatetime().FromSecondsFromEpoch(42))])

  def testAuditEntryIsCreatedForEveryClient(self):
    client_ids = self.SetupClients(3)

    flow.GRRFlow.StartFlow(flow_name="ApplyLabelsToClientsFlow",
                           clients=client_ids,
                           labels=["drei", "ein", "zwei"],
                           token=self.token)
    # Audit events are written out by a worker, so simulate one running.
    mock_worker = test_lib.MockWorker(token=self.token)
    mock_worker.Simulate()

    fd = aff4.FACTORY.Open("aff4:/audit/log", token=self.token)
    for client_id in client_ids:
      # Find the CLIENT_ADD_LABEL audit event for this particular client.
      found_event = None
      for event in fd:
        if (event.action == rdfvalue.AuditEvent.Action.CLIENT_ADD_LABEL and
            event.client == rdfvalue.ClientURN(client_id)):
          found_event = event
          break

      self.assertFalse(found_event is None)

      self.assertEqual(found_event.flow_name, "ApplyLabelsToClientsFlow")
      self.assertEqual(found_event.user, self.token.username)
      self.assertEqual(found_event.description, "test.drei,test.ein,test.zwei")
def main(argv):
  """Entry point: run the full test suite under GRR's test runner."""
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
  # Hand control to GRR's flag-parsing startup helper.
  flags.StartMain(main)
| |
import yaml
class RoleBuilder(object):
    """Derive deployment role -> host-IP mappings from an environment file.

    The environment file's ``default_attributes`` section describes the
    topology of a Eucalyptus cloud plus optional RiakCS, Ceph and Midokura
    sections.  ``get_roles`` maps each recognised role in ROLE_LIST to the
    set of host addresses that carry it; the special role ``'all'`` collects
    every host seen, and ``'cluster'`` maps cluster name -> member hosts.
    """

    # Global list of recognised roles; each becomes a key in the roles dict.
    ROLE_LIST = ['clc',
                 'user-facing',
                 'console',
                 'walrus',
                 'cluster-controller',
                 'storage-controller',
                 'node-controller',
                 'midonet-api',
                 'midolman',
                 'mido-zookeeper',
                 'mido-cassandra',
                 'mon-bootstrap',
                 'ceph-mons',
                 'ceph-osds',
                 'riak-head',
                 'riak-node',
                 'haproxy',
                 'nginx',
                 'all']

    def __init__(self, environment_file='environment.yml'):
        """Load the environment file and precompute the role mapping.

        :param environment_file: path to the environment YAML file
        """
        self.environment_file = environment_file
        self.env_dict = self.get_all_attributes()
        self.roles = self.get_roles()
        self.all_hosts = self.roles['all']

    def read_environment(self):
        """Parse the environment YAML file and return it as a dict."""
        # SECURITY NOTE(review): yaml.load without an explicit Loader can
        # instantiate arbitrary Python objects; prefer yaml.safe_load if the
        # environment file could ever come from an untrusted source.
        with open(self.environment_file) as env_file:
            return yaml.load(env_file.read())

    def get_all_attributes(self):
        """Return the 'default_attributes' section of the environment."""
        return self.read_environment()['default_attributes']

    def get_euca_attributes(self):
        """Return the Eucalyptus attributes, or None if absent."""
        return self.env_dict.get('eucalyptus')

    def get_riak_attributes(self):
        """Return the RiakCS cluster attributes, or None if absent."""
        return self.env_dict.get('riakcs_cluster')

    def get_ceph_attributes(self):
        """Return the Ceph attributes, or None if absent."""
        return self.env_dict.get('ceph')

    def _initialize_roles(self):
        """Return a fresh roles dict with an empty set for every known role."""
        return dict((role, set()) for role in self.ROLE_LIST)

    def get_euca_hosts(self):
        """Return the set of all hosts running Eucalyptus components."""
        roles = self.get_roles()
        # Set of Eucalyptus-only components.
        euca_components = ['user-facing', 'cluster-controller',
                           'storage-controller', 'node-controller']
        if roles['walrus']:
            euca_components.append('walrus')
        # Copy so we do not mutate roles['clc'] in place.
        all_hosts = set(roles['clc'])
        for component in euca_components:
            all_hosts.update(roles[component])
        return all_hosts

    def get_roles(self):
        """Build and return the role -> set-of-host-IPs mapping."""
        roles = self._initialize_roles()
        roles['all'] = set()
        self._add_riak_roles(roles)
        self._add_ceph_roles(roles)
        self._add_euca_roles(roles)
        return roles

    def _add_riak_roles(self, roles):
        """Populate RiakCS roles (head, nodes, load balancer) in place."""
        riak_attributes = self.get_riak_attributes()
        if not riak_attributes:
            return
        riak_topology = riak_attributes['topology']
        if riak_topology['head']:
            roles['riak-head'] = set([riak_topology['head']['ipaddr']])
            roles['all'].add(riak_topology['head']['ipaddr'])
        else:
            raise Exception("No head node found for RiakCS cluster!")
        if riak_topology.get('nodes'):
            for n in riak_topology['nodes']:
                roles['riak-node'].add(n)
                roles['all'].add(n)
        if riak_topology.get('load_balancer'):
            if self.env_dict.get('nginx'):
                raise Exception("Nginx: Not implemented yet.")
            elif self.env_dict.get('haproxy'):
                riak_lb = 'haproxy'
            else:
                raise Exception("No Load-Balancer found for RiakCS cluster.")
            roles[riak_lb] = set([riak_topology['load_balancer']])
            roles['all'].add(riak_topology['load_balancer'])

    def _add_ceph_roles(self, roles):
        """Populate Ceph roles (monitors, bootstrap monitor, OSDs) in place."""
        ceph_attributes = self.get_ceph_attributes()
        if not ceph_attributes:
            return
        ceph_topology = ceph_attributes['topology']
        if ceph_topology.get('mons'):
            mon_bootstrap = set()
            monset = set()
            for mon in ceph_topology['mons']:
                # Only the first monitor flagged 'init' bootstraps the cluster.
                if mon.get('init') and not mon_bootstrap:
                    mon_bootstrap.add(mon['ipaddr'])
                monset.add(mon['ipaddr'])
                roles['all'].add(mon['ipaddr'])
            if not mon_bootstrap:
                raise Exception("No Initial Ceph Monitor found! Please mention at least one initial monitor.\n"
                                "e.g\n"
                                "mons:\n"
                                " - ipaddr: '10.10.1.5'\n"
                                " hostname: 'node1'\n"
                                " init: true")
            roles['ceph-mons'] = monset
            roles['mon-bootstrap'] = mon_bootstrap
        if ceph_topology['osds']:
            osdset = set()
            for osd in ceph_topology['osds']:
                osdset.add(osd['ipaddr'])
                roles['all'].add(osd['ipaddr'])
            roles['ceph-osds'] = osdset
        else:
            raise Exception("No OSD Found!")

    def _add_euca_roles(self, roles):
        """Populate Eucalyptus roles (CLC, UFS, walrus, clusters) in place.

        :raises IndexError: when a cluster lacks a CC, SC or node list
        """
        euca_attributes = self.get_euca_attributes()
        if not euca_attributes:
            return
        topology = euca_attributes['topology']
        # Add CLC
        roles['clc'] = set([topology['clc-1']])
        roles['all'].add(topology['clc-1'])
        # Add UFS
        roles['user-facing'] = set(topology['user-facing'])
        roles['all'].update(topology['user-facing'])
        # Add console
        if 'console' in topology:
            roles['console'] = set(topology['console'])
            roles['all'].update(topology['console'])
        # Add Walrus (absent means RiakCS provides object storage)
        if 'walrus' in topology:
            roles['walrus'] = set([topology['walrus']])
            roles['all'].add(topology['walrus'])
        else:
            roles['walrus'] = set()
        # Add cluster level components.
        # BUG FIX: roles['cluster'] was previously reset to {} inside the
        # loop, so only the last cluster survived; initialise it once here.
        roles['cluster'] = {}
        for name in topology['clusters']:
            cluster = topology['clusters'][name]
            if 'cc-1' not in cluster:
                raise IndexError("Unable to find CC in topology for cluster " + name)
            cc = cluster['cc-1']
            roles['cluster-controller'].add(cc)
            if 'sc-1' not in cluster:
                raise IndexError("Unable to find SC in topology for cluster " + name)
            sc = cluster['sc-1']
            roles['storage-controller'].add(sc)
            roles['cluster'][name] = set([cc, sc])
            if 'nodes' not in cluster:
                raise IndexError("Unable to find nodes in topology for cluster " + name)
            for node in cluster['nodes'].split():
                roles['node-controller'].add(node)
                roles['cluster'][name].add(node)
            roles['all'].update(roles['cluster'][name])
        self._add_midokura_roles(roles, euca_attributes)

    def _add_midokura_roles(self, roles, euca_attributes):
        """Populate Midokura (MidoNet) roles when running in VPCMIDO mode."""
        midokura_attributes = self.env_dict.get('midokura', None)
        if not (midokura_attributes and
                euca_attributes['network']['mode'] == 'VPCMIDO'):
            return
        mido = euca_attributes['network']['config-json']['Mido']
        mido_gw_hostname = mido.get('EucanetdHost', None)
        midolman_host_mapping = midokura_attributes.get('midolman-host-mapping', None)
        if midolman_host_mapping:
            mido_api_ip = midolman_host_mapping.get(mido_gw_hostname, None)
            if not mido_api_ip:
                raise Exception('Unable to find midonet-api ({0}) host '
                                'in midolman-host-mapping'.format(mido_gw_hostname))
            # Add the host IP for the midonet gw
            roles['midonet-api'].add(mido_api_ip)
            # Every mapped host and every node controller runs midolman
            # (.values() instead of py2-only .iteritems(); keys were unused).
            for host_ip in midolman_host_mapping.values():
                roles['midolman'].add(host_ip)
            for node in roles['node-controller']:
                roles['midolman'].add(node)
        for host in midokura_attributes.get('zookeepers', []):
            zk_ip = str(host).split(':')[0]
            roles['mido-zookeeper'].add(zk_ip)
            roles['all'].add(zk_ip)
        for host in midokura_attributes.get('cassandras', []):
            roles['mido-cassandra'].add(host)
            roles['all'].add(host)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) The James Hutton Institute 2017-2019
# (c) University of Strathclyde 2019-2020
# Author: Leighton Pritchard
#
# Contact:
# leighton.pritchard@strath.ac.uk
#
# Leighton Pritchard,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# 161 Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2017-2019 The James Hutton Institute
# Copyright (c) 2019-2020 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Provides the anim subcommand for pyani."""
import datetime
import logging
from argparse import Namespace
from itertools import combinations
from pathlib import Path
from typing import List, NamedTuple, Tuple
from tqdm import tqdm
from pyani import (
PyaniException,
anim,
pyani_config,
pyani_jobs,
run_sge,
run_multiprocessing as run_mp,
)
from pyani.pyani_files import collect_existing_output
from pyani.pyani_orm import (
Comparison,
PyaniORMException,
add_run,
add_run_genomes,
filter_existing_comparisons,
get_session,
update_comparison_matrices,
)
from pyani.pyani_tools import termcolor
# Convenience struct describing a pairwise comparison job for the SQLAlchemy
# implementation
class ComparisonJob(NamedTuple):
    """Pairwise comparison job for the SQLAlchemy implementation."""

    query: str  # NOTE(review): generate_joblist passes a Genome ORM object here; the str annotation looks stale — confirm
    subject: str  # NOTE(review): likewise a Genome ORM object in practice
    filtercmd: str  # delta-filter command line for this comparison
    nucmercmd: str  # nucmer command line for this comparison
    outfile: Path  # expected final output file (.delta or .filter)
    job: pyani_jobs.Job  # scheduled Job; None when output is recovered from disk
# Convenience struct describing an analysis run
class RunData(NamedTuple):
    """Convenience struct describing an analysis run."""

    method: str  # analysis method name, e.g. "ANIm"
    name: str  # run name (user-supplied or generated)
    date: datetime.datetime  # run start time
    cmdline: str  # command line that launched the run
class ComparisonResult(NamedTuple):
    """Convenience struct for a single nucmer comparison result."""

    qid: float  # query genome identifier (annotated float here — confirm intended type)
    sid: float  # subject genome identifier (annotated float here — confirm intended type)
    aln_length: int  # total aligned length
    sim_errs: int  # similarity error count
    pid: float  # percentage identity
    qlen: int  # query sequence length
    slen: int  # subject sequence length
    qcov: float  # query coverage fraction
    scov: float  # subject coverage fraction
class ProgData(NamedTuple):
    """Convenience struct for comparison program data/info."""

    program: str  # program name, e.g. "nucmer"
    version: str  # program version string
class ProgParams(NamedTuple):
    """Convenience struct for comparison parameters.

    Use default of zero for fragsize or else db queries will not work
    as SQLite/Python nulls do not match up well
    """

    fragsize: str  # fragment size setting (unused by ANIm itself; kept for db matching)
    maxmatch: bool  # whether nucmer runs with --maxmatch
def subcmd_anim(args: Namespace) -> None:
    """Perform ANIm on all genome files in an input directory.

    :param args: Namespace, command-line arguments

    Finds ANI by the ANIm method, as described in Richter et al (2009)
    Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.

    All FASTA format files (selected by suffix) in the input directory
    are compared against each other, pairwise, using NUCmer (whose path must
    be provided).

    For each pairwise comparison, the NUCmer .delta file output is parsed to
    obtain an alignment length and similarity error count for every unique
    region alignment between the two organisms, as represented by
    sequences in the FASTA files. These are processed to calculated aligned
    sequence lengths, average nucleotide identity (ANI) percentages, coverage
    (aligned percentage of whole genome - forward direction), and similarity
    error count for each pairwise comparison.

    The calculated values are deposited in the SQLite3 database being used for
    the analysis.

    For each pairwise comparison the NUCmer output is stored in the output
    directory for long enough to extract summary information, but for each run
    the output is gzip compressed. Once all runs are complete, the outputs
    for each comparison are concatenated into a single gzip archive.
    """
    # Create logger
    logger = logging.getLogger(__name__)

    # Announce the analysis
    logger.info(termcolor("Running ANIm analysis", bold=True))

    # Get current nucmer version
    nucmer_version = anim.get_version(args.nucmer_exe)
    logger.info(termcolor("MUMMer nucmer version: %s", "cyan"), nucmer_version)

    # Use the provided name or make one for the analysis
    start_time = datetime.datetime.now()
    name = args.name or "_".join(["ANIm", start_time.isoformat()])
    logger.info(termcolor("Analysis name: %s", "cyan"), name)

    # Get connection to existing database. This may or may not have data
    logger.debug("Connecting to database %s", args.dbpath)
    try:
        session = get_session(args.dbpath)
    except Exception:
        logger.error(
            "Could not connect to database %s (exiting)", args.dbpath, exc_info=True
        )
        raise SystemExit(1)

    # Add information about this run to the database
    logger.debug("Adding run info to database %s...", args.dbpath)
    try:
        run, run_id = add_run(
            session,
            method="ANIm",
            cmdline=args.cmdline,
            date=start_time,
            status="started",
            name=name,
        )
    except PyaniORMException:
        # BUG FIX: this handler previously interpolated run_id, which is
        # unbound when add_run() itself raises; the resulting NameError
        # masked the real error. Log the run name instead.
        logger.error(
            "Could not add run %s to the database (exiting)", name, exc_info=True
        )
        raise SystemExit(1)
    logger.debug("...added run ID: %s to the database", run_id)

    # Identify input files for comparison, and populate the database
    logger.debug("Adding genomes for run %s to database...", run_id)
    try:
        genome_ids = add_run_genomes(
            session, run, args.indir, args.classes, args.labels
        )
    except PyaniORMException:
        logger.error("Could not add genomes to database for run %s (exiting)", run_id)
        raise SystemExit(1)
    logger.debug("\t...added genome IDs: %s", genome_ids)

    # Generate commandlines for NUCmer analysis and output compression
    logger.info("Generating ANIm command-lines")
    deltadir = args.outdir / pyani_config.ALIGNDIR["ANIm"]
    logger.debug("NUCmer output will be written temporarily to %s", deltadir)

    # Create output directories
    logger.debug("Creating output directory %s", deltadir)
    try:
        deltadir.mkdir(exist_ok=True, parents=True)
    except IOError:
        logger.error(
            "Could not create output directory %s (exiting)", deltadir, exc_info=True
        )
        raise SystemError(1)

    # Get list of genome IDs for this analysis from the database
    logger.info("Compiling genomes for comparison")
    genomes = run.genomes.all()
    logger.debug("Collected %s genomes for this run", len(genomes))

    # Generate all pair combinations of genome IDs as a list of (Genome, Genome) tuples
    logger.info(
        "Compiling pairwise comparisons (this can take time for large datasets)..."
    )
    comparisons = list(combinations(tqdm(genomes, disable=args.disable_tqdm), 2))
    logger.info("\t...total pairwise comparisons to be performed: %s", len(comparisons))

    # Check for existing comparisons; if one has been done (for the same
    # software package, version, and setting) we add the comparison to this run,
    # but remove it from the list of comparisons to be performed
    logger.info("Checking database for existing comparison data...")
    comparisons_to_run = filter_existing_comparisons(
        session,
        run,
        comparisons,
        "nucmer",
        nucmer_version,
        fragsize=None,
        maxmatch=args.maxmatch,
    )
    logger.info(
        "\t...after check, still need to run %s comparisons", len(comparisons_to_run)
    )

    # If there are no comparisons to run, update the Run matrices and exit
    # from this function
    if not comparisons_to_run:
        logger.info(
            termcolor(
                "All comparison results present in database (skipping comparisons)",
                "magenta",
            )
        )
        logger.info("Updating summary matrices with existing results")
        update_comparison_matrices(session, run)
        return

    # If we are in recovery mode, we are salvaging output from a previous
    # run, and do not necessarily need to rerun all the jobs. In this case,
    # we prepare a list of output files we want to recover from the results
    # in the output directory.
    if args.recovery:
        logger.warning("Entering recovery mode")
        logger.debug(
            "\tIn this mode, existing comparison output from %s is reused", deltadir
        )
        existingfiles = collect_existing_output(deltadir, "nucmer", args)
        if existingfiles:
            logger.info(
                "Recover mode identified %s existing output files for reuse: %s (etc)",
                len(existingfiles),
                existingfiles[0],
            )
        else:
            logger.info("Recovery mode identified no existing output files")
    else:
        existingfiles = list()
        logger.debug("\tAssuming no pre-existing output files")

    # Create list of NUCmer jobs for each comparison still to be performed
    logger.info("Creating NUCmer jobs for ANIm")
    joblist = generate_joblist(comparisons_to_run, existingfiles, args)
    logger.debug(
        "Generated %s jobs, %s comparisons", len(joblist), len(comparisons_to_run)
    )

    # Pass jobs to appropriate scheduler
    logger.debug("Passing %s jobs to %s...", len(joblist), args.scheduler)
    run_anim_jobs(joblist, args)
    logger.info("...jobs complete")

    # Process output and add results to database
    # This requires us to drop out of threading/multiprocessing: Python's SQLite3
    # interface doesn't allow sharing connections and cursors
    logger.info("Adding comparison results to database...")
    update_comparison_results(joblist, run, session, nucmer_version, args)
    update_comparison_matrices(session, run)
    logger.info("...database updated.")
def generate_joblist(
    comparisons: List[Tuple], existingfiles: List[Path], args: Namespace
) -> List[ComparisonJob]:
    """Return list of ComparisonJobs.

    :param comparisons: list of (Genome, Genome) tuples
    :param existingfiles: list of pre-existing nucmer output files
    :param args: Namespace of command-line arguments for the run
    """
    logger = logging.getLogger(__name__)

    known_outputs = set(existingfiles)  # Path objects are hashable
    joblist = []  # accumulates ComparisonJob structs
    new_count = 0  # jobs that must actually be run
    recovered_count = 0  # comparisons whose output is recovered from disk
    for idx, (query, subject) in enumerate(
        tqdm(comparisons, disable=args.disable_tqdm)
    ):
        ncmd, dcmd = anim.construct_nucmer_cmdline(
            query.path,
            subject.path,
            args.outdir,
            args.nucmer_exe,
            args.filter_exe,
            args.maxmatch,
        )
        logger.debug("Commands to run:\n\t%s\n\t%s", ncmd, dcmd)
        outprefix = ncmd.split()[3]  # prefix for NUCmer output
        # The database tracks the final artefact: the raw .delta file when
        # filtering is disabled, otherwise the .filter file.
        suffix = ".delta" if args.nofilter else ".filter"
        outfname = Path(outprefix + suffix)
        logger.debug("Expected output file for db: %s", outfname)

        # In recovery mode a comparison whose final output already exists is
        # not resubmitted; it is still appended to the joblist (with job set
        # to None) so its result is loaded into the database later, exactly
        # as for freshly-run comparisons.
        if args.recovery and outfname in known_outputs:
            logger.debug("Recovering output from %s, not submitting job", outfname)
            # Need to track the expected output, but set the job itself to None:
            joblist.append(ComparisonJob(query, subject, dcmd, ncmd, outfname, None))
            recovered_count += 1
        else:
            logger.debug("Building job")
            # The filter job depends on the nucmer job having completed.
            njob = pyani_jobs.Job("%s_%06d-n" % (args.jobprefix, idx), ncmd)
            fjob = pyani_jobs.Job("%s_%06d-f" % (args.jobprefix, idx), dcmd)
            fjob.add_dependency(njob)
            joblist.append(ComparisonJob(query, subject, dcmd, ncmd, outfname, fjob))
            new_count += 1

    logger.info(
        "Results not found for %d comparisons; %d new jobs built.",
        new_count,
        new_count,
    )
    if known_outputs:
        logger.info("Retrieving results for %d previous comparisons.", recovered_count)
    return joblist
def run_anim_jobs(joblist: List[ComparisonJob], args: Namespace) -> None:
    """Pass ANIm nucmer jobs to the scheduler.

    :param joblist: list of ComparisonJob namedtuples
    :param args: command-line arguments for the run

    :raises PyaniException: if any comparison fails under multiprocessing
    :raises SystemError: if the named scheduler is not recognised
    """
    logger = logging.getLogger(__name__)
    logger.debug("Scheduler: %s", args.scheduler)
    # Comparisons recovered from a previous run carry job=None; skip those.
    jobs = [_.job for _ in joblist if _.job]
    # Normalise the scheduler name once. Previously only the SGE branch was
    # case-insensitive, so e.g. "Multiprocessing" fell through to the error
    # branch; both branches now accept any casing.
    scheduler = args.scheduler.lower()
    if scheduler == "multiprocessing":
        logger.info("Running jobs with multiprocessing")
        if not args.workers:
            logger.debug("(using maximum number of worker threads)")
        else:
            logger.debug("(using %d worker threads, if available)", args.workers)
        cumval = run_mp.run_dependency_graph(jobs, workers=args.workers)
        if cumval > 0:
            logger.error(
                "At least one NUCmer comparison failed. Please investigate (exiting)"
            )
            raise PyaniException("Multiprocessing run failed in ANIm")
        logger.info("Multiprocessing run completed without error")
    elif scheduler == "sge":
        logger.info("Running jobs with SGE")
        logger.debug("Setting jobarray group size to %d", args.sgegroupsize)
        logger.debug("Joblist contains %d jobs", len(joblist))
        run_sge.run_dependency_graph(
            jobs,
            jgprefix=args.jobprefix,
            sgegroupsize=args.sgegroupsize,
            sgeargs=args.sgeargs,
        )
    else:
        logger.error(termcolor("Scheduler %s not recognised", "red"), args.scheduler)
        raise SystemError(1)
def update_comparison_results(
    joblist: List[ComparisonJob], run, session, nucmer_version: str, args: Namespace
) -> None:
    """Update the Comparison table with the completed result set.

    :param joblist: list of ComparisonJob namedtuples
    :param run: Run ORM object for the current ANIm run
    :param session: active pyanidb session via ORM
    :param nucmer_version: version of nucmer used for the comparison
    :param args: command-line arguments for this run

    The Comparison table stores individual comparison results, one per row.
    """
    logger = logging.getLogger(__name__)
    # One Comparison row is appended to the run for every job in the list.
    for result in tqdm(joblist, disable=args.disable_tqdm):
        logger.debug("\t%s vs %s", result.query.description, result.subject.description)
        aln_length, sim_errs = anim.parse_delta(result.outfile)
        try:
            pid = 1 - sim_errs / aln_length
        except ZeroDivisionError:  # zero-length alignment: no sequence aligned
            pid = 0
        run.comparisons.append(
            Comparison(
                query=result.query,
                subject=result.subject,
                aln_length=aln_length,
                sim_errs=sim_errs,
                identity=pid,
                cov_query=aln_length / result.query.length,
                cov_subject=aln_length / result.subject.length,
                program="nucmer",
                version=nucmer_version,
                fragsize=None,
                maxmatch=args.maxmatch,
            )
        )
    # Commit all rows in one transaction.
    logger.debug("Committing results to database")
    session.commit()
| |
# Lint as: python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An XLA client in Python."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import collections
import contextlib
import enum # pylint: disable=g-bad-import-order
import gzip
import inspect
import os
from typing import List, Sequence, Tuple, Union
from absl import logging
import numpy as np
# Note this module does *not* depend on any Python protocol buffers. The XLA
# Python bindings are currently packaged both as part of jaxlib and as part
# of TensorFlow. If we use protocol buffers here, then importing both jaxlib
# and TensorFlow may fail with duplicate protocol buffer message definitions.
from tensorflow.compiler.xla.python import xla_extension as _xla
# Most functions are snake_case for consistency with other modules, some
# method names are CamelCase for consistency with XLA.
# pylint: disable=invalid-name
# Pylint has false positives for type annotations.
# pylint: disable=invalid-sequence-index
# Re-export the op-building and profiler submodules of the C++ extension.
ops = _xla.ops
profiler = _xla.profiler
# Maps user-visible backend names to XLA's internal platform names.
xla_platform_names = {
    'cpu': 'Host',
    'gpu': 'CUDA',
}
def _interpreter_backend_factory():
  """Returns a client for the interpreter backend."""
  return _xla.get_interpreter_client()
def _cpu_backend_factory():
  """Returns a CPU backend client with asynchronous transfers enabled."""
  return _xla.get_cpu_client(asynchronous=True)
def _gpu_backend_factory(distributed_client=None, node_id=0):
  """Returns a GPU backend. BFC allocator is used by default."""
  # All allocator behaviour is driven by environment variables.
  allocator = os.getenv('XLA_PYTHON_CLIENT_ALLOCATOR', 'default').lower()
  memory_fraction = os.getenv('XLA_PYTHON_CLIENT_MEM_FRACTION')
  preallocate = os.getenv('XLA_PYTHON_CLIENT_PREALLOCATE')
  if allocator not in ('default', 'platform', 'bfc'):
    raise ValueError(
        'XLA_PYTHON_CLIENT_ALLOCATOR env var must be "default", "platform", or '
        '"bfc", got "%s"' % allocator)
  config = _xla.GpuAllocatorConfig()
  # Dispatch on the validated allocator name.
  config.kind = {
      'default': _xla.GpuAllocatorConfig.Kind.DEFAULT,
      'platform': _xla.GpuAllocatorConfig.Kind.PLATFORM,
      'bfc': _xla.GpuAllocatorConfig.Kind.BFC,
  }[allocator]
  if memory_fraction:
    config.memory_fraction = float(memory_fraction)
  # Unset, or any value other than '0'/'false'/'False', enables preallocation.
  config.preallocate = preallocate not in ('0', 'false', 'False')
  return _xla.get_nvidia_gpu_client(
      asynchronous=True,
      allocator_config=config,
      distributed_client=distributed_client,
      node_id=node_id)
def _tpu_backend_factory():
  """Returns a TPU backend client with asynchronous transfers enabled."""
  return _xla.get_tpu_client(asynchronous=True)
# Backend factories, keyed by user-visible name, in increasing priority order.
# get_local_backend(None) returns the last successfully-initialized entry.
_local_backend_factories = collections.OrderedDict([
    ('interpreter', _interpreter_backend_factory),
    ('cpu', _cpu_backend_factory),
    ('gpu', _gpu_backend_factory),
    ('tpu', _tpu_backend_factory),
])
def register_local_backend_factory(name, factory):
  """Registers `factory` as the constructor for backend `name`."""
  _local_backend_factories[name] = factory
# Memoization cache for _get_local_backends(); populated on first call.
_local_backends = None
def _get_local_backends():
  """Instantiates all known local backends (memoized)."""
  global _local_backends
  if _local_backends is not None:
    return _local_backends
  _local_backends = collections.OrderedDict()
  for name, factory in _local_backend_factories.items():
    logging.vlog(1, "Initializing backend '%s'" % name)
    try:
      _local_backends[name] = factory()
    except RuntimeError as exc:
      if name == 'cpu':
        # CPU is always expected to initialize successfully.
        raise
      # A RuntimeError here means the backend isn't built into the binary
      # or has no devices; skip it.
      logging.vlog(1, "Error initializing backend '%s': %s" % (name, exc))
  return _local_backends
def get_local_backend(name=None):
  """Returns a local backend.

  Args:
    name: the backend name. If `None`, a default local backend is returned,
      typically `gpu` if one is present, or `cpu` if not. If a string, the
      named backend is returned or an exception raised.

  Returns:
    A LocalBackend object.
  """
  backends = _get_local_backends()
  if name is None:
    # Backends are ordered by increasing priority; pick the best available.
    return list(backends.values())[-1]
  try:
    return backends[name]
  except KeyError:
    raise RuntimeError(
        'Unknown backend %s. Available: %s' % (name, list(backends.keys())))
class OpMetadata(object):
  """Python representation of a xla.OpMetadata protobuf."""
  __slots__ = ('op_type', 'op_name', 'source_file', 'source_line')
  def __init__(self, op_type='', op_name='', source_file='', source_line=0):
    # All fields default to empty/zero, so partially-populated metadata
    # (e.g. type only, no source location) is valid.
    self.op_type = op_type
    self.op_name = op_name
    self.source_file = source_file
    self.source_line = source_line
def CurrentSourceInfoMetadata(op_type=None, op_name=None, skip_frames=1):
  """Helper for use in source mapping that returns an OpMetadata object."""
  # Walk up the interpreter stack to find the caller's file and line number.
  caller = inspect.stack()[skip_frames]
  full_filename, lineno = caller[1:3]
  return OpMetadata(
      op_type=op_type,
      op_name=op_name,
      source_file=os.path.basename(full_filename),
      source_line=lineno)
PrimitiveType = _xla.PrimitiveType
# The bfloat16 numpy dtype is provided by the C++ extension.
bfloat16 = _xla.bfloat16_dtype()
# Maps XLA element types to the numpy dtypes used to represent them on the
# host. TUPLE and TOKEN have no numeric representation, hence object dtype.
XLA_ELEMENT_TYPE_TO_DTYPE = {
    PrimitiveType.PRED: np.dtype('bool'),
    PrimitiveType.S8: np.dtype('int8'),
    PrimitiveType.S16: np.dtype('int16'),
    PrimitiveType.S32: np.dtype('int32'),
    PrimitiveType.S64: np.dtype('int64'),
    PrimitiveType.U8: np.dtype('uint8'),
    PrimitiveType.U16: np.dtype('uint16'),
    PrimitiveType.U32: np.dtype('uint32'),
    PrimitiveType.U64: np.dtype('uint64'),
    PrimitiveType.BF16: np.dtype(bfloat16),
    PrimitiveType.F16: np.dtype('float16'),
    PrimitiveType.F32: np.dtype('float32'),
    PrimitiveType.F64: np.dtype('float64'),
    PrimitiveType.C64: np.dtype('complex64'),
    PrimitiveType.C128: np.dtype('complex128'),
    PrimitiveType.TUPLE: np.dtype(np.object_),
    PrimitiveType.TOKEN: np.dtype(np.object_),
}
# Note the conversion on the key. Numpy has a known issue wherein dtype hashing
# doesn't work as expected (https://github.com/numpy/numpy/issues/7242). Thus,
# when keying by dtype in this dict, we use the string form of dtypes.
# NOTE(review): TUPLE and TOKEN share dtype('O'), so the reverse map keeps
# only the last one inserted (TOKEN).
DTYPE_TO_XLA_ELEMENT_TYPE = {
    str(dt): et for et, dt in XLA_ELEMENT_TYPE_TO_DTYPE.items()
}
def dtype_to_etype(dtype):
  """Convenience function for reading DTYPE_TO_XLA_ELEMENT_TYPE.

  Raises KeyError if the dtype has no XLA element-type equivalent.
  """
  return DTYPE_TO_XLA_ELEMENT_TYPE[str(np.dtype(dtype))]
Shape = _xla.Shape
Shape.__doc__ = """
A Shape is an object defined in C++ that duck types like the following class:
class Shape(object):
'''Represents an XLA shape.
A shape is either an array shape, having rank-many integer
dimensions and an element type (represented by a Numpy dtype), or it
is a tuple shape, having a shape for every tuple component:
type shape =
TupleShape of shape list
| ArrayShape of { dimensions: int list; element_type: dtype }
'''
@staticmethod
def tuple_shape(tuple_shapes) -> Shape:
"Construct a tuple shape."
@staticmethod
def array_shape(element_type, dimensions, minor_to_major=None) -> Shape:
@staticmethod
def from_pyval(pyval) -> Shape:
"Returns a Shape that describes a tuple-tree of Numpy arrays."
def __init__(self, str) -> Shape:
"Parses a shape string."
def __eq__(self, other: Shape) -> bool:
def __ne__(self, other: Shape) -> bool:
def __hash__(self):
def __repr__(self):
def is_tuple(self) -> bool:
def is_array(self) -> bool:
def tuple_shapes(self) -> [Shape]:
def numpy_dtype(self) -> np.dtype:
"Like element_type(), but returns dtype('O') for a tuple shape."
def xla_element_type(self) -> PrimitiveType:
def element_type(self) -> np.dtype:
def dimensions(self) -> (int, int, ...):
def rank(self) -> int:
def with_major_to_minor_layout_if_absent(self) -> Shape:
"Returns a copy with missing layouts set to major-to-minor."
def to_serialized_proto(self) -> bytes:
"Returns 'shape' as a serialized proto."
"""
ProgramShape = _xla.ProgramShape
ProgramShape.__doc__ = """
A ProgramShape is a C++ object that duck types like the following class.
class ProgramShape(object):
def __init__(self, parameter_shapes, result_shape):
def parameter_shapes(self) -> [Shape]:
def result_shape(self) -> Shape:
def __repr__(self):
"""
def shape_from_pyval(pyval):
  """Returns a Shape that describes a tuple-tree of Numpy arrays."""
  # Tuples recurse into tuple shapes; anything else is treated as an array.
  if isinstance(pyval, tuple):
    return Shape.tuple_shape(tuple(shape_from_pyval(elt) for elt in pyval))
  return Shape.array_shape(pyval.dtype, np.shape(pyval))
DeviceAssignment = _xla.DeviceAssignment
DeviceAssignment.__doc__ = """
A DeviceAssignment is a C++ object with the following signature.
def create(assignment):
'''Builds a device assignment.
Args:
assignment: a 2D numpy array of device ordinal integers, indexed by
[replica][computation_in_replica].
Returns:
A device assignment.
'''
def replica_count():
'''Returns the number of replicas.'''
def computation_count():
'''Returns the number of computations per replica.'''
"""
Device = _xla.Device
CompileOptions = _xla.CompileOptions
HostBufferSemantics = _xla.HostBufferSemantics
# An Executable is a C++ class that duck types with the following API:
# class Executable(object):
# def local_devices(self) -> [Device]:
# def execute(self, arguments : [Buffer]) -> Buffer:
# """Execute on one replica with Buffer arguments and return value."""
#
# def size_of_generated_code_in_bytes(self) -> int:
# """Return generated binary size, or -1 if not known."""
#
# def execute_on_local_devices(self, arguments: [[Buffer]]) -> [Buffer]:
# """Execute on many replicas with Buffer arguments and return value.
#
# Args:
# arguments: A sequence of sequences of Buffers. The i'th inner sequence
# comprises the arguments for execution on the i'th local device.
#
# Returns:
# A list of the computation's outputs for each local device, as a Buffer.
# If a shallow sequence of arguments was passed in for `arguments`, then
# the sole, zero'th device's output is returned instead, as a Buffer.
# """
#
# There are different implementations of Executable for different backends.
def execute_with_python_values(executable, arguments, backend):
  """Execute on one replica with Python values as arguments and output."""
  def transfer(pyval):
    # Place every argument on the executable's first local device.
    return backend.buffer_from_pyval(
        pyval, device=executable.local_devices()[0])
  arg_buffers = [transfer(pyval) for pyval in arguments]
  return [out.to_py() for out in executable.execute(arg_buffers)]
def execute_with_python_values_replicated(executable, arguments, backend):
  """Execute on many replicas with Python values as arguments and output.

  Arguments:
    executable: the program to run.
    arguments: a list of lists of Python values indexed by `[replica][arg_num]`
      to pass as inputs.
    backend: the backend we are targeting.

  Returns:
    A list of python values, one per replica.
  """
  devices = executable.local_devices()
  # Flatten the (value, device) pairs so all host-to-device transfers are
  # issued up front.
  # pylint: disable=g-complex-comprehension
  flat_args = [(arg, devices[replica])
               for replica, replica_args in enumerate(arguments)
               for arg in replica_args]
  flat_arg_buffers = [
      backend.buffer_from_pyval(pyval, device) for pyval, device in flat_args
  ]
  # Re-nest the flat buffer list to mirror the per-replica input structure.
  arg_buffers = []
  cursor = 0
  for replica_args in arguments:
    arg_buffers.append(flat_arg_buffers[cursor:cursor + len(replica_args)])
    cursor += len(replica_args)
  outputs = executable.execute_on_local_devices(arg_buffers)
  return [[buf.to_py() for buf in per_device] for per_device in outputs]
class PaddingType(enum.Enum):
  """Padding modes accepted by window_padding_type_to_pad_values."""
  VALID = 1
  SAME = 2
def window_padding_type_to_pad_values(padding_type, lhs_dims, rhs_dims,
                                      window_strides):
  """Maps PaddingType or string to pad values (list of pairs of ints)."""
  if not isinstance(padding_type, (str, PaddingType)):
    msg = 'padding_type must be str or PaddingType, got {}.'
    raise TypeError(msg.format(type(padding_type)))
  if isinstance(padding_type, str):
    # Normalise the string form to the enum.
    mapped = {'VALID': PaddingType.VALID,
              'SAME': PaddingType.SAME}.get(padding_type.upper())
    if mapped is None:
      msg = 'Unknown padding type string: expected "VALID" or "SAME", got {}.'
      raise ValueError(msg.format(padding_type))
    padding_type = mapped
  if padding_type == PaddingType.VALID:
    # VALID never pads.
    return [(0, 0)] * len(window_strides)
  if padding_type == PaddingType.SAME:
    # SAME: pad so the output has ceil(input / stride) elements per dim,
    # splitting each pad amount as evenly as possible (extra on the high side).
    out_shape = np.ceil(np.true_divide(lhs_dims, window_strides)).astype(int)
    pad_sizes = []
    for out_size, stride, filter_size, in_size in zip(
        out_shape, window_strides, rhs_dims, lhs_dims):
      pad_sizes.append(max((out_size - 1) * stride + filter_size - in_size, 0))
    return [(pad // 2, pad - pad // 2) for pad in pad_sizes]
  msg = 'Unexpected PaddingType value: {}'
  raise ValueError(msg.format(padding_type))
# Plain re-exports of builder/executable classes from the C++ extension.
XlaBuilder = _xla.XlaBuilder
XlaComputation = _xla.XlaComputation
FftType = _xla.FftType
Client = _xla.Client
Buffer = _xla.Buffer
Executable = _xla.Executable
def register_custom_call_target(name, fn, platform='cpu'):
  """Registers a custom call target.

  Args:
    name: bytes containing the name of the function.
    fn: a PyCapsule object containing the function pointer.
    platform: the target platform.

  Raises:
    KeyError: if `platform` is not a key of `xla_platform_names`
      ('cpu' or 'gpu').
  """
  _xla.register_custom_call_target(name, fn, xla_platform_names[platform])
# Deprecated. Use register_custom_call_target instead.
register_cpu_custom_call_target = register_custom_call_target
class PaddingConfigDimension(object):
  """Python representation of a xla.PaddingConfigDimension protobuf."""
  __slots__ = ('edge_padding_low', 'edge_padding_high', 'interior_padding')
  def __init__(self):
    # All padding amounts default to zero (no padding).
    self.edge_padding_low = 0
    self.edge_padding_high = 0
    self.interior_padding = 0
class PaddingConfig(object):
  """Python representation of a xla.PaddingConfig protobuf."""
  __slots__ = ('dimensions',)
  def __init__(self):
    # List of PaddingConfigDimension, one per dimension being padded.
    self.dimensions = []
def make_padding_config(
    padding_config: Union[PaddingConfig, Sequence[Tuple[int, int, int]]]
) -> PaddingConfig:
  """Create PaddingConfig proto from list of triples of integers.

  Args:
    padding_config: either a PaddingConfig or a list of integer triples
      (edge_padding_low, edge_padding_high, interior_padding) representing the
      configuration of the padding operation.

  Returns:
    A `PaddingConfig` object.
  """
  # Single isinstance call with a tuple of types replaces the duplicated
  # isinstance(..., tuple) or isinstance(..., list) test.
  if isinstance(padding_config, (list, tuple)):
    triples = padding_config
    padding_config = PaddingConfig()
    for lo, hi, interior in triples:
      dimension = PaddingConfigDimension()
      dimension.edge_padding_low = lo
      dimension.edge_padding_high = hi
      dimension.interior_padding = interior
      padding_config.dimensions.append(dimension)
  return padding_config
class DotDimensionNumbers(object):
  """Python representation of a xla.DotDimensionNumbers protobuf."""
  __slots__ = ('lhs_contracting_dimensions', 'rhs_contracting_dimensions',
               'lhs_batch_dimensions', 'rhs_batch_dimensions')
  def __init__(self):
    # Contracting/batch dimension indices for each operand; empty by default.
    self.lhs_contracting_dimensions = []
    self.rhs_contracting_dimensions = []
    self.lhs_batch_dimensions = []
    self.rhs_batch_dimensions = []
def make_dot_dimension_numbers(
    dimension_numbers: Union[DotDimensionNumbers,
                             Tuple[Tuple[List[int], List[int]],
                                   Tuple[List[int], List[int]]]]
) -> DotDimensionNumbers:
  """Builds a DotDimensionNumbers object from a specification.

  Args:
    dimension_numbers: either a `DotDimensionNumbers` or a nested tuple
      `((lhs_contract, rhs_contract), (lhs_batch, rhs_batch))` of lists of
      integers representing the dimensions to treat as contracting dimensions
      and batch dimensions on each input operand.

  Returns:
    A `DotDimensionNumbers` object.
  """
  # Already a DotDimensionNumbers-like object: pass through unchanged.
  if not isinstance(dimension_numbers, (list, tuple)):
    return dimension_numbers
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  dims = DotDimensionNumbers()
  dims.lhs_contracting_dimensions.extend(lhs_contract)
  dims.rhs_contracting_dimensions.extend(rhs_contract)
  dims.lhs_batch_dimensions.extend(lhs_batch)
  dims.rhs_batch_dimensions.extend(rhs_batch)
  return dims
class ConvolutionDimensionNumbers(object):
  """Python representation of a xla.ConvolutionDimensionNumbers protobuf."""
  __slots__ = ('input_batch_dimension', 'input_feature_dimension',
               'input_spatial_dimensions', 'kernel_input_feature_dimension',
               'kernel_output_feature_dimension', 'kernel_spatial_dimensions',
               'output_batch_dimension', 'output_feature_dimension',
               'output_spatial_dimensions')
  def __init__(self):
    # Dimension indices default to 0 and spatial lists to empty; callers
    # (e.g. make_convolution_dimension_numbers) fill them in.
    self.input_batch_dimension = 0
    self.input_feature_dimension = 0
    self.input_spatial_dimensions = []
    self.kernel_input_feature_dimension = 0
    self.kernel_output_feature_dimension = 0
    self.kernel_spatial_dimensions = []
    self.output_batch_dimension = 0
    self.output_feature_dimension = 0
    self.output_spatial_dimensions = []
def make_convolution_dimension_numbers(
    dimension_numbers: Union[None, ConvolutionDimensionNumbers, Tuple[str, str,
                                                                      str]],
    num_spatial_dimensions: int) -> ConvolutionDimensionNumbers:
  """Builds a ConvolutionDimensionNumbers object from a specification.

  Args:
    dimension_numbers: optional, either a ConvolutionDimensionNumbers object or
      a tuple (lhs_spec, rhs_spec, out_spec). Each element is a string of
      length N+2 identifying by position: (1) batch dimensions in lhs, rhs, and
      the output with the character 'N', (2) feature dimensions in lhs and the
      output with the character 'C', (3) input and output feature dimensions
      in rhs with the characters 'I' and 'O' respectively, and (4) spatial
      dimension correspondences between lhs, rhs, and the output using any
      distinct characters. For example, to indicate dimension numbers
      consistent with the Conv operation with two spatial dimensions, one
      could use ('NCHW', 'OIHW', 'NCHW'). As another example, to indicate
      dimension numbers consistent with the TensorFlow Conv2D operation, one
      could use ('NHWC', 'HWIO', 'NHWC'). When using the latter form of
      convolution dimension specification, window strides are associated with
      spatial dimension character labels according to the order in which the
      labels appear in the rhs_spec string, so that window_strides[0] is
      matched with the dimension corresponding to the first character
      appearing in rhs_spec that is not 'I' or 'O'. By default, use the same
      dimension numbering as Conv and ConvWithGeneralPadding.
    num_spatial_dimensions: the number of spatial dimensions.

  Returns:
    A `ConvolutionDimensionNumbers` object.
  """
  if dimension_numbers is None:
    # Default numbering: batch=0, feature=1, spatial dims follow (NC...-style).
    nd = num_spatial_dimensions
    dimension_numbers = ConvolutionDimensionNumbers()
    dimension_numbers.input_batch_dimension = 0
    dimension_numbers.input_feature_dimension = 1
    dimension_numbers.output_batch_dimension = 0
    dimension_numbers.output_feature_dimension = 1
    dimension_numbers.kernel_output_feature_dimension = 0
    dimension_numbers.kernel_input_feature_dimension = 1
    dimension_numbers.input_spatial_dimensions.extend(range(2, 2 + nd))
    dimension_numbers.kernel_spatial_dimensions.extend(range(2, 2 + nd))
    dimension_numbers.output_spatial_dimensions.extend(range(2, 2 + nd))
  elif isinstance(dimension_numbers, tuple):
    # String-spec form: the positions of 'N'/'C'/'I'/'O' in each spec string
    # define the role of each dimension.
    lhs_spec, rhs_spec, out_spec = dimension_numbers
    dimension_numbers = ConvolutionDimensionNumbers()
    dimension_numbers.input_batch_dimension = lhs_spec.index('N')
    dimension_numbers.input_feature_dimension = lhs_spec.index('C')
    dimension_numbers.output_batch_dimension = out_spec.index('N')
    dimension_numbers.output_feature_dimension = out_spec.index('C')
    dimension_numbers.kernel_output_feature_dimension = rhs_spec.index('O')
    dimension_numbers.kernel_input_feature_dimension = rhs_spec.index('I')
    dimension_numbers.kernel_spatial_dimensions.extend(
        i for i, c in enumerate(rhs_spec) if c not in {'I', 'O'})
    # lhs/out spatial dims are sorted so they pair with the rhs spatial dim
    # carrying the same label character.
    dimension_numbers.input_spatial_dimensions.extend(
        sorted((i for i, c in enumerate(lhs_spec) if c not in {'N', 'C'}),
               key=lambda i: rhs_spec.index(lhs_spec[i])))
    dimension_numbers.output_spatial_dimensions.extend(
        sorted((i for i, c in enumerate(out_spec) if c not in {'N', 'C'}),
               key=lambda i: rhs_spec.index(out_spec[i])))
  return dimension_numbers
class OpSharding(object):
  """Python representation of a xla.OpSharding protobuf."""
  __slots__ = ('type', 'tile_assignment_dimensions', 'tile_assignment_devices',
               'tuple_shardings')
  # Enum of sharding kinds, provided by the C++ extension.
  Type = _xla.OpSharding_Type
  def __init__(self):
    # Default is a replicated (unsharded) op.
    self.type = self.Type.REPLICATED
    self.tile_assignment_dimensions = []
    self.tile_assignment_devices = []
    self.tuple_shardings = []
class PrecisionConfig(object):
  """Python representation of a xla.PrecisionConfig protobuf."""
  __slots__ = ('operand_precision',)
  # Enum of precision levels, provided by the C++ extension.
  Precision = _xla.PrecisionConfig_Precision
  def __init__(self):
    # One precision entry per operand; empty by default.
    self.operand_precision = []
class GatherDimensionNumbers(object):
  """Python representation of a xla.GatherDimensionNumbers protobuf."""
  __slots__ = ('offset_dims', 'collapsed_slice_dims', 'start_index_map',
               'index_vector_dim')
  def __init__(self):
    # Dimension lists default to empty and the index vector dim to 0.
    self.offset_dims = []
    self.collapsed_slice_dims = []
    self.start_index_map = []
    self.index_vector_dim = 0
class ScatterDimensionNumbers(object):
  """Python representation of a xla.ScatterDimensionNumbers protobuf."""
  __slots__ = ('update_window_dims', 'inserted_window_dims',
               'scatter_dims_to_operand_dims', 'index_vector_dim')
  def __init__(self):
    # Dimension lists default to empty and the index vector dim to 0.
    self.update_window_dims = []
    self.inserted_window_dims = []
    self.scatter_dims_to_operand_dims = []
    self.index_vector_dim = 0
class ReplicaGroup(object):
  """Python representation of a xla.ReplicaGroup protobuf."""
  __slots__ = ('replica_ids',)
  def __init__(self):
    # The replica ids forming one group; empty by default.
    self.replica_ids = []
def _make_replica_group_proto(replica_group):
  """Wraps an iterable of replica ids in a ReplicaGroup proto."""
  group_proto = ReplicaGroup()
  group_proto.replica_ids.extend(replica_group)
  return group_proto
def make_replica_groups(replica_groups):
  """Converts an iterable of replica-id groups into ReplicaGroup protos."""
  if replica_groups is None:
    # Empty list is the special "default grouping" value for the XLA API.
    return []
  return [_make_replica_group_proto(group) for group in list(replica_groups)]
Traceback = _xla.Traceback
@contextlib.contextmanager
def tracebacks(enabled=True):
  """Context manager that enables or disables traceback collection."""
  # Remember the current flag so nested uses restore the right state.
  previous = Traceback.enabled
  Traceback.enabled = enabled
  try:
    yield
  finally:
    Traceback.enabled = previous
def heap_profile(client: Client) -> bytes:
  """Returns a gzipped pprof protocol buffer containing a heap profile.

  The return annotation was corrected from ``str`` to ``bytes``:
  gzip.compress always returns bytes.
  """
  return gzip.compress(client.heap_profile())
# Perform one last garbage collection of deferred Python references. This is
# mostly to keep ASAN happy.
# Registered at import time; runs during interpreter shutdown.
atexit.register(_xla.collect_garbage)
| |
#Embedded file name: ACEStream\Core\BitTornado\bencode.pyo
from types import IntType, LongType, StringType, ListType, TupleType, DictType
from ACEStream.Core.Utilities.odict import odict
try:
from types import BooleanType
except ImportError:
BooleanType = None
try:
from types import UnicodeType
except ImportError:
UnicodeType = None
from traceback import print_exc, print_stack
import sys
DEBUG = False
def decode_int(x, f, params = None):
    """Decode a bencoded integer ('i<digits>e') starting at offset f.

    Returns (value, offset just past the trailing 'e').  Raises
    ValueError for leading zeros or negative zero, per the bencoding
    spec.
    """
    f += 1
    newf = x.index('e', f)
    try:
        n = int(x[f:newf])
    except:
        # Python 2: fall back to long for values that overflow int.
        n = long(x[f:newf])
    if x[f] == '-':
        if x[f + 1] == '0':
            raise ValueError
    elif x[f] == '0' and newf != f + 1:
        raise ValueError
    return (n, newf + 1)
def decode_string(x, f, params = None):
    """Decode a bencoded byte string ('<len>:<bytes>') starting at offset f.

    Returns (string, offset just past the string).  Rejects length
    prefixes with a leading zero (other than '0' itself).
    """
    colon = x.index(':', f)
    try:
        n = int(x[f:colon])
    except (OverflowError, ValueError):
        # Python 2: fall back to long for very large length prefixes.
        n = long(x[f:colon])
    if x[f] == '0' and colon != f + 1:
        raise ValueError
    colon += 1
    return (x[colon:colon + n], colon + n)
def decode_unicode(x, f, params = None):
    """Decode a 'u'-prefixed bencoded string and return it as unicode (UTF-8)."""
    s, f = decode_string(x, f + 1)
    return (s.decode('UTF-8'), f)
def decode_list(x, f, params = None):
    """Decode a bencoded list ('l...e') starting at offset f."""
    r, f = [], f + 1
    while x[f] != 'e':
        # Dispatch on the type tag of each element.
        v, f = decode_func[x[f]](x, f, params)
        r.append(v)
    return (r, f + 1)
def decode_dict(x, f, params = None):
    """Decode a bencoded dict ('d...e') starting at offset f.

    If params contains 'use_ordered_dict', insertion order is preserved
    via odict; otherwise a plain dict is used.
    """
    if params != None and 'use_ordered_dict' in params:
        r = odict()
    else:
        r = {}
    f = f + 1
    lastkey = None
    while x[f] != 'e':
        k, f = decode_string(x, f)
        # NOTE(review): lastkey is recorded but never compared, so the
        # canonical "keys must be sorted" check is effectively disabled here.
        lastkey = k
        r[k], f = decode_func[x[f]](x, f, params)
    return (r, f + 1)
# Dispatch table: the first character of a bencoded value selects its decoder
# ('l' list, 'd' dict, 'i' integer, digit = length-prefixed string).
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
def bdecode(x, sloppy = 1, params = None):
    """Decode one bencoded value from the string x.

    When sloppy is true (the default), trailing data after the first
    value is ignored; otherwise it raises.  All malformed input is
    reported as ValueError.
    """
    try:
        r, l = decode_func[x[0]](x, 0, params)
    except (IndexError, KeyError, ValueError):
        if DEBUG:
            print_exc()
        raise ValueError, 'bad bencoded data'
    if not sloppy and l != len(x):
        raise ValueError, 'bad bencoded data'
    return r
def test_bdecode():
    """Smoke-test bdecode against a battery of malformed inputs.

    Behaviour matches the original copy-pasted version: each input is
    fed to bdecode(); a ValueError is swallowed, any other exception
    propagates, and inputs that happen to decode are simply ignored.
    The ~100 lines of duplicated try/except blocks are collapsed into
    one data-driven loop.
    """
    bad_inputs = [
        '0:0:', 'ie', 'i341foo382e', 'i-0e', 'i123', '', 'i6easd',
        '35208734823ljdahflajhdf', '2:abfdjslhfld', '02:xy', 'l',
        'leanfdldjfh', 'relwjhrlewjh', 'd', 'defoobar', 'd3:fooe',
        'di1e0:e', 'd1:b0:1:a0:e', 'd1:a0:1:a0:e', 'i03e', 'l01:ae',
        '9999:x', 'l0:', 'd0:0:', 'd0:',
    ]
    for data in bad_inputs:
        try:
            bdecode(data)
        except ValueError:
            pass
# Sentinel object identifying pre-encoded payloads (see encode_bencached).
bencached_marker = []
class Bencached:
    """Wrapper marking a string as already bencoded (copied out verbatim)."""
    def __init__(self, s):
        self.marker = bencached_marker
        self.bencoded = s
BencachedType = type(Bencached(''))  # needed to key the encode_func dispatch table
def encode_bencached(x, r, params = None):
    """Append an already-encoded payload to r verbatim."""
    r.append(x.bencoded)
def encode_int(x, r, params = None):
    """Append the bencoding of an integer to r: 'i<digits>e'."""
    r.extend(('i', str(x), 'e'))
def encode_bool(x, r, params = None):
    """Encode a bool as the bencoded integer 0 or 1."""
    encode_int(int(x), r)
def encode_string(x, r, params = None):
    """Append the bencoding of a byte string to r: '<len>:<bytes>'."""
    r.extend((str(len(x)), ':', x))
def encode_unicode(x, r, params = None):
    """Encode a unicode string via its UTF-8 byte representation."""
    encode_string(x.encode('UTF-8'), r)
def encode_list(x, r, params = None):
    """Append the bencoding of a list/tuple to r: 'l<items>e'."""
    r.append('l')
    for e in x:
        # Dispatch each element on its Python type.
        encode_func[type(e)](e, r)
    r.append('e')
def encode_dict(x, r, params = None):
    """Append the bencoding of a dict to r: 'd<key><value>...e'.

    Keys are sorted (canonical bencoding) unless params contains
    'skip_dict_sorting'.  Keys must be strings.
    """
    r.append('d')
    ilist = x.items()
    if params != None and 'skip_dict_sorting' in params:
        pass
    else:
        ilist.sort()
    for k, v in ilist:
        if DEBUG:
            print >> sys.stderr, 'bencode: Encoding', `k`, `v`
        try:
            r.extend((str(len(k)), ':', k))
        except:
            # Non-string key: report the offending key, then re-raise.
            print >> sys.stderr, 'k: %s' % k
            raise
        encode_func[type(v)](v, r)
    r.append('e')
# Dispatch table keyed on the Python type of the value being encoded.
encode_func = {}
encode_func[BencachedType] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
encode_func[odict] = encode_dict
# bool/unicode may be absent on very old Python 2 versions (see imports).
if BooleanType:
    encode_func[BooleanType] = encode_bool
if UnicodeType:
    encode_func[UnicodeType] = encode_unicode
def bencode(x, params = None):
    """Bencode x and return the encoded string.

    NOTE(review): on an encoding error the traceback is printed but not
    re-raised, so the partially-built buffer is still joined and
    returned; '' is returned only when the join itself fails.  Callers
    must treat the result of a failed encode as unreliable.
    """
    r = []
    try:
        encode_func[type(x)](x, r, params)
    except:
        print >> sys.stderr, 'bencode: *** error *** could not encode type %s (value: %s)' % (type(x), x)
        print_stack()
        print_exc()
    try:
        return ''.join(r)
    except:
        if DEBUG:
            print >> sys.stderr, 'bencode: join error', x
        for elem in r:
            print >> sys.stderr, 'elem', elem, 'has type', type(elem)
        print_exc()
        return ''
def test_bencode():
    """Smoke-test bencode with a non-string dict key.

    NOTE(review): bencode() swallows encoding failures internally and
    returns a string instead of raising, so the AssertionError clause
    below appears never to trigger -- confirm intent.
    """
    try:
        bencode({1: 'foo'})
    except AssertionError:
        pass
# Optional: JIT-specialize the hot encode/decode paths when psyco is available.
try:
    import psyco
    psyco.bind(bdecode)
    psyco.bind(bencode)
except ImportError:
    pass
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import flow
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Declares the proto package and the message names exported by this module.
__protobuf__ = proto.module(
    package="google.cloud.dialogflow.cx.v3beta1",
    manifest={
        "CreateVersionOperationMetadata",
        "Version",
        "ListVersionsRequest",
        "ListVersionsResponse",
        "GetVersionRequest",
        "CreateVersionRequest",
        "UpdateVersionRequest",
        "DeleteVersionRequest",
        "LoadVersionRequest",
        "CompareVersionsRequest",
        "CompareVersionsResponse",
    },
)
class CreateVersionOperationMetadata(proto.Message):
    r"""Metadata associated with the long running operation for
    [Versions.CreateVersion][google.cloud.dialogflow.cx.v3beta1.Versions.CreateVersion].

    Attributes:
        version (str):
            Name of the created version. Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
    """
    # Field numbers are part of the wire format; do not renumber.
    version = proto.Field(proto.STRING, number=1,)
class Version(proto.Message):
    r"""Represents a version of a flow.

    Attributes:
        name (str):
            Format: projects/<Project
            ID>/locations/<Location ID>/agents/<Agent
            ID>/flows/<Flow ID>/versions/<Version ID>.
            Version ID is a self-increasing number generated
            by Dialogflow upon version creation.
        display_name (str):
            Required. The human-readable name of the
            version. Limit of 64 characters.
        description (str):
            The description of the version. The maximum
            length is 500 characters. If exceeded, the
            request is rejected.
        nlu_settings (google.cloud.dialogflowcx_v3beta1.types.NluSettings):
            Output only. The NLU settings of the flow at
            version creation.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Create time of the version.
        state (google.cloud.dialogflowcx_v3beta1.types.Version.State):
            Output only. The state of this version. This
            field is read-only and cannot be set by create
            and update methods.
    """
    class State(proto.Enum):
        r"""The state of the version."""
        STATE_UNSPECIFIED = 0
        RUNNING = 1
        SUCCEEDED = 2
        FAILED = 3
    # Field numbers are part of the wire format; do not renumber.
    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    description = proto.Field(proto.STRING, number=3,)
    nlu_settings = proto.Field(proto.MESSAGE, number=4, message=flow.NluSettings,)
    create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
    state = proto.Field(proto.ENUM, number=6, enum=State,)
class ListVersionsRequest(proto.Message):
    r"""The request message for
    [Versions.ListVersions][google.cloud.dialogflow.cx.v3beta1.Versions.ListVersions].

    Attributes:
        parent (str):
            Required. The
            [Flow][google.cloud.dialogflow.cx.v3beta1.Flow] to list all
            versions for. Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
        page_size (int):
            The maximum number of items to return in a
            single page. By default 20 and at most 100.
        page_token (str):
            The next_page_token value returned from a previous list
            request.
    """

    # Generated pagination request; field numbers are wire-format identifiers.
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
class ListVersionsResponse(proto.Message):
    r"""The response message for
    [Versions.ListVersions][google.cloud.dialogflow.cx.v3beta1.Versions.ListVersions].

    Attributes:
        versions (Sequence[google.cloud.dialogflowcx_v3beta1.types.Version]):
            A list of versions. There will be a maximum number of items
            returned based on the page_size field in the request. The
            list may in some cases be empty or contain fewer entries
            than page_size even if this isn't the last page.
        next_page_token (str):
            Token to retrieve the next page of results,
            or empty if there are no more results in the
            list.
    """

    @property
    def raw_page(self):
        # Required by the google-api-core pager machinery: identifies the
        # object that carries next_page_token for automatic iteration.
        return self

    versions = proto.RepeatedField(proto.MESSAGE, number=1, message="Version",)
    next_page_token = proto.Field(proto.STRING, number=2,)
class GetVersionRequest(proto.Message):
    r"""The request message for
    [Versions.GetVersion][google.cloud.dialogflow.cx.v3beta1.Versions.GetVersion].

    Attributes:
        name (str):
            Required. The name of the
            [Version][google.cloud.dialogflow.cx.v3beta1.Version].
            Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
    """

    # Generated request message; do not renumber fields.
    name = proto.Field(proto.STRING, number=1,)
class CreateVersionRequest(proto.Message):
    r"""The request message for
    [Versions.CreateVersion][google.cloud.dialogflow.cx.v3beta1.Versions.CreateVersion].

    Attributes:
        parent (str):
            Required. The
            [Flow][google.cloud.dialogflow.cx.v3beta1.Flow] to create an
            [Version][google.cloud.dialogflow.cx.v3beta1.Version] for.
            Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
        version (google.cloud.dialogflowcx_v3beta1.types.Version):
            Required. The version to create.
    """

    # Generated request message; do not renumber fields.
    parent = proto.Field(proto.STRING, number=1,)
    version = proto.Field(proto.MESSAGE, number=2, message="Version",)
class UpdateVersionRequest(proto.Message):
    r"""The request message for
    [Versions.UpdateVersion][google.cloud.dialogflow.cx.v3beta1.Versions.UpdateVersion].

    Attributes:
        version (google.cloud.dialogflowcx_v3beta1.types.Version):
            Required. The version to update.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The mask to control which fields get updated.
            Currently only ``description`` and ``display_name`` can be
            updated.
    """

    # Generated request message; do not renumber fields.
    version = proto.Field(proto.MESSAGE, number=1, message="Version",)
    update_mask = proto.Field(
        proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
    )
class DeleteVersionRequest(proto.Message):
    r"""The request message for
    [Versions.DeleteVersion][google.cloud.dialogflow.cx.v3beta1.Versions.DeleteVersion].

    Attributes:
        name (str):
            Required. The name of the
            [Version][google.cloud.dialogflow.cx.v3beta1.Version] to
            delete. Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
    """

    # Generated request message; do not renumber fields.
    name = proto.Field(proto.STRING, number=1,)
class LoadVersionRequest(proto.Message):
    r"""The request message for
    [Versions.LoadVersion][google.cloud.dialogflow.cx.v3beta1.Versions.LoadVersion].

    Attributes:
        name (str):
            Required. The
            [Version][google.cloud.dialogflow.cx.v3beta1.Version] to be
            loaded to draft flow. Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
        allow_override_agent_resources (bool):
            This field is used to prevent accidental overwrite of other
            agent resources, which can potentially impact other flow's
            behavior. If ``allow_override_agent_resources`` is false,
            conflicted agent-level resources will not be overridden
            (i.e. intents, entities, webhooks).
    """

    # Generated request message; do not renumber fields.
    name = proto.Field(proto.STRING, number=1,)
    allow_override_agent_resources = proto.Field(proto.BOOL, number=2,)
class CompareVersionsRequest(proto.Message):
    r"""The request message for
    [Versions.CompareVersions][google.cloud.dialogflow.cx.v3beta1.Versions.CompareVersions].

    Attributes:
        base_version (str):
            Required. Name of the base flow version to compare with the
            target version. Use version ID ``0`` to indicate the draft
            version of the specified flow.

            Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/ <Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
        target_version (str):
            Required. Name of the target flow version to compare with
            the base version. Use version ID ``0`` to indicate the draft
            version of the specified flow. Format:
            ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/versions/<Version ID>``.
        language_code (str):
            The language to compare the flow versions for.

            If not specified, the agent's default language is used.
            `Many
            languages <https://cloud.google.com/dialogflow/docs/reference/language>`__
            are supported. Note: languages must be enabled in the agent
            before they can be used.
    """

    # Generated request message; do not renumber fields.
    base_version = proto.Field(proto.STRING, number=1,)
    target_version = proto.Field(proto.STRING, number=2,)
    language_code = proto.Field(proto.STRING, number=3,)
class CompareVersionsResponse(proto.Message):
    r"""The response message for
    [Versions.CompareVersions][google.cloud.dialogflow.cx.v3beta1.Versions.CompareVersions].

    Attributes:
        base_version_content_json (str):
            JSON representation of the base version
            content.
        target_version_content_json (str):
            JSON representation of the target version
            content.
        compare_time (google.protobuf.timestamp_pb2.Timestamp):
            The timestamp when the two version compares.
    """

    # Generated response message; do not renumber fields.
    base_version_content_json = proto.Field(proto.STRING, number=1,)
    target_version_content_json = proto.Field(proto.STRING, number=2,)
    compare_time = proto.Field(
        proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
    )
# Public API of this module is exactly the proto manifest declared above.
__all__ = tuple(sorted(__protobuf__.manifest))
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Connect to the local node's JSON-RPC port; credentials are embedded in the
# URL only when a password is configured above.
# NOTE(review): credentials in a URL can leak via logs/process listings.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:9302")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9302")

# The RPC method to invoke comes from the first command-line argument.
cmd = sys.argv[1].lower()
# Interactive command dispatch (Python 2 script: print statements, raw_input).
# Each branch prompts for the RPC arguments and prints the JSON-RPC result.
# Branches with optional arguments try the full-argument call first and fall
# back to the zero-argument call on failure.
# NOTE(review): the bare "except:" clauses swallow every error (including
# KeyboardInterrupt); kept as-is in this documentation-only pass.
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a Bitcoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a Bitcoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Remaining commands: wallet queries, transfers, and node control.
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"
elif cmd == "move":
    # Move funds between accounts inside this wallet (no on-chain tx).
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
    try:
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"
elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"
elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
    # Unlocks the wallet for 60 seconds.
    try:
        pwd = raw_input("Enter wallet passphrase: ")
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
    try:
        pwd = raw_input("Enter old wallet passphrase: ")
        pwd2 = raw_input("Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
        print
else:
    print "Command not found or not supported"
| |
import sys
import re
import os
# import string
import numpy as np
import subprocess as sub
from Bio import SeqIO
from Bio.Data import CodonTable
import pandas as pd
# import scipy.sparse as sparse
# import scipy.stats as st
# import flu_module as flu
from Bio import SeqIO
from Bio.Data import CodonTable
import itertools
import matplotlib.pyplot as plt
############################################################################################
def flutraqGetBaseName(runid):
    """Return the flutraq base name for a run id.

    Shells out to flutraqGetBaseName.pl; example return value:
    "RUN1234_H3N2_MDCK_pass002_barcodeTCCAAT".  Exits the program when the
    helper script prints nothing.
    """
    # runid must be a single int value:
    runid = int(runid)
    #
    cmd = ["/home/zeldovik/darpa/reassort-paper-FULL/flutraqGetBaseName.pl","-r",str(runid)]
    output = sub.check_output(cmd)
    if output:
        return output.strip()
        # example: "RUN1234_H3N2_MDCK_pass002_barcodeTCCAAT\n"
    else:
        print "flutraqGetBaseName.pl returned nothing!"
        sys.exit(1)
def flutraqGetFORMOI(runid):
    """Return a pandas Series with run metadata: runid, OS, Favi, Ribo, MOI.

    Shells out to flutraqGetFORMOI.pl, which prints a tab-separated line like
    "1234\tOS0\tF0\tR0\tM0.001\n".  Exits the program on empty output.
    """
    # runid must be a single int value:
    runid = int(runid)
    #
    cmd = ["/home/zeldovik/darpa/reassort-paper-FULL/flutraqGetFORMOI.pl","-r",str(runid)]
    output = sub.check_output(cmd)
    if output:
        # return tuple(output.strip().split('\t')[1:])
        return pd.Series(output.strip().split('\t'),index=['runid','OS','Favi','Ribo','MOI'])
        # example "1234\tOS0\tF0\tR0\tM0.001\n"
    else:
        print "flutraqGetFORMOI.pl returned nothing!"
        sys.exit(1)
def flutraqGetDataName(runid,data_type='coverage'):
    """Build the absolute path of a flutraq data file for one run.

    Args:
        runid: a single run id (anything int() accepts).
        data_type: file flavour, e.g. 'coverage' or 'count-all'.

    Returns:
        The path under /data/zeldovich/flustore/runs/RUN<id>/.
    """
    run_number = int(runid)
    # e.g. "RUN1234_H3N2_MDCK_pass002_barcodeTCCAAT"
    base = flutraqGetBaseName(run_number)
    # strain token with the FULL suffix appended, e.g. "H3N2FULL"
    full_strain = base.split('_')[1] + 'FULL'
    file_name = "%s.%s.blast.%s" % (base, full_strain, data_type)
    return "/data/zeldovich/flustore/runs/RUN%04d/nucleoCountCumul.FULL/%s" % (run_number, file_name)
def flutraqCheckDataIntegrity(runid_list):
    """Count how many run ids in *runid_list* have an existing data file.

    Returns a (count, comma_joined_existing_runids) tuple.
    """
    # get the data file name of the corresponding runid ...
    fname_list = [ flutraqGetDataName(runid) for runid in runid_list ]
    # check directories existence ...
    existence_list = [ os.path.exists(fname) for fname in fname_list ]
    # get existing runid-s only ...
    # NOTE(review): assumes runid_list holds strings (','.join would raise
    # TypeError on ints) — callers pass str.split('/') results, which fits.
    runid_exist = ','.join(runid for runid,existence in zip(runid_list,existence_list) if existence)
    # return quantity and runids themselves ...
    return sum(existence_list),runid_exist
############################################################################################
# Directory with the pre-processed per-run SNP csv files.
# NOTE(review): the '~' is expanded by pandas.read_csv below, not by
# os.path.join — confirm if this path is ever used with plain open().
favi_data_path = "~/DATA/FaviProcessedData"
# need this for some manipulations in .freq, .coverage and other flutraq data files ...
nt_index = dict( zip(list('ATGC'),range(4)) )
# genetic code ...
genetic_code = CodonTable.standard_dna_table.forward_table
stop_codons = dict([ (codon,'*') for codon in CodonTable.standard_dna_table.stop_codons ])
# make genetic code contain both 61 aacid codons and 3 stop codons ...
genetic_code.update(stop_codons)
# Kyte-Doolittle hydrophobicity scale ...
from Bio.SeqUtils import ProtParamData
KD = ProtParamData.kd
# Sentinel hydrophobicity for stop codons:
KD['*'] = 50. # just to make ->STOP, STOP-> to stand out...
def translateCStoAAS(codon_snps):
    """Translate codon substitutions to amino-acid substitutions.

    Example: 'ATG->GTC,CAT->AAA' becomes 'M->V,H->K' (uses the module-level
    genetic_code mapping, which includes stop codons as '*').
    """
    translated = []
    for substitution in codon_snps.split(','):
        amino_acids = [genetic_code[codon] for codon in substitution.split('->')]
        translated.append('->'.join(amino_acids))
    return ','.join(translated)
def getdKDforAAS(aa_subs):
    """Compute Kyte-Doolittle hydrophobicity deltas for aa substitutions.

    Example: 'M->V,H->K' becomes 'KD(V)-KD(M),KD(K)-KD(H)' formatted to three
    decimals (uses the module-level KD scale).
    """
    deltas = []
    for substitution in aa_subs.split(','):
        aa_from, aa_to = substitution.split('->')
        deltas.append("%.3f" % (KD[aa_to] - KD[aa_from]))
    return ','.join(deltas)
# upload data ...
run_data = pd.read_csv('runid.csv')
# because there are multiple instances of 'runid' for some entries,
# let's choose workable ones first!
run_data['num_runid_exist'],run_data['runid_exist'] = zip(*run_data['runid'].str.split('/').apply(flutraqCheckDataIntegrity))
# if we have redundancy, let human decide what to do ...
if (run_data['num_runid_exist']!=1).any():
    print "There are redundant runids! Impossible to proceede ..."
    sys.exit(1)
else:
    print "Data seem well-defined! Do analysis!"
# GET INFO FOR EVERY INSTANCE ...
# flutraqGetFORMOI(runid)
# example "1234\tOS0\tF0\tR0\tM0.001\n"
run_info = run_data['runid_exist'].apply(flutraqGetFORMOI)
# merge data tables ...
run_data = run_data.merge(run_info,left_on='runid_exist',right_on='runid',suffixes=['_all','_duplicate'])
#
# fix Ribo data, 'R' is apparently no info thing, should be changed to 'R0', implying no Ribo added ...
run_data['Ribo'] = run_data['Ribo'].replace('R','R0')
# for each runid, corresponding data files 'coverage' or 'count-all' or whatever, weights
# less than or about 1MB each, so we can safely and easily upload them into memory ...
# it's ~100*(1Mb+1Mb) ~200Mb, both for coverage and for frequencies ...
# we've expanded that significantly since then: ~5X or something ...
# Cache the per-run SNP tables in memory, keyed by runid string.
snp_info = {}
# snp_info[runid] = tmp_snp_info
for runid in run_data['runid_exist']:
    snp_info[runid] = pd.read_csv( os.path.join(favi_data_path,"%s.csv"%str(runid)) )
def getSNPcount(df,fst,dst):
    """Count rows whose 'freq' exceeds *fst* and 'cover' exceeds *dst*."""
    selected = (df['freq'] > fst) & (df['cover'] > dst)
    return int(selected.sum())
def getSNPdensity(df,fst,dst):
    """Fraction of sufficiently covered positions ('cover' > dst) whose
    'freq' also exceeds *fst*.  Raises ZeroDivisionError when no position
    passes the coverage threshold (same as the original behaviour).
    """
    covered = df[df['cover'] > dst]
    hits = covered[covered['freq'] > fst]
    return hits.shape[0] / float(covered.shape[0])
def get_drug_type(drug_tuple):
    """Map an (OS, Favi, Ribo) concentration triple to a drug-group label.

    Inputs are encoded strings like 'OS0', 'F0.5', 'R0'; a concentration
    above 1e-4 counts as "drug present".  Exits the program if more than one
    drug is present (mixtures are not expected in this data set).
    """
    ost,favi,ribo = drug_tuple
    # strip the letter prefixes and parse the numeric concentrations
    ost = float( ost.lstrip('OS') )
    favi = float( favi.lstrip('F') )
    ribo = float( ribo.lstrip('R') )
    if (ost<1e-4)and(favi<1e-4)and(ribo<1e-4):
        return "NOdrug"
    elif ost>1e-4:
        return "OSelt"
    elif favi>1e-4:
        return "Favi"
    elif ribo>1e-4:
        return "Ribo"
    else:
        print "Supposedly, there are no mixtures of drugs in the data ..."
        print "Termination!"
        sys.exit(1)
#####################################
# # group run_data by drug presence/concentration 'OS' 'Favi' 'Ribo'
# run_drug_group = run_data.groupby(by=['OS','Favi','Ribo'])
# runid_grouped = run_drug_group['runid_exist']
# Label every run with its drug group and group the table by that label.
run_data['drug'] = run_data[['OS','Favi','Ribo']].apply(get_drug_type,axis=1)
run_drug_group = run_data.groupby(by='drug')
runid_grouped = run_drug_group['runid_exist']
# Bar colors, cycled one per drug group in the plot below.
colors = itertools.cycle(["r", "b", "g"])
def bar_grouped_drug(drug_group,fst,dst):
    """Draw a grouped bar chart of per-run SNP counts, one group per drug.

    Args:
        drug_group: iterable of (drug_label, runids) pairs (pandas groupby).
        fst: frequency threshold forwarded to getSNPcount.
        dst: coverage threshold forwarded to getSNPcount.

    Uses the module-level ``colors`` cycle and ``snp_info`` cache; blocks on
    plt.show().
    """
    # for rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
    x_ind = 0
    bar_width = 0.3
    x_ticks = []
    for drug, runid_exist in drug_group:
        tick_shift = 0
        color = next(colors)
        for runid in runid_exist:
            bar_height = getSNPcount(snp_info[runid],fst,dst)
            plt.bar(x_ind,bar_height,bar_width,color=color)
            x_ind += bar_width
            tick_shift += bar_width
        # collect x ticks just in case; tick sits at the group's center ...
        x_ticks.append( (drug,x_ind-0.5*tick_shift) )
        x_ind += 3.0*bar_width # make a gap between the groups ...
    #
    labels,ticks_pos = zip(*x_ticks)
    ax = plt.gca()
    ax.set_xticks(ticks_pos)
    ax.set_xticklabels(labels)
    ax.set_ylabel('SNP count')
    ax.set_xlabel('drug group')
    ax.set_title('made for d>%.1f and f>%.4f'%(dst,fst))
    plt.show()
# Threshold grids: frequency cutoffs and coverage cutoffs to tabulate.
fst_list = [0.005,0.01,0.3]
dst_list = [100,200,500,1000]
# let's create a pandas PANEL of SNP counts with items - runids, major axis d_star and minor axis f_star ...
to_panel = {}
for runid in run_data['runid_exist']:
    to_panel[runid] = {}
    for dst in dst_list:
        to_panel[runid][dst] = {}
        for fst in fst_list:
            value = getSNPcount(snp_info[runid],fst,dst)
            # print runid,fst,dst,value
            # to_panel[runid][dst].append( value )
            to_panel[runid][dst][fst] = value
# make it a panel with corresponding indexes ...
# NOTE(review): pd.Panel was removed in pandas 0.25 — this script requires an
# older pinned pandas; confirm the environment before rerunning.
rdf_panel = pd.Panel(to_panel)
# rdf_panel['981'].loc[0.010,100]
| |
from six import print_ as xprint
from six.moves import input
import random
HANGMANPICS = ['''
+---+
| |
|
|
|
|
=========''', '''
+---+
| |
O |
|
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
/|\ |
|
|
=========''', '''
+---+
| |
O |
/|\ |
/ |
|
=========''', '''
+---+
| |
O |
/|\ |
/ \ |
|
=========''']
words = ('ant baboon badger bat bear beaver camel cat clam cobra cougar '
'coyote crow deer dog donkey duck eagle ferret fox frog goat '
'goose hawk lion lizard llama mole monkey moose mouse mule newt '
'otter owl panda parrot pigeon python rabbit ram rat raven '
'rhino salmon seal shark sheep skunk sloth snake spider '
'stork swan tiger toad trout turkey turtle weasel whale wolf '
'wombat zebra ').split()
class Hangman:
    """A single game of hangman against a randomly chosen secret word."""

    def __init__(self, words):
        """Initializes the game state.

        Selects the secret word for the game by a random choice
        from a list of words.

        Args:
            words (list of strings): List of words to choose from
        """
        self._missed_letters = ''
        self._correct_letters = ''
        self._secret_word = random.choice(words)
        self._game_is_done = False

    def _display_board(self):
        """Displays the current status of the game that is being played."""
        xprint(HANGMANPICS[len(self._missed_letters)])
        xprint()
        xprint('Missed letters:', end=' ')
        for letter in self._missed_letters:
            xprint(letter, end=' ')
        xprint()
        # reveal only the correctly guessed letters of the secret word
        revealed = ''.join(
            ch if ch in self._correct_letters else '_'
            for ch in self._secret_word)
        # show the secret word with spaces in between each letter
        for letter in revealed:
            xprint(letter, end=' ')
        xprint()

    def _get_guess(self, already_guessed):
        """Prompts until the user enters a single, new, alphabetic letter."""
        while True:
            xprint('Guess a letter.')
            guess = input().lower()
            if len(guess) != 1:
                xprint('Please enter a single letter.')
                continue
            if guess in already_guessed:
                xprint('You have already guessed that letter. Choose again.')
                continue
            if guess not in 'abcdefghijklmnopqrstuvwxyz':
                xprint('Please enter a LETTER.')
                continue
            return guess

    def _check_win(self):
        """Returns True (and congratulates) once every letter is guessed."""
        if not all(ch in self._correct_letters for ch in self._secret_word):
            return False
        xprint('Yes! The secret word is "{0}"! '
               'You have won!'.format(self._secret_word))
        return True

    def _check_lost(self):
        """Returns True (after a final board) when all guesses are used up."""
        if len(self._missed_letters) != len(HANGMANPICS) - 1:
            return False
        self._display_board()
        xprint('You have run out of guesses!')
        xprint('After {0} missed guesses and {1} correct guesses, '
               'the word was "{2}"'.format(len(self._missed_letters),
                                           len(self._correct_letters),
                                           self._secret_word))
        return True

    def run(self):
        """Initialises the game play and coordinates the game activities."""
        xprint('H A N G M A N')
        while not self._game_is_done:
            self._display_board()
            guess = self._get_guess(self._missed_letters + self._correct_letters)
            if guess in self._secret_word:
                self._correct_letters += guess
                self._game_is_done = self._check_win()
            else:
                self._missed_letters += guess
                self._game_is_done = self._check_lost()
def play_again():
    """Returns True if the player wants to play again, False otherwise."""
    xprint('Do you want to play again? (yes or no)')
    answer = input().lower()
    return answer == 'yes'
def main():
    """Main application entry point: loop games until the player quits."""
    game = Hangman(words)
    while True:
        game.run()
        if not play_again():
            break
        game = Hangman(words)


if __name__ == "__main__":
    main()
| |
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1SecurityContextConstraints(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # REST operations this model participates in; consumed by the generated
    # API client to route create/read/update/delete calls.  This resource is
    # cluster-scoped ('namespaced': False).
    operations = [
        {
            'class': 'ApiV1',
            'type': 'update',
            'method': 'replace_securitycontextconstraint',
            'namespaced': False
        },
        {
            'class': 'ApiV1',
            'type': 'delete',
            'method': 'delete_securitycontextconstraint',
            'namespaced': False
        },
        {
            'class': 'ApiV1',
            'type': 'read',
            'method': 'get_securitycontextconstraint',
            'namespaced': False
        },
        {
            'class': 'ApiV1',
            'type': 'create',
            'method': 'create_securitycontextconstraint',
            'namespaced': False
        },
    ]

    # The key is attribute name
    # and the value is attribute type.
    swagger_types = {
        'kind': 'str',
        'api_version': 'str',
        'metadata': 'V1ObjectMeta',
        'priority': 'int',
        'allow_privileged_container': 'bool',
        'default_add_capabilities': 'list[V1Capability]',
        'required_drop_capabilities': 'list[V1Capability]',
        'allowed_capabilities': 'list[V1Capability]',
        'allow_host_dir_volume_plugin': 'bool',
        'volumes': 'list[V1FSType]',
        'allow_host_network': 'bool',
        'allow_host_ports': 'bool',
        'allow_host_pid': 'bool',
        'allow_host_ipc': 'bool',
        'se_linux_context': 'V1SELinuxContextStrategyOptions',
        'run_as_user': 'V1RunAsUserStrategyOptions',
        'supplemental_groups': 'V1SupplementalGroupsStrategyOptions',
        'fs_group': 'V1FSGroupStrategyOptions',
        'read_only_root_filesystem': 'bool',
        'users': 'list[str]',
        'groups': 'list[str]'
    }

    # The key is attribute name
    # and the value is json key in definition.
    attribute_map = {
        'kind': 'kind',
        'api_version': 'apiVersion',
        'metadata': 'metadata',
        'priority': 'priority',
        'allow_privileged_container': 'allowPrivilegedContainer',
        'default_add_capabilities': 'defaultAddCapabilities',
        'required_drop_capabilities': 'requiredDropCapabilities',
        'allowed_capabilities': 'allowedCapabilities',
        'allow_host_dir_volume_plugin': 'allowHostDirVolumePlugin',
        'volumes': 'volumes',
        'allow_host_network': 'allowHostNetwork',
        'allow_host_ports': 'allowHostPorts',
        'allow_host_pid': 'allowHostPID',
        'allow_host_ipc': 'allowHostIPC',
        'se_linux_context': 'seLinuxContext',
        'run_as_user': 'runAsUser',
        'supplemental_groups': 'supplementalGroups',
        'fs_group': 'fsGroup',
        'read_only_root_filesystem': 'readOnlyRootFilesystem',
        'users': 'users',
        'groups': 'groups'
    }
    def __init__(self, kind=None, api_version=None, metadata=None, priority=None, allow_privileged_container=None, default_add_capabilities=None, required_drop_capabilities=None, allowed_capabilities=None, allow_host_dir_volume_plugin=None, volumes=None, allow_host_network=None, allow_host_ports=None, allow_host_pid=None, allow_host_ipc=None, se_linux_context=None, run_as_user=None, supplemental_groups=None, fs_group=None, read_only_root_filesystem=None, users=None, groups=None):
        """
        V1SecurityContextConstraints - a model defined in Swagger
        """
        # Backing fields for the generated properties below; no validation is
        # performed here — values are stored as passed.
        self._kind = kind
        self._api_version = api_version
        self._metadata = metadata
        self._priority = priority
        self._allow_privileged_container = allow_privileged_container
        self._default_add_capabilities = default_add_capabilities
        self._required_drop_capabilities = required_drop_capabilities
        self._allowed_capabilities = allowed_capabilities
        self._allow_host_dir_volume_plugin = allow_host_dir_volume_plugin
        self._volumes = volumes
        self._allow_host_network = allow_host_network
        self._allow_host_ports = allow_host_ports
        self._allow_host_pid = allow_host_pid
        self._allow_host_ipc = allow_host_ipc
        self._se_linux_context = se_linux_context
        self._run_as_user = run_as_user
        self._supplemental_groups = supplemental_groups
        self._fs_group = fs_group
        self._read_only_root_filesystem = read_only_root_filesystem
        self._users = users
        self._groups = groups
@property
def kind(self):
"""
Gets the kind of this V1SecurityContextConstraints.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1SecurityContextConstraints.
:rtype: str
"""
return self._kind
    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1SecurityContextConstraints.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#types-kinds

        :param kind: The kind of this V1SecurityContextConstraints.
        :type: str
        """
        self._kind = kind

    @property
    def api_version(self):
        """
        Gets the api_version of this V1SecurityContextConstraints.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources

        :return: The api_version of this V1SecurityContextConstraints.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1SecurityContextConstraints.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.2/docs/devel/api-conventions.md#resources

        :param api_version: The api_version of this V1SecurityContextConstraints.
        :type: str
        """
        self._api_version = api_version

    @property
    def metadata(self):
        """
        Gets the metadata of this V1SecurityContextConstraints.

        :return: The metadata of this V1SecurityContextConstraints.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1SecurityContextConstraints.

        :param metadata: The metadata of this V1SecurityContextConstraints.
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def priority(self):
        """
        Gets the priority of this V1SecurityContextConstraints.

        Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. If scores for multiple SCCs are equal they will be sorted by name.

        :return: The priority of this V1SecurityContextConstraints.
        :rtype: int
        """
        return self._priority

    @priority.setter
    def priority(self, priority):
        """
        Sets the priority of this V1SecurityContextConstraints.

        Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. If scores for multiple SCCs are equal they will be sorted by name.

        :param priority: The priority of this V1SecurityContextConstraints.
        :type: int
        """
        self._priority = priority
    @property
    def allow_privileged_container(self):
        """
        Gets the allow_privileged_container of this V1SecurityContextConstraints.

        AllowPrivilegedContainer determines if a container can request to be run as privileged.

        :return: The allow_privileged_container of this V1SecurityContextConstraints.
        :rtype: bool
        """
        return self._allow_privileged_container

    @allow_privileged_container.setter
    def allow_privileged_container(self, allow_privileged_container):
        """
        Sets the allow_privileged_container of this V1SecurityContextConstraints.

        AllowPrivilegedContainer determines if a container can request to be run as privileged.

        :param allow_privileged_container: The allow_privileged_container of this V1SecurityContextConstraints.
        :type: bool
        """
        self._allow_privileged_container = allow_privileged_container

    @property
    def default_add_capabilities(self):
        """
        Gets the default_add_capabilities of this V1SecurityContextConstraints.

        DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities.

        :return: The default_add_capabilities of this V1SecurityContextConstraints.
        :rtype: list[V1Capability]
        """
        return self._default_add_capabilities

    @default_add_capabilities.setter
    def default_add_capabilities(self, default_add_capabilities):
        """
        Sets the default_add_capabilities of this V1SecurityContextConstraints.

        DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities.

        :param default_add_capabilities: The default_add_capabilities of this V1SecurityContextConstraints.
        :type: list[V1Capability]
        """
        self._default_add_capabilities = default_add_capabilities

    @property
    def required_drop_capabilities(self):
        """
        Gets the required_drop_capabilities of this V1SecurityContextConstraints.

        RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.

        :return: The required_drop_capabilities of this V1SecurityContextConstraints.
        :rtype: list[V1Capability]
        """
        return self._required_drop_capabilities

    @required_drop_capabilities.setter
    def required_drop_capabilities(self, required_drop_capabilities):
        """
        Sets the required_drop_capabilities of this V1SecurityContextConstraints.

        RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.

        :param required_drop_capabilities: The required_drop_capabilities of this V1SecurityContextConstraints.
        :type: list[V1Capability]
        """
        self._required_drop_capabilities = required_drop_capabilities

    @property
    def allowed_capabilities(self):
        """
        Gets the allowed_capabilities of this V1SecurityContextConstraints.

        AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.

        :return: The allowed_capabilities of this V1SecurityContextConstraints.
        :rtype: list[V1Capability]
        """
        return self._allowed_capabilities

    @allowed_capabilities.setter
    def allowed_capabilities(self, allowed_capabilities):
        """
        Sets the allowed_capabilities of this V1SecurityContextConstraints.

        AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.

        :param allowed_capabilities: The allowed_capabilities of this V1SecurityContextConstraints.
        :type: list[V1Capability]
        """
        self._allowed_capabilities = allowed_capabilities

    @property
    def allow_host_dir_volume_plugin(self):
        """
        Gets the allow_host_dir_volume_plugin of this V1SecurityContextConstraints.

        AllowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin.

        :return: The allow_host_dir_volume_plugin of this V1SecurityContextConstraints.
        :rtype: bool
        """
        return self._allow_host_dir_volume_plugin

    @allow_host_dir_volume_plugin.setter
    def allow_host_dir_volume_plugin(self, allow_host_dir_volume_plugin):
        """
        Sets the allow_host_dir_volume_plugin of this V1SecurityContextConstraints.

        AllowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin.

        :param allow_host_dir_volume_plugin: The allow_host_dir_volume_plugin of this V1SecurityContextConstraints.
        :type: bool
        """
        self._allow_host_dir_volume_plugin = allow_host_dir_volume_plugin

    @property
    def volumes(self):
        """
        Gets the volumes of this V1SecurityContextConstraints.

        :return: The volumes of this V1SecurityContextConstraints.
        :rtype: list[V1FSType]
        """
        return self._volumes

    @volumes.setter
    def volumes(self, volumes):
        """
        Sets the volumes of this V1SecurityContextConstraints.

        :param volumes: The volumes of this V1SecurityContextConstraints.
        :type: list[V1FSType]
        """
        self._volumes = volumes
    @property
    def allow_host_network(self):
        """
        Gets the allow_host_network of this V1SecurityContextConstraints.

        AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.

        :return: The allow_host_network of this V1SecurityContextConstraints.
        :rtype: bool
        """
        return self._allow_host_network

    @allow_host_network.setter
    def allow_host_network(self, allow_host_network):
        """
        Sets the allow_host_network of this V1SecurityContextConstraints.

        AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.

        :param allow_host_network: The allow_host_network of this V1SecurityContextConstraints.
        :type: bool
        """
        self._allow_host_network = allow_host_network

    @property
    def allow_host_ports(self):
        """
        Gets the allow_host_ports of this V1SecurityContextConstraints.

        AllowHostPorts determines if the policy allows host ports in the containers.

        :return: The allow_host_ports of this V1SecurityContextConstraints.
        :rtype: bool
        """
        return self._allow_host_ports

    @allow_host_ports.setter
    def allow_host_ports(self, allow_host_ports):
        """
        Sets the allow_host_ports of this V1SecurityContextConstraints.

        AllowHostPorts determines if the policy allows host ports in the containers.

        :param allow_host_ports: The allow_host_ports of this V1SecurityContextConstraints.
        :type: bool
        """
        self._allow_host_ports = allow_host_ports

    @property
    def allow_host_pid(self):
        """
        Gets the allow_host_pid of this V1SecurityContextConstraints.

        AllowHostPID determines if the policy allows host pid in the containers.

        :return: The allow_host_pid of this V1SecurityContextConstraints.
        :rtype: bool
        """
        return self._allow_host_pid

    @allow_host_pid.setter
    def allow_host_pid(self, allow_host_pid):
        """
        Sets the allow_host_pid of this V1SecurityContextConstraints.

        AllowHostPID determines if the policy allows host pid in the containers.

        :param allow_host_pid: The allow_host_pid of this V1SecurityContextConstraints.
        :type: bool
        """
        self._allow_host_pid = allow_host_pid

    @property
    def allow_host_ipc(self):
        """
        Gets the allow_host_ipc of this V1SecurityContextConstraints.

        AllowHostIPC determines if the policy allows host ipc in the containers.

        :return: The allow_host_ipc of this V1SecurityContextConstraints.
        :rtype: bool
        """
        return self._allow_host_ipc

    @allow_host_ipc.setter
    def allow_host_ipc(self, allow_host_ipc):
        """
        Sets the allow_host_ipc of this V1SecurityContextConstraints.

        AllowHostIPC determines if the policy allows host ipc in the containers.

        :param allow_host_ipc: The allow_host_ipc of this V1SecurityContextConstraints.
        :type: bool
        """
        self._allow_host_ipc = allow_host_ipc

    @property
    def se_linux_context(self):
        """
        Gets the se_linux_context of this V1SecurityContextConstraints.

        SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.

        :return: The se_linux_context of this V1SecurityContextConstraints.
        :rtype: V1SELinuxContextStrategyOptions
        """
        return self._se_linux_context

    @se_linux_context.setter
    def se_linux_context(self, se_linux_context):
        """
        Sets the se_linux_context of this V1SecurityContextConstraints.

        SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.

        :param se_linux_context: The se_linux_context of this V1SecurityContextConstraints.
        :type: V1SELinuxContextStrategyOptions
        """
        self._se_linux_context = se_linux_context
    @property
    def run_as_user(self):
        """
        Gets the run_as_user of this V1SecurityContextConstraints.

        RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.

        :return: The run_as_user of this V1SecurityContextConstraints.
        :rtype: V1RunAsUserStrategyOptions
        """
        return self._run_as_user

    @run_as_user.setter
    def run_as_user(self, run_as_user):
        """
        Sets the run_as_user of this V1SecurityContextConstraints.

        RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.

        :param run_as_user: The run_as_user of this V1SecurityContextConstraints.
        :type: V1RunAsUserStrategyOptions
        """
        self._run_as_user = run_as_user

    @property
    def supplemental_groups(self):
        """
        Gets the supplemental_groups of this V1SecurityContextConstraints.

        SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.

        :return: The supplemental_groups of this V1SecurityContextConstraints.
        :rtype: V1SupplementalGroupsStrategyOptions
        """
        return self._supplemental_groups

    @supplemental_groups.setter
    def supplemental_groups(self, supplemental_groups):
        """
        Sets the supplemental_groups of this V1SecurityContextConstraints.

        SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.

        :param supplemental_groups: The supplemental_groups of this V1SecurityContextConstraints.
        :type: V1SupplementalGroupsStrategyOptions
        """
        self._supplemental_groups = supplemental_groups

    @property
    def fs_group(self):
        """
        Gets the fs_group of this V1SecurityContextConstraints.

        FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.

        :return: The fs_group of this V1SecurityContextConstraints.
        :rtype: V1FSGroupStrategyOptions
        """
        return self._fs_group

    @fs_group.setter
    def fs_group(self, fs_group):
        """
        Sets the fs_group of this V1SecurityContextConstraints.

        FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.

        :param fs_group: The fs_group of this V1SecurityContextConstraints.
        :type: V1FSGroupStrategyOptions
        """
        self._fs_group = fs_group

    @property
    def read_only_root_filesystem(self):
        """
        Gets the read_only_root_filesystem of this V1SecurityContextConstraints.

        ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.

        :return: The read_only_root_filesystem of this V1SecurityContextConstraints.
        :rtype: bool
        """
        return self._read_only_root_filesystem

    @read_only_root_filesystem.setter
    def read_only_root_filesystem(self, read_only_root_filesystem):
        """
        Sets the read_only_root_filesystem of this V1SecurityContextConstraints.

        ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.

        :param read_only_root_filesystem: The read_only_root_filesystem of this V1SecurityContextConstraints.
        :type: bool
        """
        self._read_only_root_filesystem = read_only_root_filesystem

    @property
    def users(self):
        """
        Gets the users of this V1SecurityContextConstraints.

        The users who have permissions to use this security context constraints.

        :return: The users of this V1SecurityContextConstraints.
        :rtype: list[str]
        """
        return self._users

    @users.setter
    def users(self, users):
        """
        Sets the users of this V1SecurityContextConstraints.

        The users who have permissions to use this security context constraints.

        :param users: The users of this V1SecurityContextConstraints.
        :type: list[str]
        """
        self._users = users

    @property
    def groups(self):
        """
        Gets the groups of this V1SecurityContextConstraints.

        The groups that have permission to use this security context constraints.

        :return: The groups of this V1SecurityContextConstraints.
        :rtype: list[str]
        """
        return self._groups

    @groups.setter
    def groups(self, groups):
        """
        Sets the groups of this V1SecurityContextConstraints.

        The groups that have permission to use this security context constraints.

        :param groups: The groups of this V1SecurityContextConstraints.
        :type: list[str]
        """
        self._groups = groups
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1SecurityContextConstraints.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """
        Returns the string representation of the model.
        """
        # pprint.pformat yields a stable, readable multi-line rendering.
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        # Reuse the dict-based rendering so repr stays in sync with to_str().
        return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal.
        """
        # Delegate to __eq__ so the two comparisons stay consistent.
        return not self == other
| |
import logging
from .client import Client
from .lock import Lock
_log = logging.getLogger(__name__)

# Prevent "no handler" warnings to stderr in projects that do not configure
# logging.
try:
    from logging import NullHandler
except ImportError:
    # Python <2.7 has no NullHandler; define a minimal stand-in that
    # silently discards every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

_log.addHandler(NullHandler())
class EtcdResult(object):
    """
    Result of an etcd API call: one node (value or directory) together
    with the action that produced it and, optionally, the previous node.
    """

    # Node attributes copied from the response dict, with their defaults.
    _node_props = {
        'key': None,
        'value': None,
        'expiration': None,
        'ttl': None,
        'modifiedIndex': None,
        'createdIndex': None,
        'newKey': False,
        'dir': False,
    }

    def __init__(self, action=None, node=None, prevNode=None, **kwdargs):
        """
        Creates an EtcdResult object.

        Args:
            action (str): The action that resulted in key creation

            node (dict): The dictionary containing all node information.

            prevNode (dict): The dictionary containing previous node information.
        """
        self.action = action
        if node is None:
            # Tolerate a missing node (previously `key in None` raised
            # TypeError); all node properties then keep their defaults.
            node = {}
        for (key, default) in self._node_props.items():
            setattr(self, key, node.get(key, default))

        self._children = []
        if self.dir and 'nodes' in node:
            # We keep the data in raw format, converting them only when needed
            self._children = node['nodes']

        if prevNode:
            self._prev_node = EtcdResult(None, node=prevNode)
            # See issue 38: when returning a write() op etcd has a bogus result.
            if self._prev_node.dir and not self.dir:
                self.dir = True

    def parse_headers(self, response):
        """Extract the etcd/raft indices from the response headers."""
        headers = response.getheaders()
        self.etcd_index = int(headers.get('x-etcd-index', 1))
        self.raft_index = int(headers.get('x-raft-index', 1))

    def get_subtree(self, leaves_only=False):
        """
        Get all the subtree resulting from a recursive=true call to etcd.

        Args:
            leaves_only (bool): if true, only value nodes are returned
        """
        if not self._children:
            # The current result is a leaf: yield itself and stop.
            yield self
            return

        # Node is not a leaf.
        if not leaves_only:
            yield self
        for n in self._children:
            node = EtcdResult(None, n)
            for child in node.get_subtree(leaves_only=leaves_only):
                yield child

    @property
    def leaves(self):
        """Iterator over the leaf (value) nodes of this result."""
        return self.get_subtree(leaves_only=True)

    @property
    def children(self):
        """ Deprecated, use EtcdResult.leaves instead """
        return self.leaves

    def __eq__(self, other):
        if not (type(self) is type(other)):
            return False
        for k in self._node_props.keys():
            try:
                if getattr(self, k) != getattr(other, k):
                    return False
            except AttributeError:
                # A missing node property on either side means "not equal";
                # previously a bare `except:` also swallowed unrelated errors.
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s(%r)" % (self.__class__, self.__dict__)
class EtcdException(Exception):
    """
    Generic Etcd Exception.
    """
    def __init__(self, message=None, payload=None):
        # Keep the decoded JSON error payload around for callers that need
        # more than the message text.
        super(EtcdException, self).__init__(message)
        self.payload = payload

class EtcdValueError(EtcdException, ValueError):
    """
    Base class for Etcd value-related errors.
    """
    pass

class EtcdCompareFailed(EtcdValueError):
    """
    Compare-and-swap failure (101).
    """
    pass

class EtcdClusterIdChanged(EtcdException):
    """
    The etcd cluster ID changed. This may indicate the cluster was replaced
    with a backup. Raised to prevent waiting on an etcd_index that was only
    valid on the old cluster.
    """
    pass

class EtcdKeyError(EtcdException):
    """
    Etcd Generic KeyError Exception
    """
    pass

class EtcdKeyNotFound(EtcdKeyError):
    """
    Etcd key not found exception (100)
    """
    pass

class EtcdNotFile(EtcdKeyError):
    """
    Etcd not a file exception (102)
    """
    pass

class EtcdNotDir(EtcdKeyError):
    """
    Etcd not a directory exception (104)
    """
    pass

class EtcdAlreadyExist(EtcdKeyError):
    """
    Etcd already exist exception (105)
    """
    pass

class EtcdEventIndexCleared(EtcdException):
    """
    Etcd event index is outdated and cleared exception (401)
    """
    pass

class EtcdConnectionFailed(EtcdException):
    """
    Connection to etcd failed.
    """
    def __init__(self, message=None, payload=None, cause=None):
        super(EtcdConnectionFailed, self).__init__(message=message,
                                                   payload=payload)
        # The underlying transport-level exception, when available.
        self.cause = cause

class EtcdWatchTimedOut(EtcdConnectionFailed):
    """
    A watch timed out without returning a result.
    """
    pass

class EtcdWatcherCleared(EtcdException):
    """
    Watcher is cleared due to etcd recovery (400).
    """
    pass

class EtcdLeaderElectionInProgress(EtcdException):
    """
    Request failed due to in-progress leader election (301).
    """
    pass

class EtcdRootReadOnly(EtcdKeyError):
    """
    Operation is not valid on the root, which is read only (107).
    """
    pass

class EtcdDirNotEmpty(EtcdValueError):
    """
    Directory not empty (108).
    """
    pass

class EtcdLockExpired(EtcdException):
    """
    Our lock apparently expired while we were trying to acquire it.
    """
class EtcdError(object):
    """
    Maps etcd v2 API error codes onto the exception classes above;
    use :meth:`handle` to raise the right exception from a payload.
    """
    # See https://github.com/coreos/etcd/blob/master/Documentation/errorcode.md
    error_exceptions = {
        100: EtcdKeyNotFound,
        101: EtcdCompareFailed,
        102: EtcdNotFile,
        # 103: Non-public: no more peers.
        104: EtcdNotDir,
        105: EtcdAlreadyExist,
        # 106: Non-public: key is preserved.
        107: EtcdRootReadOnly,
        108: EtcdDirNotEmpty,
        # 109: Non-public: existing peer addr.
        200: EtcdValueError,
        201: EtcdValueError,
        202: EtcdValueError,
        203: EtcdValueError,
        204: EtcdValueError,
        205: EtcdValueError,
        206: EtcdValueError,
        207: EtcdValueError,
        208: EtcdValueError,
        209: EtcdValueError,
        210: EtcdValueError,
        # 300: Non-public: Raft internal error.
        301: EtcdLeaderElectionInProgress,
        400: EtcdWatcherCleared,
        401: EtcdEventIndexCleared,
    }

    @classmethod
    def handle(cls, payload):
        """
        Decodes the error and throws the appropriate error message

        :param payload: The decoded JSON error payload as a dict.
        """
        error_code = payload.get("errorCode")
        message = payload.get("message")
        cause = payload.get("cause")
        msg = '{} : {}'.format(message, cause)
        # Unknown codes fall back to the generic EtcdException.
        exc = cls.error_exceptions.get(error_code, EtcdException)
        if issubclass(exc, EtcdException):
            # Etcd exceptions also carry the raw payload.
            raise exc(msg, payload)
        else:
            raise exc(msg)
# Attempt to enable urllib3's SNI support, if possible
# Blatantly copied from requests.
# Best effort only: silently skipped when pyopenssl is not installed.
try:
    from urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    pass
| |
from __future__ import absolute_import
import numpy as nm
import numpy.linalg as nla
from sfepy.base.base import assert_, Struct
from sfepy.discrete import PolySpace
from sfepy.linalg import combine, insert_strided_axis
from six.moves import range
from functools import reduce
# Requires fixed vertex numbering!
# Maps spatial dimension -> the 0/1 corner coordinates of the reference
# tensor-product (hypercube) cell; dimension 0 is a single point.
vertex_maps = {3 : [[0, 0, 0],
                    [1, 0, 0],
                    [1, 1, 0],
                    [0, 1, 0],
                    [0, 0, 1],
                    [1, 0, 1],
                    [1, 1, 1],
                    [0, 1, 1]],
               2 : [[0, 0],
                    [1, 0],
                    [1, 1],
                    [0, 1]],
               1 : [[0],
                    [1]],
               0 : [[0]]}
class LagrangeNodes(Struct):
    """Helper class for defining nodes of Lagrange elements.

    All ``append_*()`` methods fill rows of `nodes` (integer node weight
    vectors) and `nts` (node type records ``[node_type, facet_index]``)
    in place, starting at row `iseq`, and return the updated `iseq`.
    """

    @staticmethod
    def append_edges(nodes, nts, iseq, nt, edges, order):
        """Append the interior edge nodes of the given `order` (simplex
        variant); `nt` is the node type code stored in `nts`."""
        delta = 1.0 / float(order)
        for ii, edge in enumerate(edges):
            n1 = nodes[edge[0],:].copy()
            n2 = nodes[edge[1],:].copy()
            for ie in range(order - 1):
                c2 = ie + 1
                c1 = order - c2
                nts[iseq] = [nt, ii]
                # Integer combination of the two end-point weight vectors.
                aux = [int(round(tmp)) for tmp in delta * (c1 * n1 + c2 * n2)]
                nodes[iseq,:] = aux
                iseq += 1
        return iseq

    @staticmethod
    def append_faces(nodes, nts, iseq, nt, faces, order):
        """Append the interior nodes of triangular faces (simplex
        variant); needs ``order >= 3`` to add anything."""
        delta = 1.0 / float(order)
        for ii, face in enumerate(faces):
            n1 = nodes[face[0],:].copy()
            n2 = nodes[face[1],:].copy()
            n3 = nodes[face[2],:].copy()
            for i1 in range(order - 2):
                for i2 in range(order - 2 - i1):
                    c3 = i1 + 1
                    c2 = i2 + 1
                    c1 = order - c3 - c2
                    nts[iseq] = [nt, ii]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * n1 + c2 * n2 + c3 * n3)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq

    @staticmethod
    def append_bubbles(nodes, nts, iseq, nt, order):
        """Append the interior (bubble) nodes of a tetrahedron; the first
        four rows of `nodes` are taken as the vertex weight vectors."""
        delta = 1.0 / float(order)
        n1 = nodes[0,:].copy()
        n2 = nodes[1,:].copy()
        n3 = nodes[2,:].copy()
        n4 = nodes[3,:].copy()
        for i1 in range(order - 3):
            for i2 in range(order - 3):
                for i3 in range(order - 3 - i1 - i2):
                    c4 = i1 + 1
                    c3 = i2 + 1
                    c2 = i3 + 1
                    c1 = order - c4 - c3 - c2
                    # Bubble nodes have no facet index - always 0.
                    nts[iseq] = [nt, 0]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * n1 + c2 * n2 + c3 * n3 + c4 * n4)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq

    @staticmethod
    def append_tp_edges(nodes, nts, iseq, nt, edges, ao):
        """Append interior edge nodes, tensor-product variant; `ao` is the
        axial order."""
        delta = 1.0 / float(ao)
        for ii, edge in enumerate(edges):
            n1 = nodes[edge[0],:].copy()
            n2 = nodes[edge[1],:].copy()
            for ie in range(ao - 1):
                c2 = ie + 1
                c1 = ao - c2
                nts[iseq] = [nt, ii]
                aux = [int(round(tmp)) for tmp in delta * (c1 * n1 + c2 * n2)]
                nodes[iseq,:] = aux
                iseq += 1
        return iseq

    @staticmethod
    def append_tp_faces(nodes, nts, iseq, nt, faces, ao):
        """Append interior nodes of quadrilateral faces (tensor-product
        variant); weights are products of two axial weights."""
        delta = 1.0 / (float(ao) ** 2)
        for ii, face in enumerate(faces):
            n1 = nodes[face[0],:].copy()
            n2 = nodes[face[1],:].copy()
            n3 = nodes[face[2],:].copy()
            n4 = nodes[face[3],:].copy()
            for i1 in range(ao - 1):
                for i2 in range(ao - 1):
                    c4 = i1 + 1
                    c3 = i2 + 1
                    c2 = ao - c4
                    c1 = ao - c3
                    nts[iseq] = [nt, ii]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * c2 * n1 + c2 * c3 * n2
                                       + c3 * c4 * n3 + c4 * c1 * n4)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq

    @staticmethod
    def append_tp_bubbles(nodes, nts, iseq, nt, ao):
        """Append interior (bubble) nodes of a hexahedron; the first eight
        rows of `nodes` are taken as the vertex weight vectors."""
        delta = 1.0 / (float(ao) ** 3)
        n1 = nodes[0,:].copy()
        n2 = nodes[1,:].copy()
        n3 = nodes[2,:].copy()
        n4 = nodes[3,:].copy()
        n5 = nodes[4,:].copy()
        n6 = nodes[5,:].copy()
        n7 = nodes[6,:].copy()
        n8 = nodes[7,:].copy()
        for i1 in range(ao - 1):
            for i2 in range(ao - 1):
                for i3 in range(ao - 1):
                    c6 = i1 + 1
                    c5 = i2 + 1
                    c4 = i3 + 1
                    c3 = ao - c6
                    c2 = ao - c5
                    c1 = ao - c4
                    nts[iseq] = [nt, 0]
                    aux = [int(round(tmp)) for tmp
                           in delta * (c1 * c2 * c3 * n1 + c4 * c2 * c3 * n2
                                       + c5 * c4 * c3 * n3 + c1 * c3 * c5 * n4
                                       + c1 * c2 * c6 * n5 + c4 * c2 * c6 * n6
                                       + c5 * c4 * c6 * n7 + c1 * c6 * c5 * n8)]
                    nodes[iseq,:] = aux
                    iseq += 1
        return iseq
class NodeDescription(Struct):
    """
    Describe FE nodes defined on different parts of a reference element.
    """

    def _describe_facets(self, ii):
        # Node type rows of the selected nodes; column 1 holds the facet
        # (edge/face) index each node belongs to.
        nts = self.node_types[ii]

        # Positions where the facet index grows separate facet groups.
        breaks = nm.where(nts[1:, 1] > nts[:-1, 1])[0]
        if len(breaks) == 0:
            return None, 0

        ii = ii.astype(nm.int32)
        bounds = nm.r_[0, breaks + 1, nts.shape[0]]
        ifacets = [ii[i0:i1] for i0, i1 in zip(bounds[:-1], bounds[1:])]

        # The per-facet DOF count is taken from the first facet group.
        return ifacets, len(ifacets[0])

    def _describe_other(self, ii):
        # Non-facet nodes need no per-facet grouping.
        return (ii, len(ii)) if len(ii) else (None, 0)

    def _get_facet_nodes(self, ifacets, nodes):
        if ifacets is None:
            return None

        return [nodes[jj] for jj in ifacets]

    def _get_nodes(self, ii, nodes):
        return None if ii is None else nodes[ii]

    def __init__(self, node_types, nodes):
        self.node_types = node_types

        def of_kind(kind):
            # Indices of nodes of the given type (column 0 of node_types).
            return nm.where(node_types[:, 0] == kind)[0]

        # Vertex nodes.
        self.vertex, self.n_vertex_nod = self._describe_other(of_kind(0))
        self.vertex_nodes = self._get_nodes(self.vertex, nodes)

        # Edge nodes.
        self.edge, self.n_edge_nod = self._describe_facets(of_kind(1))
        self.edge_nodes = self._get_facet_nodes(self.edge, nodes)

        # Face nodes.
        self.face, self.n_face_nod = self._describe_facets(of_kind(2))
        self.face_nodes = self._get_facet_nodes(self.face, nodes)

        # Bubble nodes.
        self.bubble, self.n_bubble_nod = self._describe_other(of_kind(3))
        self.bubble_nodes = self._get_nodes(self.bubble, nodes)

    def has_extra_nodes(self):
        """
        Return True if the element has some edge, face or bubble nodes.
        """
        return (self.n_edge_nod + self.n_face_nod + self.n_bubble_nod) > 0
class FEPolySpace(PolySpace):
    """
    Base for FE polynomial space classes.
    """

    def get_mtx_i(self):
        # Return the coordinate-transformation matrix; presumably set up by
        # subclasses (e.g. LagrangeSimplexPolySpace.__init__) - see callers.
        return self.mtx_i

    def describe_nodes(self):
        """Return a NodeDescription of this space's node types and nodes."""
        return NodeDescription(self.nts, self.nodes)
class LagrangePolySpace(FEPolySpace):
    """
    Common C-context based evaluation for Lagrange polynomial spaces.
    """

    def create_context(self, cmesh, eps, check_errors, i_max, newton_eps,
                       tdim=None):
        """
        Create a CLagrangeContext used for basis evaluation.

        When `cmesh` is None, a mesh-less context is created and `tdim`
        must be given explicitly.
        """
        from sfepy.discrete.fem.extmods.bases import CLagrangeContext

        ref_coors = self.geometry.coors

        if cmesh is not None:
            mesh_coors = cmesh.coors
            conn = cmesh.get_conn(cmesh.tdim, 0)
            mesh_conn = conn.indices.reshape(cmesh.n_el, -1).astype(nm.int32)
            if tdim is None:
                tdim = cmesh.tdim

        else:
            mesh_coors = mesh_conn = None

        if tdim is None:
            raise ValueError('supply either cmesh or tdim!')

        ctx = CLagrangeContext(order=self.order,
                               tdim=tdim,
                               nodes=self.nodes,
                               ref_coors=ref_coors,
                               mesh_coors=mesh_coors,
                               mesh_conn=mesh_conn,
                               mtx_i=self.get_mtx_i(),
                               eps=eps,
                               check_errors=check_errors,
                               i_max=i_max,
                               newton_eps=newton_eps)

        return ctx

    def _eval_base(self, coors, diff=0, ori=None,
                   suppress_errors=False, eps=1e-15):
        """
        See :func:`PolySpace.eval_base()`.
        """
        # Second derivatives are computed in Python (_eval_hessian, defined
        # by subclasses); everything else goes through the C context.
        if diff == 2:
            base = self._eval_hessian(coors)

        else:
            base = self.eval_ctx.evaluate(coors, diff=diff,
                                          eps=eps,
                                          check_errors=not suppress_errors)

        return base
class LagrangeSimplexPolySpace(LagrangePolySpace):
    """Lagrange polynomial space on a simplex domain."""
    name = 'lagrange_simplex'

    def __init__(self, name, geometry, order, init_context=True):
        # Set up the vertex-coordinate matrix extended by a row of ones;
        # its inverse (mtx_i) maps spatial to barycentric-like coordinates.
        PolySpace.__init__(self, name, geometry, order)

        n_v = geometry.n_vertex
        mtx = nm.ones((n_v, n_v), nm.float64)
        mtx[0:n_v-1,:] = nm.transpose(geometry.coors)
        self.mtx_i = nm.ascontiguousarray(nla.inv(mtx))
        self.rhs = nm.ones((n_v,), nm.float64)

        self.nodes, self.nts, node_coors = self._define_nodes()
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]

        if init_context:
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=n_v - 1)

        else:
            self.eval_ctx = None

    def _define_nodes(self):
        """
        Generate the node weight vectors, node type records and node
        coordinates for this space.
        """
        # Factorial.
        fac = lambda n : reduce(lambda a, b : a * (b + 1), range(n), 1)

        geometry = self.geometry
        n_v, dim = geometry.n_vertex, geometry.dim
        order = self.order

        # Number of nodes of a P_order simplex: (order + dim)! / (order! dim!).
        n_nod = fac(order + dim) // (fac(order) * fac(dim))
        ## print n_nod, gd
        nodes = nm.zeros((n_nod, n_v), nm.int32)
        nts = nm.zeros((n_nod, 2), nm.int32)

        if order == 0:
            # A single bubble node.
            nts[0,:] = [3, 0]
            nodes[0,:] = nm.zeros((n_v,), nm.int32)

        else:
            iseq = 0

            # Vertex nodes.
            nts[0:n_v,0] = 0
            nts[0:n_v,1] = nm.arange(n_v, dtype = nm.int32)
            aux = order * nm.identity(n_v, dtype = nm.int32)
            nodes[iseq:iseq+n_v,:] = aux
            iseq += n_v

            # Edge, face and bubble nodes, depending on the dimension.
            if dim == 0:
                pass

            elif dim == 1:
                iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 3,
                                                  [[0, 1]], order)
            elif dim == 2:
                iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 1,
                                                  geometry.edges, order)
                iseq = LagrangeNodes.append_faces(nodes, nts, iseq, 3,
                                                  [[0, 1, 2]], order)
            elif dim == 3:
                iseq = LagrangeNodes.append_edges(nodes, nts, iseq, 1,
                                                  geometry.edges, order)
                iseq = LagrangeNodes.append_faces(nodes, nts, iseq, 2,
                                                  geometry.faces, order)
                iseq = LagrangeNodes.append_bubbles(nodes, nts, iseq, 3,
                                                    order)
            else:
                raise NotImplementedError

        ## print nm.concatenate((nts, nodes), 1)

        # Check orders - each node's weights must sum to the space order.
        orders = nm.sum(nodes, 1)
        if not nm.all(orders == order):
            raise AssertionError('wrong orders! (%d == all of %s)'
                                 % (order, orders))

        # Coordinates of the nodes.
        if order == 0:
            # The single node sits at the element barycentre.
            tmp = nm.ones((n_nod, n_v), nm.int32)
            node_coors = nm.dot(tmp, geometry.coors) / n_v

        else:
            node_coors = nm.dot(nodes, geometry.coors) / order

        return nodes, nts, node_coors

    def _eval_hessian(self, coors):
        """
        Evaluate the second derivatives of the basis.
        """
        def get_bc(coor):
            # Barycentric-like coordinates of a point via mtx_i.
            rhs = nm.concatenate((coor, [1]))
            bc = nm.dot(self.mtx_i, rhs)

            return bc

        def get_val(bc, node, omit=[]):
            # Product of the 1D Lagrange factors, skipping directions in omit.
            val = nm.ones(1, nm.float64)
            for i1 in range(bc.shape[0]):
                if i1 in omit: continue

                for i2 in range(node[i1]):
                    val *= (self.order * bc[i1] - i2) / (i2 + 1.0)

            return val

        def get_der(bc1, node1, omit=[]):
            # First derivative of the factor chain w.r.t. one coordinate.
            val = nm.zeros(1, nm.float64)
            for i1 in range(node1):
                if i1 in omit: continue

                aux = nm.ones(1, nm.float64)
                for i2 in range(node1):
                    if (i1 == i2) or (i2 in omit): continue

                    aux *= (self.order * bc1 - i2) / (i2 + 1.0)

                val += aux * self.order / (i1 + 1.0)

            return val

        n_v = self.mtx_i.shape[0]
        dim = n_v - 1

        mi = self.mtx_i[:, :dim]

        bfgg = nm.zeros((coors.shape[0], dim, dim, self.n_nod),
                        dtype=nm.float64)

        for ic, coor in enumerate(coors):
            bc = get_bc(coor)

            for ii, node in enumerate(self.nodes):
                for ig1, bc1 in enumerate(bc): # 1. derivative w.r.t. bc1.
                    for ig2, bc2 in enumerate(bc): # 2. derivative w.r.t. bc2.
                        if ig1 == ig2:
                            val = get_val(bc, node, omit=[ig1])

                            vv = 0.0
                            for i1 in range(node[ig1]):
                                aux = get_der(bc2, node[ig2], omit=[i1])
                                vv += aux * self.order / (i1 + 1.0)

                            val *= vv

                        else:
                            val = get_val(bc, node, omit=[ig1, ig2])
                            val *= get_der(bc1, node[ig1])
                            val *= get_der(bc2, node[ig2])

                        # Chain rule: map barycentric derivatives to spatial.
                        bfgg[ic, :, :, ii] += val * mi[ig1] * mi[ig2][:, None]

        return bfgg
class LagrangeSimplexBPolySpace(LagrangeSimplexPolySpace):
    """Lagrange polynomial space with forced bubble function on a simplex
    domain."""
    name = 'lagrange_simplex_bubble'

    def __init__(self, name, geometry, order, init_context=True):
        # Build the plain simplex space first (without its context), then
        # append one extra bubble node at the element barycentre.
        LagrangeSimplexPolySpace.__init__(self, name, geometry, order,
                                          init_context=False)

        nodes, nts, node_coors = self.nodes, self.nts, self.node_coors

        shape = [nts.shape[0] + 1, 2]
        nts = nm.resize(nts, shape)
        nts[-1,:] = [3, 0]

        shape = [nodes.shape[0] + 1, nodes.shape[1]]
        nodes = nm.resize(nodes, shape)
        # Make a 'hypercubic' (cubic in 2D) node.
        nodes[-1,:] = 1

        n_v = self.geometry.n_vertex
        tmp = nm.ones((n_v,), nm.int32)

        node_coors = nm.vstack((node_coors,
                                nm.dot(tmp, self.geometry.coors) / n_v))

        self.nodes, self.nts = nodes, nts
        self.node_coors = nm.ascontiguousarray(node_coors)

        # Keep the bubble node row for later reference.
        self.bnode = nodes[-1:,:]

        self.n_nod = self.nodes.shape[0]

        if init_context:
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=n_v - 1)

        else:
            self.eval_ctx = None

    def create_context(self, *args, **kwargs):
        """Create the C evaluation context with the bubble flag set."""
        ctx = LagrangePolySpace.create_context(self, *args, **kwargs)
        ctx.is_bubble = 1

        return ctx
class LagrangeTensorProductPolySpace(LagrangePolySpace):
    """Lagrange polynomial space on a tensor product domain."""
    name = 'lagrange_tensor_product'

    def __init__(self, name, geometry, order, init_context=True):
        PolySpace.__init__(self, name, geometry, order)

        # Auxiliary 1D space: the tensor-product basis is evaluated as a
        # product of 1D Lagrange bases, one factor per coordinate axis.
        g1d = Struct(n_vertex = 2,
                     dim = 1,
                     coors = self.bbox[:,0:1].copy())
        self.ps1d = LagrangeSimplexPolySpace('P_aux', g1d, order,
                                             init_context=False)

        self.nodes, self.nts, node_coors = self._define_nodes()
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]

        if init_context:
            # NOTE(review): int(sqrt(n_vertex)) yields 2 for quadrilaterals
            # (4 vertices) but also 2 for hexahedra (8 vertices) -- confirm
            # the intended topological dimension for 3D cells.
            tdim = int(nm.sqrt(geometry.n_vertex))
            self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                                tdim=tdim)

        else:
            self.eval_ctx = None

    def _define_nodes(self):
        """Construct the node index array, node types (vertex/edge/face/
        bubble) and reference coordinates of all basis nodes."""
        geometry = self.geometry
        order = self.order

        n_v, dim = geometry.n_vertex, geometry.dim

        vertex_map = order * nm.array(vertex_maps[dim], dtype=nm.int32)

        # Each row of `nodes` stores `dim` pairs (order - i, i): the 1D
        # Lagrange function indices along each axis.
        n_nod = (order + 1) ** dim
        nodes = nm.zeros((n_nod, 2 * dim), nm.int32)
        nts = nm.zeros((n_nod, 2), nm.int32)

        if order == 0:
            # Single constant function, classified as a bubble node.
            nts[0,:] = [3, 0]
            nodes[0,:] = nm.zeros((n_nod,), nm.int32)

        else:
            iseq = 0

            # Vertex nodes.
            nts[0:n_v,0] = 0
            nts[0:n_v,1] = nm.arange(n_v, dtype=nm.int32)
            if dim == 3:
                for ii in range(n_v):
                    i1, i2, i3 = vertex_map[ii]
                    nodes[iseq,:] = [order - i1, i1,
                                     order - i2, i2,
                                     order - i3, i3]
                    iseq += 1
            elif dim == 2:
                for ii in range(n_v):
                    i1, i2 = vertex_map[ii]
                    nodes[iseq,:] = [order - i1, i1, order - i2, i2]
                    iseq += 1
            else:
                for ii in range(n_v):
                    i1 = vertex_map[ii][0]
                    nodes[iseq,:] = [order - i1, i1]
                    iseq += 1

            # Higher-order edge/face/bubble nodes, per dimension.
            if dim == 1:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 3,
                                                     [[0, 1]], order)
            elif dim == 2:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 1,
                                                     geometry.edges, order)
                iseq = LagrangeNodes.append_tp_faces(nodes, nts, iseq, 3,
                                                     [[0, 1, 2, 3]], order)
            elif dim == 3:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 1,
                                                     geometry.edges, order)
                iseq = LagrangeNodes.append_tp_faces(nodes, nts, iseq, 2,
                                                     geometry.faces, order)
                iseq = LagrangeNodes.append_tp_bubbles(nodes, nts, iseq, 3,
                                                       order)
            else:
                raise NotImplementedError

        # Check orders: each row's index pairs must sum to order per axis.
        orders = nm.sum(nodes, 1)
        if not nm.all(orders == order * dim):
            raise AssertionError('wrong orders! (%d == all of %s)'
                                 % (order * dim, orders))

        # Coordinates of the nodes.
        if order == 0:
            # Constant function: node at the cell centre (vertex average).
            tmp = nm.ones((n_nod, n_v), nm.int32)
            node_coors = nm.dot(tmp, geometry.coors) / n_v

        else:
            # Affine combination of the bounding-box extremes weighted by
            # the (order - i, i) index pairs.
            c_min, c_max = self.bbox[:,0]

            cr = nm.arange(2 * dim)
            node_coors = (nodes[:,cr[::2]] * c_min
                          + nodes[:,cr[1::2]] * c_max) / order

        return nodes, nts, node_coors

    def _eval_base_debug(self, coors, diff=False, ori=None,
                         suppress_errors=False, eps=1e-15):
        """Python version of eval_base()."""
        dim = self.geometry.dim

        ev = self.ps1d.eval_base

        if diff:
            base = nm.ones((coors.shape[0], dim, self.n_nod), dtype=nm.float64)

            for ii in range(dim):
                # Retarget the auxiliary 1D space at this axis' node pairs.
                self.ps1d.nodes = self.nodes[:,2*ii:2*ii+2].copy()
                self.ps1d.n_nod = self.n_nod
                for iv in range(dim):
                    if ii == iv:
                        # Differentiated factor on the derivative axis...
                        base[:,iv:iv+1,:] *= ev(coors[:,ii:ii+1].copy(),
                                                diff=True,
                                                suppress_errors=suppress_errors,
                                                eps=eps)

                    else:
                        # ...plain value factors on the remaining axes.
                        base[:,iv:iv+1,:] *= ev(coors[:,ii:ii+1].copy(),
                                                diff=False,
                                                suppress_errors=suppress_errors,
                                                eps=eps)

        else:
            base = nm.ones((coors.shape[0], 1, self.n_nod), dtype=nm.float64)

            for ii in range(dim):
                self.ps1d.nodes = self.nodes[:,2*ii:2*ii+2].copy()
                self.ps1d.n_nod = self.n_nod

                base *= ev(coors[:,ii:ii+1].copy(),
                           diff=diff,
                           suppress_errors=suppress_errors,
                           eps=eps)

        return base

    def _eval_hessian(self, coors):
        """
        Evaluate the second derivatives of the basis.
        """
        evh = self.ps1d.eval_base

        dim = self.geometry.dim
        bfgg = nm.zeros((coors.shape[0], dim, dim, self.n_nod),
                        dtype=nm.float64)

        # Per-axis 1D basis values (v0s), first (v1s) and second (v2s)
        # derivatives, evaluated at the matching coordinate component.
        v0s = []
        v1s = []
        v2s = []
        for ii in range(dim):
            self.ps1d.nodes = self.nodes[:,2*ii:2*ii+2].copy()
            self.ps1d.n_nod = self.n_nod
            ev = self.ps1d.create_context(None, 0, 1e-15, 100, 1e-8,
                                          tdim=1).evaluate

            v0s.append(ev(coors[:, ii:ii+1].copy())[:, 0, :])
            v1s.append(ev(coors[:, ii:ii+1].copy(), diff=1)[:, 0, :])
            v2s.append(evh(coors[:, ii:ii+1], diff=2)[:, 0, 0, :])

        for ir in range(dim):
            # Diagonal entries: second derivative along one axis times the
            # plain values along the others.
            vv = v2s[ir] # Destroys v2s!
            for ik in range(dim):
                if ik == ir: continue
                vv *= v0s[ik]
            bfgg[:, ir, ir, :] = vv

            # Off-diagonal entries: product of first derivatives along the
            # two axes, times values along the rest.
            for ic in range(dim):
                if ic == ir: continue
                val = v1s[ir] * v1s[ic]
                for ik in range(dim):
                    if (ik == ir) or (ik == ic): continue
                    val *= v0s[ik]

                bfgg[:, ir, ic, :] += val

        return bfgg

    def get_mtx_i(self):
        # Delegate to the auxiliary 1D space.
        return self.ps1d.mtx_i
class SerendipityTensorProductPolySpace(FEPolySpace):
    """
    Serendipity polynomial space using Lagrange functions.

    Notes
    -----
    - Orders >= 4 (with bubble functions) are not supported.
    - Does not use CLagrangeContext, basis functions are hardcoded.
    - `self.nodes`, `self.node_coors` are not used for basis evaluation and
      assembling.
    """
    name = 'serendipity_tensor_product'
    supported_orders = {1, 2, 3}

    # Pre-generated symbolic basis functions and gradients, indexed by
    # [dim][order]; see sfepy.discrete.fem._serendipity.
    from sfepy.discrete.fem._serendipity import all_bfs

    def __init__(self, name, geometry, order):
        import sympy as sm

        if geometry.dim < 2:
            raise ValueError('serendipity elements need dimension 2 or 3! (%d)'
                             % geometry.dim)

        if order not in self.supported_orders:
            raise ValueError('serendipity elements support only orders %s! (%d)'
                             % (self.supported_orders, order))

        PolySpace.__init__(self, name, geometry, order)

        self.nodes, self.nts, self.node_coors = self._define_nodes()
        self.n_nod = self.nodes.shape[0]

        bfs = self.all_bfs[geometry.dim][order]
        self.bfs = bfs[0]
        self.bfgs = bfs[1]

        # Compile the symbolic expressions into numerical callables, one per
        # shape function (values) and one per axis (gradients).
        x, y, z = sm.symbols('x y z')
        vs = [x, y, z][:geometry.dim]
        self._bfs = [sm.lambdify(vs, bf) for bf in self.bfs]
        self._bfgs = [[sm.lambdify(vs, bfg) for bfg in bfgs]
                      for bfgs in self.bfgs]

    def create_context(self, cmesh, eps, check_errors, i_max, newton_eps,
                       tdim=None):
        # No C evaluation context -- the basis functions are hardcoded.
        pass

    def _define_nodes(self):
        """Construct vertex and edge nodes; for the supported orders a
        serendipity space has no face or bubble nodes."""
        geometry = self.geometry
        order = self.order

        n_v, dim = geometry.n_vertex, geometry.dim

        vertex_map = order * nm.array(vertex_maps[dim], dtype=nm.int32)

        # Only for orders 1, 2, 3!
        if dim == 2:
            n_nod = 4 * self.order

        else:
            n_nod = 8 + 12 * (self.order - 1)

        nodes = nm.zeros((n_nod, 2 * dim), nm.int32)
        nts = nm.zeros((n_nod, 2), nm.int32)

        if order == 0:
            # NOTE(review): unreachable here -- __init__ rejects orders
            # outside {1, 2, 3}; kept for parity with the other spaces.
            nts[0, :] = [3, 0]
            nodes[0, :] = nm.zeros((n_nod,), nm.int32)

        else:
            iseq = 0

            # Vertex nodes.
            nts[0:n_v, 0] = 0
            nts[0:n_v, 1] = nm.arange(n_v, dtype=nm.int32)
            if dim == 3:
                for ii in range(n_v):
                    i1, i2, i3 = vertex_map[ii]
                    nodes[iseq, :] = [order - i1, i1,
                                      order - i2, i2,
                                      order - i3, i3]
                    iseq += 1
            else: # dim == 2:
                for ii in range(n_v):
                    i1, i2 = vertex_map[ii]
                    nodes[iseq, :] = [order - i1, i1, order - i2, i2]
                    iseq += 1

            # Edge nodes only -- no faces/bubbles in a serendipity space.
            if dim == 2:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 1,
                                                     geometry.edges, order)
            elif dim == 3:
                iseq = LagrangeNodes.append_tp_edges(nodes, nts, iseq, 1,
                                                     geometry.edges, order)
            else:
                raise NotImplementedError

        # Coordinates of the nodes.
        c_min, c_max = self.bbox[:, 0]

        cr = nm.arange(2 * dim)
        node_coors = (nodes[:, cr[::2]] * c_min
                      + nodes[:, cr[1::2]] * c_max) / order

        return nodes, nts, nm.ascontiguousarray(node_coors)

    def _eval_base(self, coors, diff=0, ori=None,
                   suppress_errors=False, eps=1e-15):
        """
        See :func:`PolySpace.eval_base()`.
        """
        dim = self.geometry.dim
        if diff:
            bdim = dim

        else:
            bdim = 1

        base = nm.empty((coors.shape[0], bdim, self.n_nod), dtype=nm.float64)

        if diff == 0:
            # Basis values: one compiled function per shape function.
            for ib, bf in enumerate(self._bfs):
                base[:, 0, ib] = bf(*coors.T)

        elif diff == 1:
            # First derivatives: one compiled function per axis.
            for ib, bfg in enumerate(self._bfgs):
                for ig in range(dim):
                    base[:, ig, ib] = bfg[ig](*coors.T)

        else:
            # Second and higher derivatives are not hardcoded.
            raise NotImplementedError

        return base
class LobattoTensorProductPolySpace(FEPolySpace):
    """
    Hierarchical polynomial space using Lobatto functions.

    Each row of the `nodes` attribute defines indices of Lobatto functions that
    need to be multiplied together to evaluate the corresponding shape
    function. This defines the ordering of basis functions on the reference
    element.
    """
    name = 'lobatto_tensor_product'

    def __init__(self, name, geometry, order):
        PolySpace.__init__(self, name, geometry, order)

        aux = self._define_nodes()
        self.nodes, self.nts, node_coors, self.face_axes, self.sfnodes = aux
        self.node_coors = nm.ascontiguousarray(node_coors)
        self.n_nod = self.nodes.shape[0]

        # Polynomial order of each shape function: product of its non-zero
        # 1D Lobatto indices (zeros replaced by 1 so they do not contribute).
        aux = nm.where(self.nodes > 0, self.nodes, 1)
        self.node_orders = nm.prod(aux, axis=1)
        # Indices of edge (nts type 1) and face (nts type 2) functions,
        # needed for orientation handling in _eval_base().
        self.edge_indx = nm.where(self.nts[:, 0] == 1)[0]
        self.face_indx = nm.where(self.nts[:, 0] == 2)[0]

        self.face_axes_nodes = self._get_face_axes_nodes(self.face_axes)

    def _get_counts(self):
        """Return the total node count and the per-edge, per-face and bubble
        node counts for the current order."""
        order = self.order
        dim = self.geometry.dim

        n_nod = (order + 1) ** dim
        n_per_edge = (order - 1)
        n_per_face = (order - 1) ** (dim - 1)
        n_bubble = (order - 1) ** dim

        return n_nod, n_per_edge, n_per_face, n_bubble

    def _define_nodes(self):
        """Construct vertex, edge, 3D face and bubble nodes, their types and
        coordinates, plus the face running-axis bookkeeping (`face_axes`) and
        the swapped-axes face nodes (`sfnodes`) used for orientation."""
        geometry = self.geometry
        order = self.order

        n_v, dim = geometry.n_vertex, geometry.dim

        n_nod, n_per_edge, n_per_face, n_bubble = self._get_counts()

        nodes = nm.zeros((n_nod, dim), nm.int32)
        nts = nm.zeros((n_nod, 2), nm.int32)

        # Vertex nodes.
        nts[0:n_v, 0] = 0
        nts[0:n_v, 1] = nm.arange(n_v, dtype=nm.int32)
        nodes[0:n_v] = nm.array(vertex_maps[dim], dtype=nm.int32)
        ii = n_v

        # Edge nodes.
        if (dim > 1) and (n_per_edge > 0):
            # Higher-order Lobatto indices 2..order along the running axis.
            ik = nm.arange(2, order + 1, dtype=nm.int32)
            zo = nm.zeros((n_per_edge, 2), dtype=nm.int32)
            zo[:, 1] = 1

            for ie, edge in enumerate(geometry.edges):
                n1, n2 = nodes[edge]
                # The axis where both edge end-points agree is fixed; the
                # other one runs through the higher-order indices.
                ifix = nm.where(n1 == n2)[0]
                irun = nm.where(n1 != n2)[0][0]
                ic = n1[ifix]

                nodes[ii:ii + n_per_edge, ifix] = zo[:, ic]
                nodes[ii:ii + n_per_edge, irun] = ik
                nts[ii:ii + n_per_edge] = [[1, ie]]
                ii += n_per_edge

        # 3D face nodes.
        face_axes = []
        sfnodes = None
        if (dim == 3) and (n_per_face > 0):
            n_face = len(geometry.faces)
            sfnodes = nm.zeros((n_per_face * n_face, dim), nm.int32)
            ii0 = ii

            ik = nm.arange(2, order + 1, dtype=nm.int32)
            zo = nm.zeros((n_per_face, 2), dtype=nm.int32)
            zo[:, 1] = 1

            for ifa, face in enumerate(geometry.faces):
                ns = nodes[face]

                # Classify axes from the per-axis variation over the face:
                # 0 -> fixed axis, the others are the two running axes.
                diff = nm.diff(ns, axis=0)
                asum = nm.abs(diff).sum(axis=0)
                ifix = nm.where(asum == 0)[0][0]
                ic = ns[0, ifix]
                irun1 = nm.where(asum == 2)[0][0]
                irun2 = nm.where(asum == 1)[0][0]

                iy, ix = nm.meshgrid(ik, ik)

                nodes[ii:ii + n_per_face, ifix] = zo[:, ic]
                nodes[ii:ii + n_per_face, irun1] = ix.ravel()
                nodes[ii:ii + n_per_face, irun2] = iy.ravel()
                nts[ii:ii + n_per_face] = [[2, ifa]]

                # Same face nodes with the two running axes swapped -- used
                # in _eval_base() for swapped-orientation cells.
                ij = ii - ii0
                sfnodes[ij:ij + n_per_face, ifix] = zo[:, ic]
                sfnodes[ij:ij + n_per_face, irun1] = iy.ravel()
                sfnodes[ij:ij + n_per_face, irun2] = ix.ravel()

                face_axes.append([irun1, irun2])

                ii += n_per_face

        face_axes = nm.array(face_axes)

        # Bubble nodes.
        if n_bubble > 0:
            ik = nm.arange(2, order + 1, dtype=nm.int32)
            nodes[ii:] = nm.array([aux for aux in combine([ik] * dim)])
            nts[ii:ii + n_bubble] = [[3, 0]]
            ii += n_bubble

        assert_(ii == n_nod)

        # Coordinates of the "nodes". All nodes on a facet have the same
        # coordinates - the centre of the facet.
        c_min, c_max = self.bbox[:, 0]

        node_coors = nm.zeros(nodes.shape, dtype=nm.float64)
        node_coors[:n_v] = nodes[:n_v]

        if (dim > 1) and (n_per_edge > 0):
            ie = nm.where(nts[:, 0] == 1)[0]
            node_coors[ie] = node_coors[geometry.edges[nts[ie, 1]]].mean(1)

        if (dim == 3) and (n_per_face > 0):
            ifa = nm.where(nts[:, 0] == 2)[0]
            node_coors[ifa] = node_coors[geometry.faces[nts[ifa, 1]]].mean(1)

        if n_bubble > 0:
            ib = nm.where(nts[:, 0] == 3)[0]
            node_coors[ib] = node_coors[geometry.conn].mean(0)

        return nodes, nts, node_coors, face_axes, sfnodes

    def _get_face_axes_nodes(self, face_axes):
        # Face-local node indices along the first running axis, tiled for
        # the 6 faces; returns None when there are no face functions.
        if not len(face_axes): return None

        nodes = self.nodes[self.face_indx]
        n_per_face = self._get_counts()[2]
        anodes = nm.tile(nodes[:n_per_face, face_axes[0]], (6, 1))

        return anodes

    def _eval_base(self, coors, diff=False, ori=None,
                   suppress_errors=False, eps=1e-15):
        """
        See PolySpace.eval_base().
        """
        from .extmods.lobatto_bases import eval_lobatto_tensor_product as ev
        c_min, c_max = self.bbox[:, 0]

        base = ev(coors, self.nodes, c_min, c_max, self.order, diff)

        if ori is not None:
            # Apply per-cell orientations of edge and face functions.
            ebase = nm.tile(base, (ori.shape[0], 1, 1, 1))

            if self.edge_indx.shape[0]:
                # Orient edge functions.
                ie, ii = nm.where(ori[:, self.edge_indx] == 1)
                ii = self.edge_indx[ii]

                ebase[ie, :, :, ii] *= -1.0

            if self.face_indx.shape[0]:
                # Orient face functions.
                fori = ori[:, self.face_indx]

                # ... normal axis order
                ie, ii = nm.where((fori == 1) | (fori == 2))
                ii = self.face_indx[ii]
                ebase[ie, :, :, ii] *= -1.0

                # ... swapped axis order
                sbase = ev(coors, self.sfnodes, c_min, c_max, self.order, diff)
                sbase = insert_strided_axis(sbase, 0, ori.shape[0])

                # ...overwrite with swapped axes basis.
                ie, ii = nm.where(fori >= 4)
                ii2 = self.face_indx[ii]
                ebase[ie, :, :, ii2] = sbase[ie, :, :, ii]

                # ...deal with orientation.
                ie, ii = nm.where((fori == 5) | (fori == 6))
                ii = self.face_indx[ii]
                ebase[ie, :, :, ii] *= -1.0

            base = ebase

        return base
class BernsteinSimplexPolySpace(FEPolySpace):
    """
    Bernstein polynomial space on simplex domains.

    Notes
    -----
    Naive proof-of-concept implementation, does not use recurrent formulas or
    Duffy transformation to obtain tensor product structure.
    """
    name = 'bernstein_simplex'

    def __init__(self, name, geometry, order):
        PolySpace.__init__(self, name, geometry, order)

        self.nodes, self.nts, self.node_coors = self._define_nodes()
        self.n_nod = self.nodes.shape[0]
        # No C evaluation context -- evaluation is pure Python/NumPy.
        self.eval_ctx = None

    def _define_nodes(self):
        # The node layout is identical to the simplex Lagrange space.
        nodes, nts, node_coors = LagrangeSimplexPolySpace._define_nodes(self)
        return nodes, nts, node_coors

    @staticmethod
    def _get_barycentric(coors):
        """Return the barycentric coordinates (dim + 1 per point) of the
        Cartesian reference coordinates `coors`."""
        dim = coors.shape[1]
        bcoors = nm.empty((coors.shape[0], dim + 1))
        bcoors[:, 0] = 1.0 - coors.sum(axis=1)
        bcoors[:, 1:] = coors

        return bcoors

    def _eval_base(self, coors, diff=False, ori=None,
                   suppress_errors=False, eps=1e-15):
        """
        See PolySpace.eval_base().
        """
        from scipy.special import factorial

        dim = self.geometry.dim
        if diff:
            bdim = dim
            # Constant gradients of the barycentric coordinates w.r.t. the
            # Cartesian reference coordinates.
            bgrad = nm.zeros((dim + 1, dim), dtype=nm.float64)
            bgrad[0] = -1
            bgrad[1:] = nm.eye(dim)

        else:
            bdim = 1

        base = nm.ones((coors.shape[0], bdim, self.n_nod), dtype=nm.float64)
        bcoors = self._get_barycentric(coors)

        # Multinomial coefficients: order! / prod(node_i!).
        fs = factorial(nm.arange(0, self.order + 1))
        of = fs[-1]
        if not diff:
            for ii, node in enumerate(self.nodes):
                coef = of / nm.prod(fs[node])
                val = coef * nm.prod(nm.power(bcoors, node), axis=1)
                base[:, 0, ii] = val

        else:
            for ii, node in enumerate(self.nodes):
                coef = of / nm.prod(fs[node])
                for ider in range(dim):
                    # Product rule: differentiate each barycentric factor in
                    # turn (chain rule via bgrad) and sum the contributions.
                    dval = 0.0
                    for ib in range(dim + 1):
                        ex = node[ib]

                        val = coef
                        for im in range(dim + 1):
                            if ib == im:
                                val *= (ex *
                                        nm.power(bcoors[:, im], ex - 1) *
                                        bgrad[ib, ider])

                            else:
                                val *= nm.power(bcoors[:, im], node[im])

                        dval += val

                    base[:, ider, ii] = dval

        return base
class BernsteinTensorProductPolySpace(FEPolySpace):
    """
    Bernstein polynomial space.

    Each row of the `nodes` attribute defines indices of 1D Bernstein basis
    functions that need to be multiplied together to evaluate the corresponding
    shape function. This defines the ordering of basis functions on the
    reference element.
    """
    name = 'bernstein_tensor_product'

    def __init__(self, name, geometry, order):
        PolySpace.__init__(self, name, geometry, order)

        self.nodes, self.nts, self.node_coors = self._define_nodes()
        self.n_nod = self.nodes.shape[0]
        # No C evaluation context -- evaluation is pure Python/NumPy.
        self.eval_ctx = None

    def _define_nodes(self):
        # Reuse the tensor-product Lagrange node layout, keeping only the
        # second entry of each (order - i, i) pair as the 1D function index.
        nn, nts, node_coors = LagrangeTensorProductPolySpace._define_nodes(self)
        nodes = nn[:, 1::2]

        return nodes, nts, node_coors

    def _eval_base(self, coors, diff=False, ori=None,
                   suppress_errors=False, eps=1e-15):
        """
        See PolySpace.eval_base().
        """
        from sfepy.discrete.iga.extmods.igac import eval_bernstein_basis as ev

        dim = self.geometry.dim
        if diff:
            bdim = dim

        else:
            bdim = 1

        base = nm.ones((coors.shape[0], bdim, self.n_nod), dtype=nm.float64)

        degree = self.order
        n_efuns_max = degree + 1
        for iq, qp in enumerate(coors):
            # 1D Bernstein values (B) and derivatives (dB_dxi), one row per
            # axis, filled in-place by the C evaluator for this point.
            B = nm.empty((dim, n_efuns_max), dtype=nm.float64)
            dB_dxi = nm.empty((dim, n_efuns_max), dtype=nm.float64)
            for ii in range(dim):
                ev(B[ii, :], dB_dxi[ii, :], qp[ii], degree)

            if not diff:
                for ii, ni in enumerate(self.nodes.T):
                    base[iq, 0, :] *= B[ii, ni]

            else:
                for ii, ni in enumerate(self.nodes.T):
                    for iv in range(bdim):
                        if ii == iv:
                            # Differentiated factor on the derivative axis.
                            base[iq, iv, :] *= dB_dxi[ii, ni]

                        else:
                            base[iq, iv, :] *= B[ii, ni]

        return base
| |
from flask import Flask, jsonify
from datetime import datetime, timedelta
from DBManager import DBManager
from Scraper import Scraper
from multiprocessing.pool import ThreadPool
import requests
import json
import time
# Flask application instance and the base URL of the Ergast F1 API for the
# current season; all API requests below are made relative to `apiUrl`.
app = Flask(__name__)
apiUrl = "http://ergast.com/api/f1/current"
@app.route('/')
def homepage():
    """Landing endpoint identifying this service."""
    return "Backend for F1 Portal app"
# Obtaining the drivers and constructors standings data
@app.route('/get_standings')
def get_standings():
    """Serve the standings, refreshing the cached copy once it has expired."""
    # Cached standings document from the database.
    cached = DBManager.get_standings_entry()[0][0]

    # No cached document at all: fetch fresh data from the API and cache it.
    if cached == '':
        print("Cached Standings file not found.")
        return jsonify(get_standings_from_api(None))

    # The cached document carries its own expiry date; two extra hours give
    # the race time to finish before a refresh is attempted.
    expiry = datetime.strptime(cached["expiryDate"],
                               '%Y-%m-%dT%H:%M:%SZ') + timedelta(hours=2)

    # Serve from cache while still fresh, otherwise refresh and re-cache.
    if datetime.utcnow() <= expiry:
        print("Obtained standings from cached file")
        return jsonify(cached)

    print("Cached standings file out of date.")
    return jsonify(get_standings_from_api(cached))
# Obtaining the drivers and constructors standings data from Ergast API and
# caching to database
def get_standings_from_api(old_standings_json):
    """Fetch fresh standings from the Ergast API, cache and return them.

    ``old_standings_json`` is the previously cached document (or ``None``);
    it is returned unchanged when the API has not published new data yet.
    """
    def standings_round(doc):
        # Round number reported inside an Ergast standings document.
        return int(doc["driver_standings"]["MRData"]
                   ["StandingsTable"]["StandingsLists"][0]["round"])

    fresh = {"driver_standings":
             requests.get(apiUrl + '/driverStandings.json').json()}

    # If the API still reports the same round as the cache, the standings
    # have not been updated after the race yet -- keep the old document
    # (with its old expiry) so this check runs again next time.
    if old_standings_json is not None:
        if standings_round(old_standings_json) == standings_round(fresh):
            print("API standings are not yet updated. Using old cached data.")
            return old_standings_json

    fresh["constructor_standings"] = \
        requests.get(apiUrl + '/constructorStandings.json').json()

    # Stamp the document with an expiry to aid caching: the start time of
    # the next race that has not happened yet, from the season schedule.
    races = json.loads(get_schedule().data)["MRData"]["RaceTable"]["Races"]
    now = datetime.utcnow()
    for race in races:
        start = datetime.strptime(race["date"] + "T" + race["time"],
                                  '%Y-%m-%dT%H:%M:%SZ')
        if now < start:
            fresh["expiryDate"] = start.strftime('%Y-%m-%dT%H:%M:%SZ')
            break

    # Update cached standings file in database.
    DBManager.update_standings_entry(fresh)
    print("Updated standings from API")
    return fresh
# Obtaining the current season's race schedule
@app.route('/get_schedule')
def get_schedule():
    """Serve the current season's race schedule, refreshing stale caches."""
    # Cached schedule document from the database.
    cached = DBManager.get_schedule_entry()[0][0]

    # Nothing cached yet: fetch from the API and cache it.
    if cached == '':
        print("Cached Schedule file not found.")
        return jsonify(get_schedule_from_api())

    # A cached schedule is only valid for the current calendar year.
    cached_season = int(cached["MRData"]["RaceTable"]["season"])
    current_season = datetime.now().year
    if cached_season != current_season:
        print("Cached Schedule file out of date. "
              "Cached season = " + str(cached_season) +
              ", Current season = " + str(current_season))
        return jsonify(get_schedule_from_api())

    print("Obtained season schedule from cached file")
    return jsonify(cached)
# Obtaining the current season's race schedule from ErgastAPI and caching it
# in the database
def get_schedule_from_api():
    """Fetch the season schedule from the Ergast API, enrich it with track
    images, cache it and return it."""
    fresh = requests.get(apiUrl + ".json").json()

    # Attach a circuit image to every race before caching.
    fresh = add_images_to_schedule(fresh)

    DBManager.update_schedule_entry(fresh)
    print("Updated season schedule for new season from API")
    return fresh
# Adding track images to every race in the season schedule/calendar
def add_images_to_schedule(new_schedule_data):
    """Attach a circuit image URL to each race of the schedule.

    Sets ``Circuit.imageURL`` on every entry of
    ``new_schedule_data["MRData"]["RaceTable"]["Races"]`` (the document is
    mutated in place and also returned). Unknown countries receive a
    generic fallback image.
    """
    image_host = "https://www.imageupload.co.uk/images/2017/07/28/"
    image_files = {
        "Australia": "australian.jpg",
        "China": "chinese.jpg",
        "Bahrain": "bahrain.jpg",
        "Russia": "russian.jpg",
        "Spain": "spanish.jpg",
        "Monaco": "monaco.jpg",
        "Canada": "canadian.jpg",
        "Azerbaijan": "azerbaijan.jpg",
        "Austria": "austrian.jpg",
        "UK": "british.jpg",
        "Hungary": "hungary.jpg",
        "Belgium": "belgian.jpg",
        "Italy": "italian.jpg",
        "Singapore": "singapore.jpg",
        "Malaysia": "malaysian.jpg",
        "Japan": "japanese.jpg",
        "USA": "american2.jpg",
        "Mexico": "mexican297780.png",
        "Brazil": "brazilian.jpg",
        "UAE": "uae2.jpg",
    }
    fallback_url = "https://www.imageupload.co.uk/image/DPb2"

    for race in new_schedule_data["MRData"]["RaceTable"]["Races"]:
        country = race["Circuit"]["Location"]["country"]
        if country in image_files:
            race["Circuit"]["imageURL"] = image_host + image_files[country]
        else:
            race["Circuit"]["imageURL"] = fallback_url

    return new_schedule_data
# Get showtimes of weekend from cache or scraper
def get_showtimes(season, url, race_country):
    """Return the TV showtimes for a race weekend, preferring the cache.

    A cached document is reused only when it belongs to `season`; otherwise
    the showtimes are scraped, stamped with the season and re-cached.
    """
    entry = DBManager.get_showtimes_entry(race_country)

    # Valid cache hit: an entry exists, is non-empty and is for this season.
    if entry and entry[0][0] and entry[0][0]['year'] == season:
        print("Showtimes obtained from cache")
        return entry[0][0]

    # Scrape showtimes from the website.
    scraped = Scraper.scrape_showtimes(season, url)
    if scraped == {}:
        print("Showtimes unavailable as session has elapsed")
        return scraped

    # Tag with the season (used for cache validation) and re-cache.
    scraped['year'] = season
    DBManager.update_showtimes_entry(race_country, scraped)
    print("Showtimes obtained from website")
    return scraped
# Get session results from cache or scraper
def get_session_results(url, race_country, session_name, year):
    """Return one session's results, scraping and caching on a cache miss.

    Parameters
    ----------
    url : results page URL to scrape on a cache miss.
    race_country : country key identifying the cache entry.
    session_name : one of 'fp1'..'fp3', 'q1'..'q3', 'race'.
    year : season the results belong to; used to validate the cache.
    """
    # Obtain cached results from database.
    entry = DBManager.get_session_results_entry(race_country, session_name)

    # Check if valid using year/season and if empty. If valid, return.
    if entry:
        cached_session_results = entry[0][0]
        if cached_session_results:
            json_year = cached_session_results['year']
            if json_year == year:
                print(session_name + " results obtained from cache")
                return cached_session_results

    # Otherwise, scrape the appropriate kind of session.
    session_results = {}
    if session_name[:2] == 'fp':
        session_results = Scraper.scrape_practice_results(url)
    elif session_name[0] == 'q':
        session_results = Scraper.scrape_qualifying_results(url)
    else:
        session_results = Scraper.scrape_race_results(url)

    # Tag the results with the season so the cache can be validated later.
    session_results['year'] = year

    # Update the cached session results in the database.
    DBManager.update_session_results_entry(race_country,
                                           session_name,
                                           session_results)
    # BUG FIX: previously logged "Showtimes obtained from website"
    # (copy-pasted from get_showtimes()); log the actual source, matching
    # the cache-hit message above.
    print(session_name + " results obtained from website")
    return session_results
@app.route('/get_results/<string:season>/<string:race_country>')
def get_results(season, race_country):
    """Serve a JSON document with the weekend's showtimes, the results of
    every session published so far, and the latest session identifier."""
    # Constructing URLs---------------------------------------------------------
    # Constructing URL to scrape results
    # e.g. http://www.skysports.com/f1/grandprix/australia/results/2017/
    # qualifying-1
    # url only work with lowercase countries
    race_country = race_country.lower()

    # Special case
    if race_country == "uae":
        race_country = "unitedarabemirates"

    url = "http://www.skysports.com/f1/grandprix/"
    url += race_country + "/results/" + season + "/"

    # BUG FIX: `url` already ends with '/'; the session fragments previously
    # started with '/' too, producing double slashes ('...//practice-1').
    # Practice results URL
    p1_url = url + "practice-1"
    p2_url = url + "practice-2"
    p3_url = url + "practice-3"

    # Qualifying results URL
    q1_url = url + "qualifying-1"
    q2_url = url + "qualifying-2"
    q3_url = url + "qualifying-3"

    # Race results URL
    race_url = url + "race"

    # Generating a results URL list
    sessions = ["fp1", "fp2", "fp3", "q1", "q2", "q3", "race"]
    urls = [p1_url, p2_url, p3_url, q1_url, q2_url, q3_url, race_url]

    # Constructing URL to scrape showtimes
    # e.g. "http://www.skysports.com/watch/f1-on-sky/grand-prix/italy"
    # Special case
    if race_country == "unitedarabemirates":
        race_country = "abu-dhabi"

    showtimes_url = "http://www.skysports.com/watch/f1-on-sky/grand-prix/"
    showtimes_url += race_country
    # NOTE(review): `showtimes_url` is built but never used -- get_showtimes()
    # below receives the results `url` instead. This looks like a bug, but
    # confirm which URL Scraper.scrape_showtimes() expects before changing it.

    # Scraping and populating results JSON--------------------------------------
    results_json = {}
    pool = ThreadPool(processes=9)

    # Obtain showtimes for all sessions
    # showtimes_json = Scraper.scrape_showtimes(season, url, race_country)
    showtimes_json = get_showtimes(season, url, race_country)

    # Calculating the next session's ID(can have value in range 0-7). Tells
    # us the max session we need to obtain results for to avoid redundant
    # scraping/reading from cache when weekend is in progress.
    # TODO: Replace IDs and sessions with an enum class
    next_session_id = 0

    # By default, all sessions need to be obtained from cache in case weekend
    # has elapsed. So set the id to be the maximum.
    if not showtimes_json:
        next_session_id = 7

    # If weekend is in progress or hasn't started, can prune sessions to be
    # obtained from cache/website
    else:
        curr_timestamp = datetime.utcnow()
        for st in showtimes_json:
            if st in sessions:
                next_session_id += 1
                session_time_raw = showtimes_json[st]
                session_time = datetime.strptime(session_time_raw,
                                                 '%Y-%m-%dT%H:%M:%S')

                # If session hasn't elapsed. This is the next session as we are
                # iterating in ascending order of sessions in list so break
                if session_time > curr_timestamp:
                    break

    # Submitting tasks to execute concurrently
    tasks = []
    for i in range(next_session_id):
        session_name = sessions[i]
        tasks.append(pool.apply_async(get_session_results, (urls[i],
                                                            race_country,
                                                            session_name,
                                                            season)))

    # Waiting for executing tasks to obtain JSON results and then populating
    # results JSON with the obtained results------------------------------------
    for i in range(len(sessions)):
        # If session has been submitted to be scraped
        if i < len(tasks):
            results_json[sessions[i]] = tasks[i].get()
        else:
            results_json[sessions[i]] = ""

    # Obtain latest session based on results published. Cannot use
    # next_session_id as we could be waiting for current session results to be
    # published after it has elapsed which that variable doesnt account for.
    latest_session = "fp1"
    for session in sessions:
        session_data = results_json[session]
        # BUG FIX: sessions that were not scraped hold '' (a string);
        # previously ''["timesheet"] raised TypeError mid-weekend.
        if session_data and session_data["timesheet"]:
            latest_session = session
        else:
            break

    if latest_session == "race":
        results_json["latestSessionType"] = latest_session
        results_json["latestSessionNum"] = "1"
    else:
        results_json["latestSessionType"] = latest_session[:-1]
        results_json["latestSessionNum"] = latest_session[-1:]

    results_json.update(showtimes_json)
    pool.close()
    return jsonify(results_json)
# Tester function for quick debugging
@app.route('/test')
def test():
    """Debug endpoint: force a fresh standings fetch from the API."""
    return jsonify(get_standings_from_api(None))
# Start the Flask development server when run as a script.
if __name__ == '__main__':
    app.run()
| |
#!/usr/bin/env python
"""
Unit test for the Gps2Udp server.
"""
import gps2udp
import hashlib
import os
import time
import unittest
class TestGps2UdpServer(unittest.TestCase):
"""
Unit test for the Gps2Udp server internal functions.
"""
def test_parse_packet(self):
"""
Test the gps2udp.parse_packet() function
"""
# reset internal state of the gps2udp module
gps2udp.LAST_TIMESTAMP = None
# bad timestamp
time_from_the_past = int(time.time()) - gps2udp.MAX_TIME_DIFF * 2
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(time_from_the_past) + ' 1.1 2.2 2')
time_from_the_future = int(time.time()) + gps2udp.MAX_TIME_DIFF * 2
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(time_from_the_future) + ' 1.1 2.2 2')
# bad token number
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet, '')
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' 1.1 2.2')
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' 1.1')
# bad latitude
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' z 2.2 2')
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' -91.0 2.2 2')
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' 91.0 2.2 2')
# bad longitude
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' 1.1 z 2')
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' 1.1 -181.0 2')
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' 1.1 181 2')
# bad accuracy
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' 1.1 2.2 z')
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' 1.1 2.2 3.3')
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(int(time.time())) + ' 1.1 2.2 -1')
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
(str(int(time.time())) + ' 1.1 2.2 ' +
str(gps2udp.MAX_ACCURACY + 1)))
# first valid packet
now = int(time.time())
self.assertDictEqual(
{'timestamp': now,
'latitude': 1.1,
'longitude': 2.2,
'accuracy': 3},
gps2udp.parse_packet('%r 1.1 2.2 3' % now))
# second valid packet (timestamp greater than previous)
self.assertDictEqual(
{'timestamp': now + 1,
'latitude': 1.1,
'longitude': 2.2,
'accuracy': 3},
gps2udp.parse_packet('%r 1.1 2.2 3' % (now + 1)))
# not valid packet (timestamp is less than previous)
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
'%r 1.1 2.2 3' % now)
# not valid packet (timestamp is equal with previous)
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
'%r 1.1 2.2 3' % (now + 1))
# valid packet again (timestamp is greater than previous valid)
self.assertDictEqual(
{'timestamp': now + 2,
'latitude': 1.1,
'longitude': 2.2,
'accuracy': 3},
gps2udp.parse_packet('%r 1.1 2.2 3' % (now + 2)))
# bad packet: timestamp is greater than previous, but
# too big to satisfy MAX_TIME_DIFF
time_from_the_future = int(time.time()) + gps2udp.MAX_TIME_DIFF * 2
self.assertRaises(
gps2udp.PacketParseError,
gps2udp.parse_packet,
str(time_from_the_future) + ' 1.1 2.2 3')
def test_parse_packet_signed(self):
    """
    Test the gps2udp.parse_packet() function
    in the SIGNED mode.

    The assertions are order-dependent: parse_packet() tracks the last
    accepted timestamp in module state, so the packets below must be
    fed in exactly this sequence.
    """
    # reset internal state of the gps2udp module
    gps2udp.LAST_TIMESTAMP = None
    # set the secret (any unpredictable string will do; sha1 of the
    # current time is just a convenient source of one)
    secret = sha1(str(time.time()))
    os.environ['GPS2UDP_SECRET'] = secret
    now = int(time.time())
    # bad packet (without the digest)
    self.assertRaises(
        gps2udp.PacketParseError,
        gps2udp.parse_packet,
        str(now) + ' 1.1 2.2 3',
        signed = True)
    # bad packet (digest is bad)
    self.assertRaises(
        gps2udp.PacketParseError,
        gps2udp.parse_packet,
        str(now) + ' 1.1 2.2 3 bad_digest',
        signed = True)
    # good packet: digest is sha1(payload + secret) appended to payload
    payload = str(now) + ' 1.1 2.2 3'
    self.assertDictEqual(
        {'timestamp': now,
         'latitude': 1.1,
         'longitude': 2.2,
         'accuracy': 3},
        gps2udp.parse_packet(
            payload + ' ' + sha1(payload + secret),
            signed = True))
    # another good packet
    payload = str(now + 1) + ' 1.1 2.2 3'
    self.assertDictEqual(
        {'timestamp': now + 1,
         'latitude': 1.1,
         'longitude': 2.2,
         'accuracy': 3},
        gps2udp.parse_packet(
            payload + ' ' + sha1(payload + secret),
            signed = True))
    # bad packet again: trailing garbage after a valid digest must be rejected
    payload = str(now + 2) + ' 1.1 2.2 3'
    self.assertRaises(
        gps2udp.PacketParseError,
        gps2udp.parse_packet,
        payload + ' ' + sha1(payload + secret) + 'erroneous',
        signed = True)
    # another valid one, without trailing mess
    self.assertDictEqual(
        {'timestamp': now + 2,
         'latitude': 1.1,
         'longitude': 2.2,
         'accuracy': 3},
        gps2udp.parse_packet(
            payload + ' ' + sha1(payload + secret),
            signed = True))
def test_format_packet(self):
    """
    Test the gps2udp.format_packet() function.

    (The docstring previously named parse_packet; this method exercises
    format_packet.)  Expected wire format: "<ts> <lat> <lon> <acc>\n"
    with coordinates padded to seven decimal places.
    """
    self.assertEqual(
        '123456 2.3456789 3.4567890 456\n',
        gps2udp.format_packet(
            {'timestamp': 123456,
             'latitude': 2.3456789012,
             'longitude': 3.4567890123,
             'accuracy': 456}))
    # integer and negative coordinates are formatted with the same
    # fixed seven-decimal padding
    self.assertEqual(
        '654321 -2.0000000 3.4567890 456\n',
        gps2udp.format_packet(
            {'timestamp': 654321,
             'latitude': -2,
             'longitude': 3.4567890123,
             'accuracy': 456}))
def sha1(data):
    """
    Return the hex SHA1 digest for the given data.

    Accepts bytes or text: text input is UTF-8 encoded first, because
    hashlib's update() only accepts bytes-like objects on Python 3
    (and unicode on Python 2 would be encoded implicitly/ambiguously).
    """
    if not isinstance(data, bytes):
        data = data.encode('utf-8')
    h = hashlib.sha1()
    h.update(data)
    return h.hexdigest()
# Run the test suite when the module is executed directly.
if __name__ == '__main__':
    unittest.main(verbosity = 2)
| |
#!/usr/bin/env python
"""Ninja toolchain abstraction for XCode toolchain"""
import os
import subprocess
import toolchain
import syntax
def make_target(toolchain, host, target):
    """Factory entry point: build the XCode target abstraction for the
    given toolchain/host/target triple."""
    return XCode(toolchain, host, target)
class XCode(object):
    """Ninja build-command abstraction for the Apple XCode toolchain.

    Locates the platform tools via xcrun, builds the shell-command
    templates for plist/asset/xib compilation, dSYM extraction and
    codesigning, and assembles .app bundles for macOS and iOS targets.
    """

    def __init__(self, toolchain, host, target):
        self.toolchain = toolchain
        self.host = host
        self.target = target

    def initialize_toolchain(self):
        """Reset signing metadata and pick the default deployment target."""
        self.organisation = ''
        self.bundleidentifier = ''
        self.provisioning = ''
        if self.target.is_macos():
            self.deploymenttarget = '12.0'
        elif self.target.is_ios():
            self.deploymenttarget = '15.0'

    def build_toolchain(self):
        """Resolve tool paths with xcrun and build the command templates."""
        if self.target.is_macos():
            sdk = 'macosx'
            deploytarget = 'MACOSX_DEPLOYMENT_TARGET=' + self.deploymenttarget
        elif self.target.is_ios():
            sdk = 'iphoneos'
            deploytarget = 'IPHONEOS_DEPLOYMENT_TARGET=' + self.deploymenttarget
        # NOTE(review): `sdk` (and the currently unused `deploytarget`) are
        # unbound for any other target; presumably this is only reached for
        # macOS/iOS builds -- confirm with callers.
        platformpath = toolchain.check_last_output(['xcrun', '--sdk', sdk, '--show-sdk-platform-path'])
        localpath = platformpath + "/Developer/usr/bin:/Applications/Xcode.app/Contents/Developer/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin"
        # Each tool invocation is prefixed with PATH so helper binaries
        # next to the tool resolve correctly when ninja runs the command.
        self.plist = "PATH=" + localpath + " " + toolchain.check_last_output(['xcrun', '--sdk', sdk, '-f', 'plutil'])
        self.xcassets = "PATH=" + localpath + " " + toolchain.check_last_output(['xcrun', '--sdk', sdk, '-f', 'actool'])
        self.xib = "PATH=" + localpath + " " + toolchain.check_last_output(['xcrun', '--sdk', sdk, '-f', 'ibtool'])
        self.dsymutil = "PATH=" + localpath + " " + toolchain.check_last_output(['xcrun', '--sdk', sdk, '-f', 'dsymutil'])
        self.plistcmd = 'build/ninja/plist.py --exename $exename --prodname $prodname --bundle $bundleidentifier --target $target --deploymenttarget $deploymenttarget --output $outpath $in'
        if self.target.is_macos():
            self.xcassetscmd = 'mkdir -p $outpath && $xcassets --output-format human-readable-text --output-partial-info-plist $outplist' \
                ' --app-icon AppIcon --launch-image LaunchImage --platform macosx --minimum-deployment-target ' + self.deploymenttarget + \
                ' --target-device mac --compress-pngs --compile $outpath $in >/dev/null'
            self.xibcmd = '$xib --target-device mac --module $module --minimum-deployment-target ' + self.deploymenttarget + \
                ' --output-partial-info-plist $outplist --auto-activate-custom-fonts' \
                ' --output-format human-readable-text --compile $outpath $in'
        elif self.target.is_ios():
            self.xcassetscmd = 'mkdir -p $outpath && $xcassets --output-format human-readable-text --output-partial-info-plist $outplist' \
                ' --app-icon AppIcon --launch-image LaunchImage --platform iphoneos --minimum-deployment-target ' + self.deploymenttarget + \
                ' --target-device iphone --target-device ipad --compress-pngs --compile $outpath $in >/dev/null'
            self.xibcmd = '$xib --target-device iphone --target-device ipad --module $module --minimum-deployment-target ' + self.deploymenttarget + \
                ' --output-partial-info-plist $outplist --auto-activate-custom-fonts' \
                ' --output-format human-readable-text --compile $outpath $in &> /dev/null '
        self.dsymutilcmd = '$dsymutil $in -o $outpath'
        self.codesigncmd = 'build/ninja/codesign.py --target $target --prefs codesign.json --builddir $builddir --binname $binname --config $config --entitlements $entitlements $outpath'

    def parse_default_variables(self, variables):
        """Pick up signing/deployment settings from a dict or pair list."""
        if not variables:
            return
        if isinstance(variables, dict):
            iterator = iter(variables.items())
        else:
            iterator = iter(variables)
        for key, val in iterator:
            if key == 'deploymenttarget':
                self.deploymenttarget = val
            if key == 'organisation':
                self.organisation = val
            if key == 'bundleidentifier':
                self.bundleidentifier = val
            if key == 'provisioning':
                self.provisioning = val

    def parse_prefs(self, prefs):
        """Override settings from the per-platform section of the prefs dict."""
        if self.target.is_ios() and 'ios' in prefs:
            iosprefs = prefs['ios']
            if 'deploymenttarget' in iosprefs:
                self.deploymenttarget = iosprefs['deploymenttarget']
            if 'organisation' in iosprefs:
                self.organisation = iosprefs['organisation']
            if 'bundleidentifier' in iosprefs:
                self.bundleidentifier = iosprefs['bundleidentifier']
            if 'provisioning' in iosprefs:
                self.provisioning = iosprefs['provisioning']
        elif self.target.is_macos() and 'macos' in prefs:
            macosprefs = prefs['macos']
            if 'deploymenttarget' in macosprefs:
                self.deploymenttarget = macosprefs['deploymenttarget']
            if 'organisation' in macosprefs:
                self.organisation = macosprefs['organisation']
            if 'bundleidentifier' in macosprefs:
                self.bundleidentifier = macosprefs['bundleidentifier']
            if 'provisioning' in macosprefs:
                self.provisioning = macosprefs['provisioning']

    def write_variables(self, writer):
        """Emit the ninja variables used by the rules below."""
        writer.variable('plist', self.plist)
        writer.variable('xcassets', self.xcassets)
        writer.variable('xib', self.xib)
        writer.variable('dsymutil', self.dsymutil)
        writer.variable('bundleidentifier', syntax.escape(self.bundleidentifier))
        writer.variable('deploymenttarget', self.deploymenttarget)
        writer.variable('entitlements', 'none')

    def write_rules(self, writer):
        """Emit the ninja rules for the XCode-specific build steps."""
        writer.rule('dsymutil', command = self.dsymutilcmd, description = 'DSYMUTIL $outpath')
        writer.rule('plist', command = self.plistcmd, description = 'PLIST $outpath')
        writer.rule('xcassets', command = self.xcassetscmd, description = 'XCASSETS $outpath')
        writer.rule('xib', command = self.xibcmd, description = 'XIB $outpath')
        writer.rule('codesign', command = self.codesigncmd, description = 'CODESIGN $outpath')

    def make_bundleidentifier(self, binname):
        """Expand the $(binname) placeholder in the bundle identifier."""
        return self.bundleidentifier.replace('$(binname)', binname)

    def app(self, toolchain, writer, module, archbins, outpath, binname, basepath, config, implicit_deps, resources, codesign):
        """Assemble a .app bundle: binary, dSYM, resources, plist, signing.

        Returns the list of built binary, symbol and resource outputs.
        NOTE(review): `implicit_deps` is currently unused.
        """
        #Outputs
        builtbin = []
        builtres = []
        builtsym = []
        #Paths
        builddir = os.path.join('$buildpath', config, 'app', binname)
        configpath = os.path.join(outpath, config)
        apppath = os.path.join(configpath, binname + '.app')
        dsympath = os.path.join(outpath, config, binname + '.dSYM')
        #Extract debug symbols from universal binary
        dsymcontentpath = os.path.join(dsympath, 'Contents')
        builtsym = writer.build([os.path.join(dsymcontentpath, 'Resources', 'DWARF', binname), os.path.join(dsymcontentpath, 'Resources', 'DWARF' ), os.path.join(dsymcontentpath, 'Resources'), os.path.join(dsymcontentpath, 'Info.plist'), dsymcontentpath, dsympath], 'dsymutil', archbins[config], variables = [('outpath', dsympath)])
        #Copy final universal binary (iOS bundles are flat; macOS uses Contents/MacOS)
        if self.target.is_ios():
            builtbin = toolchain.copy(writer, archbins[config], os.path.join(apppath, toolchain.binprefix + binname + toolchain.binext))
        else:
            builtbin = toolchain.copy(writer, archbins[config], os.path.join(apppath, 'Contents', 'MacOS', toolchain.binprefix + binname + toolchain.binext))
        #Build resources
        if resources:
            has_resources = False
            #Lists of input plists and partial plist files produced by resources
            plists = []
            assetsplists = []
            xibplists = []
            entitlements = []
            #All resource output files
            outfiles = []
            #First build everything except plist inputs
            for resource in resources:
                if resource.endswith('.xcassets'):
                    if self.target.is_macos():
                        assetsvars = [('outpath', os.path.join(os.getcwd(), apppath, 'Contents', 'Resources'))]
                    else:
                        assetsvars = [('outpath', apppath)]
                    outplist = os.path.join(os.getcwd(), builddir, os.path.splitext(os.path.basename(resource))[0] + '-xcassets.plist')
                    assetsvars += [('outplist', outplist)]
                    outfiles = [outplist]
                    if self.target.is_macos():
                        outfiles += [os.path.join(os.getcwd(), apppath, 'Contents', 'Resources', 'AppIcon.icns')]
                    elif self.target.is_ios():
                        pass #TODO: Need to list all icon and launch image files here
                    assetsplists += writer.build(outfiles, 'xcassets', os.path.join(os.getcwd(), basepath, module, resource), variables = assetsvars)
                    has_resources = True
                elif resource.endswith('.xib'):
                    # ibtool's --module must be a valid identifier
                    xibmodule = binname.replace('-', '_').replace('.', '_')
                    if self.target.is_macos():
                        nibpath = os.path.join(apppath, 'Contents', 'Resources', os.path.splitext(os.path.basename(resource))[0] + '.nib')
                    else:
                        nibpath = os.path.join(apppath, os.path.splitext(os.path.basename(resource))[0] + '.nib')
                    plistpath = os.path.join(builddir, os.path.splitext(os.path.basename(resource))[0] + '-xib.plist')
                    xibplists += [plistpath]
                    outfiles = []
                    if self.target.is_ios():
                        outfiles += [os.path.join(nibpath, 'objects.nib'), os.path.join(nibpath, 'objects-8.0+.nib'), os.path.join(nibpath, 'runtime.nib')]
                    outfiles += [nibpath, plistpath]
                    builtres += writer.build(outfiles, 'xib', os.path.join(basepath, module, resource), variables = [('outpath', nibpath), ('outplist', plistpath), ('module', xibmodule)])
                    has_resources = True
                elif resource.endswith('.plist'):
                    plists += [os.path.join(basepath, module, resource)]
                elif resource.endswith('.entitlements'):
                    entitlements += [os.path.join(basepath, module, resource)]
            #Extra output files/directories
            outfiles = []
            if has_resources and self.target.is_macos():
                outfiles += [os.path.join(apppath, 'Contents', 'Resources')]
            #Now build input plists appending partial plists created by previous resources
            if self.target.is_macos():
                plistpath = os.path.join(apppath, 'Contents', 'Info.plist')
                pkginfopath = os.path.join(apppath, 'Contents', 'PkgInfo')
            else:
                plistpath = os.path.join(apppath, 'Info.plist')
                pkginfopath = os.path.join(apppath, 'PkgInfo')
            plistvars = [('exename', binname), ('prodname', binname), ('outpath', plistpath)]
            bundleidentifier = self.make_bundleidentifier(binname)
            if bundleidentifier != '':
                plistvars += [('bundleidentifier', bundleidentifier)]
            outfiles += [plistpath, pkginfopath]
            builtres += writer.build(outfiles, 'plist', plists + assetsplists + xibplists, implicit = [os.path.join( 'build', 'ninja', 'plist.py')], variables = plistvars)
        #Do code signing (might modify binary, but does not matter, nothing should have final binary as input anyway)
        if codesign:
            # NOTE(review): `entitlements` is only bound inside the
            # `if resources:` branch above -- codesigning a macOS app
            # with no resources would raise NameError; confirm callers
            # always pass resources when codesign is set.
            codesignvars = [('builddir', builddir), ('binname', binname), ('outpath', apppath), ('config', config)]
            if self.target.is_ios():
                if self.provisioning != '':
                    codesignvars += [('provisioning', self.provisioning)]
                writer.build([os.path.join(apppath, '_CodeSignature', 'CodeResources'), os.path.join(apppath, '_CodeSignature'), apppath], 'codesign', builtbin, implicit = builtres + [os.path.join('build', 'ninja', 'codesign.py')], variables = codesignvars)
            elif self.target.is_macos():
                if self.provisioning != '':
                    codesignvars += [('provisioning', self.provisioning)]
                if len(entitlements) > 0:
                    codesignvars += [('entitlements', entitlements[0])]
                writer.build([os.path.join(apppath, 'Contents', '_CodeSignature', 'CodeResources'), os.path.join(apppath, 'Contents', '_CodeSignature'), os.path.join(apppath, 'Contents'), apppath], 'codesign', builtbin, implicit = builtres + [os.path.join('build', 'ninja', 'codesign.py')], variables = codesignvars)
        return builtbin + builtsym + builtres
| |
# -*- coding: utf-8 -*-
"""
pygments.util
~~~~~~~~~~~~~
Utility functions.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import codecs
# Split on path separators (POSIX "/", Windows "\") and spaces.
split_path_re = re.compile(r'[/\\ ]')

# Match an optional XML prolog followed by a DOCTYPE declaration;
# group 2 captures the 'name PUBLIC "identifier"' part of the doctype.
doctype_lookup_re = re.compile(r'''(?smx)
    (<\?.*?\?>)?\s*
    <!DOCTYPE\s+(
     [a-zA-Z_][a-zA-Z0-9]*\s+
     [a-zA-Z_][a-zA-Z0-9]*\s+
     "[^"]*")
     [^>]*>
''')

# Rough "looks like markup" check: some tag with a matching close tag.
# Fix: the inline flags (?uism) used to sit at the END of the pattern;
# flags not at the start were deprecated in Python 3.6 and raise
# re.error since Python 3.11.  Moving them to the front is equivalent.
tag_re = re.compile(r'(?uism)<(.+?)(\s.*?)?>.*?</.+?>')
class ClassNotFound(ValueError):
    """
    Raised if one of the get_*_by_* lookup functions didn't find a
    matching class.
    """
class OptionError(Exception):
    """Raised when an option value has an invalid type or value."""
    pass
def get_choice_opt(options, optname, allowed, default=None, normcase=False):
    """Return the option value, validated against the *allowed* choices.

    With ``normcase=True`` the value is lower-cased before validation.
    Raises OptionError when the value is not one of *allowed*.
    """
    value = options.get(optname, default)
    if normcase:
        value = value.lower()
    if value in allowed:
        return value
    choices = ', '.join(map(str, allowed))
    raise OptionError('Value for option %s must be one of %s' %
                      (optname, choices))
def get_bool_opt(options, optname, default=None):
    """Coerce the option value to a bool.

    Real bools pass through, ints are truth-tested, and the strings
    1/0, yes/no, true/false, on/off (case-insensitive) are mapped to
    booleans; anything else raises OptionError.
    """
    value = options.get(optname, default)
    if isinstance(value, bool):
        return value
    if isinstance(value, int):
        return bool(value)
    if not isinstance(value, basestring):
        raise OptionError('Invalid type %r for option %s; use '
                          '1/0, yes/no, true/false, on/off' % (
                          value, optname))
    lowered = value.lower()
    if lowered in ('1', 'yes', 'true', 'on'):
        return True
    if lowered in ('0', 'no', 'false', 'off'):
        return False
    raise OptionError('Invalid value %r for option %s; use '
                      '1/0, yes/no, true/false, on/off' % (
                      value, optname))
def get_int_opt(options, optname, default=None):
    """Coerce the option value to an int.

    Raises OptionError when the value has the wrong type or cannot be
    parsed as an integer.
    """
    value = options.get(optname, default)
    try:
        return int(value)
    except TypeError:
        raise OptionError('Invalid type %r for option %s; you '
                          'must give an integer value' % (
                          value, optname))
    except ValueError:
        raise OptionError('Invalid value %r for option %s; you '
                          'must give an integer value' % (
                          value, optname))
def get_list_opt(options, optname, default=None):
    """Return the option value as a list.

    Strings are split on whitespace, lists/tuples are copied; anything
    else raises OptionError.
    """
    value = options.get(optname, default)
    if isinstance(value, basestring):
        return value.split()
    if isinstance(value, (list, tuple)):
        return list(value)
    raise OptionError('Invalid type %r for option %s; you '
                      'must give a list value' % (
                      value, optname))
def docstring_headline(obj):
    """Return the first paragraph of *obj*'s docstring joined into one line."""
    doc = obj.__doc__
    if not doc:
        return ''
    headline = []
    for line in doc.strip().splitlines():
        stripped = line.strip()
        if not stripped:
            # blank line ends the first paragraph
            break
        headline.append(" " + stripped)
    return ''.join(headline).lstrip()
def make_analysator(f):
    """
    Wrap *f* as a static text-analysis function whose result is a
    float clamped to the range [0.0, 1.0] (falsy results become 0.0).
    """
    def text_analyse(text):
        score = f(text)
        if not score:
            return 0.0
        return min(1.0, max(0.0, float(score)))
    text_analyse.__doc__ = f.__doc__
    return staticmethod(text_analyse)
def shebang_matches(text, regex):
    """
    Check whether *regex* matches the interpreter named by the shebang
    line of *text*, if one exists.

    Only the last path component of the interpreter is considered;
    leading option arguments (``-f`` / ``--foo`` style) and common
    Windows executable extensions (.exe/.cmd/.bat/.bin) are ignored.
    The regular expression is implicitly anchored (wrapped in ^...$)
    and matched case-insensitively.

    >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
    True
    >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
    False
    """
    newline = text.find('\n')
    first_line = (text[:newline] if newline >= 0 else text).lower()
    if not first_line.startswith('#!'):
        return False
    try:
        # last non-option token of the shebang, split on path chars
        found = [x for x in split_path_re.split(first_line[2:].strip())
                 if x and not x.startswith('-')][-1]
    except IndexError:
        return False
    matcher = re.compile('^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
    return matcher.search(found) is not None
def doctype_matches(text, regex):
    """
    Check whether the doctype (if present) matches *regex*.

    Only the first part of a DOCTYPE is checked, e.g.
    'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'.
    """
    match = doctype_lookup_re.match(text)
    if match is None:
        return False
    doctype = match.group(2)
    return re.compile(regex).match(doctype.strip()) is not None
def html_doctype_matches(text):
    """
    Check if the file looks like it has a html doctype
    (HTML or XHTML, any W3C DTD version).
    """
    return doctype_matches(text, r'html\s+PUBLIC\s+"-//W3C//DTD X?HTML.*')
# Memoized results of looks_like_xml(), keyed by hash of the input text.
# NOTE(review): unbounded cache -- grows with every distinct input.
_looks_like_xml_cache = {}
def looks_like_xml(text):
    """
    Check if a doctype exists or if we have some tags.
    """
    key = hash(text)
    try:
        return _looks_like_xml_cache[key]
    except KeyError:
        m = doctype_lookup_re.match(text)
        if m is not None:
            # doctype hits are returned without being cached; only the
            # tag-scan result below is memoized (looks intentional,
            # since the doctype match is cheap -- confirm)
            return True
        # only scan the first 1000 characters for a tag pair
        rv = tag_re.search(text[:1000]) is not None
        _looks_like_xml_cache[key] = rv
        return rv
# Python 2/3 compatibility
if sys.version_info < (3,0):
    # On Python 2, bytes IS the native str type, so b() is identity.
    b = bytes = str
    u_prefix = 'u'
    import StringIO, cStringIO
    BytesIO = cStringIO.StringIO
    StringIO = StringIO.StringIO
    uni_open = codecs.open
else:
    import builtins
    bytes = builtins.bytes
    u_prefix = ''
    def b(s):
        # Convert a native str to bytes by ordinal value (i.e. assumes
        # code points < 256); bytes pass through unchanged.
        if isinstance(s, str):
            return bytes(map(ord, s))
        elif isinstance(s, bytes):
            return s
        else:
            raise TypeError("Invalid argument %r for b()" % (s,))
    import io
    BytesIO = io.BytesIO
    StringIO = io.StringIO
    uni_open = builtins.open
| |
'''
Utils
=====
'''
# pylint: disable=useless-object-inheritance
__all__ = ('platform', 'reify', 'deprecated')
from os import environ
from os import path
from sys import platform as _sys_platform
class Platform(object):
    '''
    Refactored to class to allow module function to be replaced
    with module variable.

    Instances compare equal to the platform name string ('android',
    'ios', 'win', 'macosx', 'linux' or 'unknown'), so code can write
    ``platform == 'win'``.
    '''

    def __init__(self):
        # lazily-detected flags; None means "not checked yet"
        self._platform_ios = None
        self._platform_android = None

    def __eq__(self, other):
        return other == self._get_platform()

    def __ne__(self, other):
        return other != self._get_platform()

    def __str__(self):
        return self._get_platform()

    def __repr__(self):
        return 'platform name: \'{platform}\' from: \n{instance}'.format(
            platform=self._get_platform(),
            instance=super(Platform, self).__repr__()
        )

    def __hash__(self):
        return self._get_platform().__hash__()

    def _get_platform(self):
        """Detect and return the current platform name string."""
        if self._platform_android is None:
            # ANDROID_ARGUMENT and ANDROID_PRIVATE are 2 environment variables
            # from python-for-android project
            self._platform_android = 'ANDROID_ARGUMENT' in environ
        if self._platform_ios is None:
            self._platform_ios = (environ.get('KIVY_BUILD', '') == 'ios')
        # On android, _sys_platform return 'linux2', so prefer to check the
        # import of Android module than trying to rely on _sys_platform.
        # pylint: disable=no-else-return
        if self._platform_android is True:
            return 'android'
        elif self._platform_ios is True:
            return 'ios'
        elif _sys_platform in ('win32', 'cygwin'):
            return 'win'
        elif _sys_platform == 'darwin':
            return 'macosx'
        elif _sys_platform[:5] == 'linux':
            return 'linux'
        return 'unknown'
platform = Platform() # pylint: disable=invalid-name
class Proxy(object):
    '''
    Based on http://code.activestate.com/recipes/496741-object-proxying
    version by Tomer Filiba, PSF license.

    Lazily imports ``plyer.platforms.<platform>.<name>`` on first
    attribute access and delegates every operation to the object
    returned by that module's ``instance()``; if the import fails, it
    falls back to an instance of the given facade class.
    '''
    __slots__ = ['_obj', '_name', '_facade']

    def __init__(self, name, facade):
        object.__init__(self)
        # use object.__setattr__ directly to bypass our own delegating
        # __setattr__ below
        object.__setattr__(self, '_obj', None)
        object.__setattr__(self, '_name', name)
        object.__setattr__(self, '_facade', facade)

    def _ensure_obj(self):
        """Import and cache the platform implementation on first use."""
        obj = object.__getattribute__(self, '_obj')
        if obj:
            return obj
        # do the import
        try:
            name = object.__getattribute__(self, '_name')
            module = 'plyer.platforms.{}.{}'.format(
                platform, name)
            mod = __import__(module, fromlist='.')
            obj = mod.instance()
        except: # pylint: disable=bare-except
            # deliberately broad: any import/instantiation failure falls
            # back to the facade (which typically raises NotImplementedError)
            import traceback
            traceback.print_exc()
            facade = object.__getattribute__(self, '_facade')
            obj = facade()
        object.__setattr__(self, '_obj', obj)
        return obj

    def __getattribute__(self, name):
        result = None
        if name == '__doc__':
            return result
        # run _ensure_obj func, result in _obj
        object.__getattribute__(self, '_ensure_obj')()
        # return either Proxy instance or platform-dependent implementation
        result = getattr(object.__getattribute__(self, '_obj'), name)
        return result

    def __delattr__(self, name):
        object.__getattribute__(self, '_ensure_obj')()
        delattr(object.__getattribute__(self, '_obj'), name)

    def __setattr__(self, name, value):
        object.__getattribute__(self, '_ensure_obj')()
        setattr(object.__getattribute__(self, '_obj'), name, value)

    def __bool__(self):
        object.__getattribute__(self, '_ensure_obj')()
        return bool(object.__getattribute__(self, '_obj'))

    def __str__(self):
        object.__getattribute__(self, '_ensure_obj')()
        return str(object.__getattribute__(self, '_obj'))

    def __repr__(self):
        object.__getattribute__(self, '_ensure_obj')()
        return repr(object.__getattribute__(self, '_obj'))
def whereis_exe(program):
    ''' Tries to find the program on the system path.
    Returns the path if it is found or None if it's not found.
    '''
    # Windows uses ';' between PATH entries, everything else ':'
    separator = ';' if platform == 'win' else ':'
    for directory in environ.get('PATH', '').split(separator):
        candidate = path.join(directory, program)
        if path.exists(candidate) and not path.isdir(candidate):
            return candidate
    return None
class reify(object):
    # pylint: disable=too-few-public-methods,invalid-name
    '''
    Non-data descriptor decorator: the wrapped method runs once on
    first attribute access and its result is stored in the instance
    dict, shadowing the descriptor from then on.

    It acts like @property, except that the function is only ever
    called once; after that, the value is cached as a regular
    attribute.  This gives you lazy attribute creation on objects
    that are meant to be immutable.

    Taken from the `Pyramid project <https://pypi.python.org/pypi/pyramid/>`_.

    To use this as a decorator::

        @reify
        def lazy(self):
            ...
            return hard_to_compute_int
        first_time = self.lazy   # lazy is reify obj, reify.__get__() runs
        second_time = self.lazy  # lazy is hard_to_compute_int
    '''

    def __init__(self, func):
        self.func = func
        self.__doc__ = func.__doc__

    def __get__(self, inst, cls):
        # class-level access returns the descriptor itself
        if inst is None:
            return self
        value = self.func(inst)
        # cache: replace the descriptor with the computed value
        setattr(inst, self.func.__name__, value)
        return value
def deprecated(obj):
    '''
    This is a decorator which can be used to mark functions and classes as
    deprecated. It will result in a warning being emitted when a deprecated
    function is called or a new instance of a class created.

    In case of classes, the warning is emitted before the __new__ method
    of the decorated class is called, therefore a way before the __init__
    method itself.

    Returns the wrapped function, or the class itself with an extended
    __new__ method (the original is preserved as __old_new__).
    '''
    import warnings
    from inspect import stack
    from functools import wraps
    from types import FunctionType, MethodType
    new_obj = None
    # wrap a function into a function emitting a deprecated warning
    if isinstance(obj, FunctionType):
        @wraps(obj)
        def new_func(*args, **kwargs):
            # get the previous stack frame and extract file, line and caller
            # stack() -> caller()
            call_file, call_line, caller = stack()[1][1:4]
            # assemble warning
            warning = (
                'Call to deprecated function {} in {} line {}. '
                'Called from {} line {}'
                ' by {}().\n'.format(
                    obj.__name__,
                    obj.__code__.co_filename,
                    obj.__code__.co_firstlineno + 1,
                    call_file, call_line, caller
                )
            )
            warnings.warn('[{}] {}'.format('WARNING', warning))
            # if there is a docstring present, emit docstring too
            if obj.__doc__:
                warnings.warn(obj.__doc__)
            # return function wrapper
            return obj(*args, **kwargs)
        new_obj = new_func
    # wrap a class into a class emitting a deprecated warning
    # obj is class, type(obj) is metaclass, metaclasses inherit from type
    elif isinstance(type(obj), type):
        # we have an access to the metaclass instance (class) and need to print
        # the warning when a class instance (object) is created with __new__
        # i.e. when calling Class()
        def obj_new(cls, child, *args, **kwargs):
            '''
            Custom metaclass instance's __new__ method with deprecated warning.
            Calls the original __new__ method afterwards.
            '''
            # pylint: disable=unused-argument
            # get the previous stack frame and extract file, line and caller
            # stack() -> caller()
            call_file, call_line, caller = stack()[1][1:4]
            loc_file = obj.__module__
            warnings.warn(
                '[{}] Creating an instance of a deprecated class {} in {}.'
                ' Called from {} line {} by {}().\n'.format(
                    'WARNING', obj.__name__, loc_file,
                    call_file, call_line, caller
                )
            )
            # if there is a docstring present, emit docstring too
            if obj.__doc__:
                warnings.warn(obj.__doc__)
            # make sure nothing silly gets into the function
            assert obj is cls
            # we are creating a __new__ for a class that inherits from
            # a deprecated class, therefore in this particular case
            # MRO is (child, cls, object) > (cls, object)
            if len(child.__mro__) > len(cls.__mro__):
                assert cls is child.__mro__[1], (cls.__mro__, child.__mro__)
            # we are creating __new__ directly for the deprecated class
            # therefore MRO is the same for parent and child class
            elif len(child.__mro__) == len(cls.__mro__):
                assert cls is child
            # return the class back with the extended __new__ method
            return obj.__old_new__(child)
        # back up the old __new__ method and create an extended
        # __new__ method that emits deprecated warnings
        obj.__old_new__ = obj.__new__
        obj.__new__ = MethodType(obj_new, obj)
        new_obj = obj
    # return a function wrapper or an extended class
    return new_obj
| |
# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for the backup service base driver. """
import uuid
import mock
from oslo_serialization import jsonutils
from cinder.backup import driver
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.backup import fake_service
from cinder.volume import volume_types
_backup_db_fields = ['id', 'user_id', 'project_id',
'volume_id', 'host', 'availability_zone',
'display_name', 'display_description',
'container', 'status', 'fail_reason',
'service_metadata', 'service', 'size',
'object_count']
class BackupBaseDriverTestCase(test.TestCase):
    """Tests for the common behaviour of the backup service base driver."""

    def _create_volume_db_entry(self, id, size):
        """Insert a minimal available volume row and return its id."""
        vol = {'id': id, 'size': size, 'status': 'available'}
        return db.volume_create(self.ctxt, vol)['id']

    def _create_backup_db_entry(self, backupid, volid, size,
                                userid=None, projectid=None):
        """Insert a minimal backup row and return its id.

        A fresh user/project uuid is generated per call when none is
        given.  (The previous defaults ``userid=str(uuid.uuid4())``
        were evaluated once at class-definition time, so every backup
        created with defaults silently shared the same user and
        project ids.)
        """
        if userid is None:
            userid = str(uuid.uuid4())
        if projectid is None:
            projectid = str(uuid.uuid4())
        backup = {'id': backupid, 'size': size, 'volume_id': volid,
                  'user_id': userid, 'project_id': projectid}
        return db.backup_create(self.ctxt, backup)['id']

    def setUp(self):
        """Create one volume with one backup and a fake backup service."""
        super(BackupBaseDriverTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.volume_id = str(uuid.uuid4())
        self.backup_id = str(uuid.uuid4())
        self._create_backup_db_entry(self.backup_id, self.volume_id, 1)
        self._create_volume_db_entry(self.volume_id, 1)
        self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)
        self.driver = fake_service.FakeBackupService(self.ctxt)

    def test_get_metadata(self):
        """Driver metadata is JSON with the current (v2) version tag."""
        json_metadata = self.driver.get_metadata(self.volume_id)
        metadata = jsonutils.loads(json_metadata)
        self.assertEqual(2, metadata['version'])

    def test_put_metadata(self):
        """Putting version-1 metadata is accepted."""
        metadata = {'version': 1}
        self.driver.put_metadata(self.volume_id, jsonutils.dumps(metadata))

    def test_get_put_metadata(self):
        """Metadata read from the driver can be written back unchanged."""
        json_metadata = self.driver.get_metadata(self.volume_id)
        self.driver.put_metadata(self.volume_id, json_metadata)

    def test_export_record(self):
        """The base driver exports an empty record dict."""
        export_record = self.driver.export_record(self.backup)
        self.assertDictEqual({}, export_record)

    def test_import_record(self):
        """The base driver accepts any record on import and returns None."""
        export_record = {'key1': 'value1'}
        self.assertIsNone(self.driver.import_record(self.backup,
                                                    export_record))
class BackupMetadataAPITestCase(test.TestCase):
def _create_volume_db_entry(self, id, size, display_name,
                            display_description):
    """Insert an available volume row with the given name/description."""
    vol = {'id': id, 'size': size, 'status': 'available',
           'display_name': display_name,
           'display_description': display_description}
    return db.volume_create(self.ctxt, vol)['id']
def setUp(self):
    """Create a named volume and the metadata API under test."""
    super(BackupMetadataAPITestCase, self).setUp()
    self.ctxt = context.get_admin_context()
    self.volume_id = str(uuid.uuid4())
    self.backup_id = str(uuid.uuid4())
    self.volume_display_name = 'vol-1'
    self.volume_display_description = 'test vol'
    self._create_volume_db_entry(self.volume_id, 1,
                                 self.volume_display_name,
                                 self.volume_display_description)
    self.bak_meta_api = driver.BackupMetadataAPI(self.ctxt)
def _add_metadata(self, vol_meta=False, vol_glance_meta=False):
    """Attach sample volume and/or glance metadata to the test volume."""
    if vol_meta:
        # Add some VolumeMetadata
        db.volume_metadata_update(self.ctxt, self.volume_id,
                                  {'fee': 'fi'}, False)
        db.volume_metadata_update(self.ctxt, self.volume_id,
                                  {'fo': 'fum'}, False)
    if vol_glance_meta:
        # Add some GlanceMetadata
        db.volume_glance_metadata_create(self.ctxt, self.volume_id,
                                         'disk_format', 'bare')
        db.volume_glance_metadata_create(self.ctxt, self.volume_id,
                                         'container_type', 'ovf')
def test_get(self):
    """The metadata sections grow as volume/glance metadata is added."""
    # Volume won't have anything other than base by default
    meta = self.bak_meta_api.get(self.volume_id)
    s1 = set(jsonutils.loads(meta).keys())
    s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META]
    self.assertEqual(set(), s1.symmetric_difference(s2))
    self._add_metadata(vol_glance_meta=True)
    meta = self.bak_meta_api.get(self.volume_id)
    s1 = set(jsonutils.loads(meta).keys())
    s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
          self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META]
    self.assertEqual(set(), s1.symmetric_difference(s2))
    self._add_metadata(vol_meta=True)
    meta = self.bak_meta_api.get(self.volume_id)
    s1 = set(jsonutils.loads(meta).keys())
    s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
          self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META,
          self.bak_meta_api.TYPE_TAG_VOL_META]
    self.assertEqual(set(), s1.symmetric_difference(s2))
def test_put(self):
    """Round-trip metadata through put() at each metadata level."""
    meta = self.bak_meta_api.get(self.volume_id)
    self.bak_meta_api.put(self.volume_id, meta)
    self._add_metadata(vol_glance_meta=True)
    meta = self.bak_meta_api.get(self.volume_id)
    self.bak_meta_api.put(self.volume_id, meta)
    self._add_metadata(vol_meta=True)
    meta = self.bak_meta_api.get(self.volume_id)
    self.bak_meta_api.put(self.volume_id, meta)
def test_put_invalid_version(self):
    """An unknown metadata version (3) must be rejected by put()."""
    container = jsonutils.dumps({'version': 3})
    self.assertRaises(exception.BackupMetadataUnsupportedVersion,
                      self.bak_meta_api.put, self.volume_id, container)
def test_v1_restore_factory(self):
    """v1 restore functions apply saved name/description to a NEW volume."""
    fact = self.bak_meta_api._v1_restore_factory()
    keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
            self.bak_meta_api.TYPE_TAG_VOL_META,
            self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META]
    self.assertEqual(set([]),
                     set(keys).symmetric_difference(set(fact.keys())))
    meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META:
                      {'display_name': 'my-backed-up-volume',
                       'display_description': 'backed up description'},
                      self.bak_meta_api.TYPE_TAG_VOL_META: {},
                      self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}}
    # Emulate restore to new volume
    volume_id = str(uuid.uuid4())
    vol_name = 'restore_backup_%s' % (self.backup_id)
    self._create_volume_db_entry(volume_id, 1, vol_name, 'fake volume')
    # each factory entry is a (restore_func, fields) pair
    for f in fact:
        func = fact[f][0]
        fields = fact[f][1]
        func(meta_container[f], volume_id, fields)
    vol = db.volume_get(self.ctxt, volume_id)
    self.assertEqual('my-backed-up-volume', vol['display_name'])
    self.assertEqual('backed up description', vol['display_description'])
def test_v1_restore_factory_no_restore_name(self):
    """Restoring onto an existing named volume keeps its own name."""
    fact = self.bak_meta_api._v1_restore_factory()
    keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
            self.bak_meta_api.TYPE_TAG_VOL_META,
            self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META]
    self.assertEqual(set([]),
                     set(keys).symmetric_difference(set(fact.keys())))
    meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META:
                      {'display_name': 'my-backed-up-volume',
                       'display_description': 'backed up description'},
                      self.bak_meta_api.TYPE_TAG_VOL_META: {},
                      self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}}
    for f in fact:
        func = fact[f][0]
        fields = fact[f][1]
        func(meta_container[f], self.volume_id, fields)
    vol = db.volume_get(self.ctxt, self.volume_id)
    self.assertEqual(self.volume_display_name, vol['display_name'])
    self.assertEqual(self.volume_display_description,
                     vol['display_description'])
def test_v2_restore_factory(self):
    """Restore v2 metadata (incl. encryption key id and volume type).

    Name/description of the existing volume must be preserved, while the
    backed-up encryption key id is applied.
    """
    fact = self.bak_meta_api._v2_restore_factory()
    keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META,
            self.bak_meta_api.TYPE_TAG_VOL_META,
            self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META]
    # Factory must expose exactly the three expected type tags.
    self.assertEqual(set([]),
                     set(keys).symmetric_difference(set(fact.keys())))
    volume_types.create(self.ctxt, 'faketype')
    vol_type = volume_types.get_volume_type_by_name(self.ctxt, 'faketype')
    meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META:
                      {'encryption_key_id': '123',
                       'volume_type_id': vol_type.get('id'),
                       'display_name': 'vol-2',
                       'display_description': 'description'},
                      self.bak_meta_api.TYPE_TAG_VOL_META: {},
                      self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}}
    for f in fact:
        func = fact[f][0]    # restore callable for this type tag
        fields = fact[f][1]  # field filter passed to the callable
        func(meta_container[f], self.volume_id, fields)
    vol = db.volume_get(self.ctxt, self.volume_id)
    # Existing name/description kept; backed-up key id applied.
    self.assertEqual(self.volume_display_name, vol['display_name'])
    self.assertEqual(self.volume_display_description,
                     vol['display_description'])
    self.assertEqual('123', vol['encryption_key_id'])
def test_restore_vol_glance_meta(self):
    """Round-trip glance metadata through save/restore.

    Exercised twice: once with no glance metadata attached to the
    volume, and once after _add_metadata() has attached some.
    """
    # Fields is an empty list for _restore_vol_glance_meta method.
    fields = []
    container = {}
    self.bak_meta_api._save_vol_glance_meta(container, self.volume_id)
    self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id,
                                               fields)
    self._add_metadata(vol_glance_meta=True)
    self.bak_meta_api._save_vol_glance_meta(container, self.volume_id)
    self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id,
                                               fields)
def test_restore_vol_meta(self):
    """Round-trip volume metadata through save/restore.

    Exercised twice: once with no metadata present and once after
    _add_metadata() has attached some.
    """
    # Fields is an empty list for _restore_vol_meta method.
    fields = []
    container = {}
    self.bak_meta_api._save_vol_meta(container, self.volume_id)
    # Extract volume metadata from container.
    metadata = container.get('volume-metadata', {})
    self.bak_meta_api._restore_vol_meta(metadata, self.volume_id,
                                        fields)
    self._add_metadata(vol_meta=True)
    self.bak_meta_api._save_vol_meta(container, self.volume_id)
    # Extract volume metadata from container.
    metadata = container.get('volume-metadata', {})
    self.bak_meta_api._restore_vol_meta(metadata, self.volume_id, fields)
def test_restore_vol_base_meta(self):
    """Save base metadata and restore it, filtering on the key id."""
    # Only the encryption key id is restored from the base metadata.
    restore_fields = ['encryption_key_id']
    backup_container = {}
    self.bak_meta_api._save_vol_base_meta(backup_container, self.volume_id)
    self.bak_meta_api._restore_vol_base_meta(backup_container,
                                             self.volume_id,
                                             restore_fields)
def _create_encrypted_volume_db_entry(self, id, type_id, encrypted):
    """Insert a volume row, optionally carrying an encryption key id.

    Returns the id of the created volume.
    """
    key_id = str(uuid.uuid4()) if encrypted else None
    vol = {'id': id, 'size': 1, 'status': 'available',
           'volume_type_id': type_id, 'encryption_key_id': key_id}
    return db.volume_create(self.ctxt, vol)['id']
def test_restore_encrypted_vol_to_different_volume_type(self):
    """Restoring an encrypted backup to a volume of a different
    (encrypted) volume type must raise."""
    fields = ['encryption_key_id']
    container = {}
    # Create an encrypted volume
    enc_vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
                                                         'enc_vol_type',
                                                         True)
    # Create a second encrypted volume, of a different volume type
    enc_vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
                                                         'enc_vol_type2',
                                                         True)
    # Backup the first volume and attempt to restore to the second
    self.bak_meta_api._save_vol_base_meta(container, enc_vol1_id)
    self.assertRaises(exception.EncryptedBackupOperationFailed,
                      self.bak_meta_api._restore_vol_base_meta,
                      container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META],
                      enc_vol2_id, fields)
def test_restore_unencrypted_vol_to_different_volume_type(self):
    """Unencrypted restores across volume types are allowed; the target
    keeps its own (different) volume type."""
    fields = ['encryption_key_id']
    container = {}
    # Create an unencrypted volume
    vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
                                                     'vol_type1',
                                                     False)
    # Create a second unencrypted volume, of a different volume type
    vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
                                                     'vol_type2',
                                                     False)
    # Backup the first volume and restore to the second
    self.bak_meta_api._save_vol_base_meta(container, vol1_id)
    self.bak_meta_api._restore_vol_base_meta(
        container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], vol2_id,
        fields)
    # The restore must not have rewritten the target's volume type.
    self.assertNotEqual(
        db.volume_get(self.ctxt, vol1_id)['volume_type_id'],
        db.volume_get(self.ctxt, vol2_id)['volume_type_id'])
def test_restore_encrypted_vol_to_same_volume_type(self):
    """Encrypted restore succeeds when source and target volumes share
    the same volume type."""
    fields = ['encryption_key_id']
    container = {}
    # Create an encrypted volume
    enc_vol1_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
                                                         'enc_vol_type',
                                                         True)
    # Create an encrypted volume of the same type
    enc_vol2_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
                                                         'enc_vol_type',
                                                         True)
    # Backup the first volume and restore to the second
    self.bak_meta_api._save_vol_base_meta(container, enc_vol1_id)
    self.bak_meta_api._restore_vol_base_meta(
        container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], enc_vol2_id,
        fields)
def test_restore_encrypted_vol_to_none_type_source_type_unavailable(self):
    """Encrypted restore to a typeless volume fails when the source's
    volume type cannot be looked up."""
    fields = ['encryption_key_id']
    container = {}
    # Source volume references a type name with no DB record behind it.
    enc_vol_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
                                                        'enc_vol_type',
                                                        True)
    # Target volume has no volume type at all.
    undef_vol_id = self._create_encrypted_volume_db_entry(
        str(uuid.uuid4()), None, False)
    self.bak_meta_api._save_vol_base_meta(container, enc_vol_id)
    self.assertRaises(exception.EncryptedBackupOperationFailed,
                      self.bak_meta_api._restore_vol_base_meta,
                      container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META],
                      undef_vol_id, fields)
def test_restore_encrypted_vol_to_none_type_source_type_available(self):
    """Encrypted restore to a typeless volume succeeds when the source's
    volume type exists; the target adopts that type."""
    fields = ['encryption_key_id']
    container = {}
    # Register the source's volume type so the restore can resolve it.
    db.volume_type_create(self.ctxt, {'id': 'enc_vol_type_id',
                                      'name': 'enc_vol_type'})
    enc_vol_id = self._create_encrypted_volume_db_entry(str(uuid.uuid4()),
                                                        'enc_vol_type_id',
                                                        True)
    # Target volume has no volume type at all.
    undef_vol_id = self._create_encrypted_volume_db_entry(
        str(uuid.uuid4()), None, False)
    self.bak_meta_api._save_vol_base_meta(container, enc_vol_id)
    self.bak_meta_api._restore_vol_base_meta(
        container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], undef_vol_id,
        fields)
    # The target must now carry the source's volume type.
    self.assertEqual(
        db.volume_get(self.ctxt, undef_vol_id)['volume_type_id'],
        db.volume_get(self.ctxt, enc_vol_id)['volume_type_id'])
def test_filter(self):
    """_filter keeps only the whitelisted keys of a metadata dict."""
    metadata = {'a': 1, 'b': 2, 'c': 3}
    cases = [([], metadata),            # empty whitelist keeps all
             (['b'], {'b': 2}),
             (['d'], {}),               # unknown key filters everything
             (['a', 'b'], {'a': 1, 'b': 2})]
    for whitelist, expected in cases:
        self.assertEqual(expected,
                         self.bak_meta_api._filter(metadata, whitelist))
def test_save_vol_glance_meta(self):
    """_save_vol_glance_meta runs cleanly on an empty container."""
    backup_container = {}
    self.bak_meta_api._save_vol_glance_meta(backup_container,
                                            self.volume_id)
def test_save_vol_meta(self):
    """_save_vol_meta runs cleanly on an empty container."""
    backup_container = {}
    self.bak_meta_api._save_vol_meta(backup_container, self.volume_id)
def test_save_vol_base_meta(self):
    """_save_vol_base_meta runs cleanly on an empty container."""
    backup_container = {}
    self.bak_meta_api._save_vol_base_meta(backup_container, self.volume_id)
def test_is_serializable(self):
    """A plain dict should be reported as JSON-serializable."""
    sample = {'foo': 'bar'}
    # Mirror production usage: only dump when reported serializable.
    if self.bak_meta_api._is_serializable(sample):
        jsonutils.dumps(sample)
def test_is_not_serializable(self):
    """_is_serializable is False when jsonutils.dumps raises TypeError."""
    sample = {'foo': 'bar'}
    with mock.patch.object(jsonutils, 'dumps') as dumps_mock:
        dumps_mock.side_effect = TypeError
        self.assertFalse(self.bak_meta_api._is_serializable(sample))
        dumps_mock.assert_called_once_with(sample)
| |
#!/usr/bin/env python
##
## Copyright 2016 SRI International
## See COPYING file distributed along with the package for the copyright and license terms.
##
import datetime
import json
import string
import time

import numpy
import pandas
#
# High-Risk Status
#
# List of SSAGA variables to recode as: 1=>12, 2=>14, 3=>17, 4=>AGE
ssaga_recode_as_age = [ 'asa_ao2dk', 'asb_ao2dk', 'asc1_ao6dk', 'asc2_ao6dk', 'as_ao9dk', 'as_ao10dk', 'as1_ao11dk', 'as2_ao11dk', 'as1_ao15dk', 'as2_ao15dk', 'as1_ao16dk',
                        'as2_ao16dk', 'asa1_ao14dk', 'asa2_ao14dk', 'asc1_ao14dk', 'asc2_ao14dk', 'as1_ao17dk', 'as2_ao17dk', 'as1_ao19dk', 'as2_ao19dk', 'as1_ao18dk',
                        'as2_ao18dk', 'as1_ao20dk', 'as2_ao20dk' ]
# Full set of SSAGA fields pulled per instrument: the recode list plus
# completion/missing flags, test date, and the raw item scores.
# NOTE(review): 'pn_ao8dk' appears twice below; harmless at runtime but
# presumably one occurrence is redundant -- confirm against the data dictionary.
ssaga_variables = ssaga_recode_as_age + [ 'complete', 'missing', 'dotest',
    'al1ageons', 'as2a', 'asa_ao2', 'as2b', 'asb_ao2', 'as6b', 'asc1_ao6', 'asc2_ao6', 'as9', 'as_ao9',
    'as10a', 'as_ao10', 'as11', 'as1_ao11', 'as2_ao11', 'as15', 'as1_ao15', 'as2_ao15', 'as16', 'as1_ao16', 'as2_ao16',
    'as14', 'asa1_ao14', 'asa2_ao14', 'as14b', 'asc1_ao14', 'asc2_ao14', 'as17a', 'as1_ao17', 'as2_ao17',
    'as19', 'as1_ao19', 'as2_ao19', 'as18b', 'as1_ao18', 'as2_ao18', 'as20', 'as1_ao20', 'as2_ao20',
    'oc1', 'oc_ao8', 'oc9', 'oc_ao16',
    'pn1x', 'pn2a', 'pn2b', 'pn5', 'pn_ao8', 'pn_ao8dk', 'pn_ao8dk',
    'dp4a', 'dp4b', 'dp3', 'dp3_1', 'dp11', 'dp12', 'dp15a', 'dp15b', 'dp15c', 'dp15d' ]
# REDCap fields required from each input form (keyed by form name).
input_fields = { 'youthreport1' : [ 'youthreport1_yfhi3a_yfhi3a', 'youthreport1_yfhi3a_yfhi3f', 'youthreport1_yfhi4a_yfhi4a', 'youthreport1_yfhi4a_yfhi4f',
                                    'youthreport1_yfhi3a_yfhi3b', 'youthreport1_yfhi4a_yfhi4b',
                                    'youthreport1_yfhi3a_yfhi3c', 'youthreport1_yfhi4a_yfhi4c',
                                    'youthreport1_yfhi3a_yfhi3g', 'youthreport1_yfhi4a_yfhi4g',
                                    'youthreport1_yfhi3a_yfhi3h', 'youthreport1_yfhi4a_yfhi4h',
                                    'youthreport1_date_interview', 'youth_report_1_complete', 'youthreport1_missing' ],
                 'parentreport' : [ 'parentreport_pfhi3a_pfhi3a', 'parentreport_pfhi3a_pfhi3f', 'parentreport_pfhi4a_pfhi4a', 'parentreport_pfhi4a_pfhi4f',
                                    'parentreport_pfhi3a_pfhi3b', 'parentreport_pfhi4a_pfhi4b',
                                    'parentreport_pfhi3a_pfhi3c', 'parentreport_pfhi4a_pfhi4c',
                                    'parentreport_pfhi3a_pfhi3g', 'parentreport_pfhi4a_pfhi4g',
                                    'parentreport_pfhi3a_pfhi3h', 'parentreport_pfhi4a_pfhi4h',
                                    'parentreport_date_interview', 'parent_report_complete', 'parentreport_missing' ],
                 'ssaga_youth' : [ 'ssaga_youth_%s' % var for var in ssaga_variables ],
                 'ssaga_parent' : [ 'ssaga_parent_%s' % var for var in ssaga_variables ] }
# Name of the REDCap form this score writes back to.
output_form = 'highrisk'
#
# Recode one field as age
#
def recode_field_as_age( code, age ):
    """Map an SSAGA age-bracket code to a representative age.

    Codes 1/2/3 stand for age brackets and map to 12/14/17; any other
    value (including NaN) falls back to the subject's actual age.
    """
    for bracket, bracket_age in ((1, 12), (2, 14), (3, 17)):
        if code == bracket:
            return bracket_age
    return age
#
# Compute the "parhx" variable
#
def compute_parhx( row ):
    """Count parental alcohol-history flags (0-2) for one record.

    Returns NaN unless at least one of the youth/parent reports is
    complete and not marked missing.  One point if either report flags
    item 3a/4a, a second point if either flags item 3f/4f.
    """
    have_youth_report = (row['youth_report_1_complete'] > 0) and not (row['youthreport1_missing'] > 0)
    have_parent_report = (row['parent_report_complete'] > 0) and not (row['parentreport_missing'] > 0)
    if have_youth_report or have_parent_report:
        parhx = 0
        if (have_youth_report and (row['youthreport1_yfhi3a_yfhi3a']==1 or row['youthreport1_yfhi4a_yfhi4a']==1)) or (have_parent_report and (row['parentreport_pfhi3a_pfhi3a']==1 or row['parentreport_pfhi4a_pfhi4a']==1)):
            parhx += 1
        # BUG FIX: the second parent check previously re-tested pfhi3a
        # instead of pfhi3f (copy/paste error); it must mirror the
        # youth yfhi3f/yfhi4f pair (pfhi3f is declared in input_fields).
        if (have_youth_report and (row['youthreport1_yfhi3a_yfhi3f']==1 or row['youthreport1_yfhi4a_yfhi4f']==1)) or (have_parent_report and (row['parentreport_pfhi3a_pfhi3f']==1 or row['parentreport_pfhi4a_pfhi4f']==1)):
            parhx += 1
        return parhx
    else:
        return numpy.nan
#
# Compute the "gparhx" variable
#
def compute_gparhx( row ):
    """Count grandparental alcohol-history flags (0-4) for one record.

    Returns NaN unless at least one of the youth/parent reports is
    complete and not marked missing.  One point per item suffix
    (b, c, g, h) flagged in either report.
    """
    have_youth_report = (row['youth_report_1_complete'] > 0) and not (row['youthreport1_missing'] > 0)
    have_parent_report = (row['parent_report_complete'] > 0) and not (row['parentreport_missing'] > 0)
    if not (have_youth_report or have_parent_report):
        return numpy.nan
    gparhx = 0
    for suffix in ['b', 'c', 'g', 'h']:
        youth_flag = have_youth_report and (
            row['youthreport1_yfhi3a_yfhi3%s' % suffix] == 1 or
            row['youthreport1_yfhi4a_yfhi4%s' % suffix] == 1)
        parent_flag = have_parent_report and (
            row['parentreport_pfhi3a_pfhi3%s' % suffix] == 1 or
            row['parentreport_pfhi4a_pfhi4%s' % suffix] == 1)
        if youth_flag or parent_flag:
            gparhx += 1
    return gparhx
#
# Compute the "extern" variable
#
def compute_extern( row, ssaga ):
    """Count externalizing SSAGA criteria (0-13) met before alcohol onset.

    ssaga: 'youth' or 'parent' -- selects which instrument's columns to
    read.  Returns NaN when that SSAGA is incomplete/missing or has no
    age of first alcohol use ('al1ageons').
    """
    if (row['ssaga_%s_complete' % ssaga] > 0) and not (row['ssaga_%s_missing' % ssaga] > 0):
        # First extract the fields for this particular SSAGA into an easy-to-query dictionary
        this_ssaga = dict()
        for var in ssaga_variables:
            this_ssaga[var] = row['ssaga_%s_%s' % (ssaga,var)]
        age_onset = this_ssaga['al1ageons']
        # BUG FIX: "age_onset == numpy.nan" is always False (NaN never
        # compares equal, even to itself), so missing onset ages were
        # silently treated as valid.  pandas.isnull is a real check.
        if pandas.isnull( age_onset ):
            return numpy.nan
        extern = 0
        if this_ssaga['as2a']==5 and min( this_ssaga['asa_ao2'], this_ssaga['asa_ao2dk'] ) < age_onset:
            extern += 1
        if this_ssaga['as2b']==5 and min( this_ssaga['asb_ao2'], this_ssaga['asb_ao2dk'] ) < age_onset:
            extern += 1
        if this_ssaga['as6b'] > 1 and min( this_ssaga['asc1_ao6'], this_ssaga['asc2_ao6'], this_ssaga['asc1_ao6dk'], this_ssaga['asc2_ao6dk'] ) < age_onset:
            extern += 1
        if this_ssaga['as9']==5 and min( this_ssaga['as_ao9'], this_ssaga['as_ao9dk']) < age_onset:
            extern += 1
        if this_ssaga['as10a'] > 1 and min( this_ssaga['as_ao10'], this_ssaga['as_ao10dk'] ) < age_onset:
            extern += 1
        if this_ssaga['as11'] > 1 and min( this_ssaga['as1_ao11'], this_ssaga['as2_ao11'], this_ssaga['as1_ao11dk'], this_ssaga['as2_ao11dk'] ) < age_onset:
            extern += 1
        if this_ssaga['as15'] > 1 and min( this_ssaga['as1_ao15'], this_ssaga['as2_ao15'], this_ssaga['as1_ao15dk'], this_ssaga['as2_ao15dk'] ) < age_onset:
            extern += 1
        if this_ssaga['as16'] > 1 and min( this_ssaga['as1_ao16'], this_ssaga['as2_ao16'], this_ssaga['as1_ao16dk'], this_ssaga['as2_ao16dk'] ) < age_onset:
            extern += 1
        # The next check is an "and" of two longer expressions; implement this as nested if's for readability
        if this_ssaga['as14'] > 1 and min( this_ssaga['asa1_ao14'], this_ssaga['asa2_ao14'], this_ssaga['asa1_ao14dk'], this_ssaga['asa2_ao14dk'] ) < age_onset:
            if this_ssaga['as14b'] > 1 and min( this_ssaga['asc1_ao14'], this_ssaga['asc2_ao14'], this_ssaga['asc1_ao14dk'], this_ssaga['asc2_ao14dk'] ) < age_onset:
                extern += 1
        if this_ssaga['as17a'] == 5 and min( this_ssaga['as1_ao17'], this_ssaga['as2_ao17'], this_ssaga['as1_ao17dk'], this_ssaga['as2_ao17dk'] ) < age_onset:
            extern += 1
        if this_ssaga['as19'] > 1 and min( this_ssaga['as1_ao19'], this_ssaga['as2_ao19'], this_ssaga['as1_ao19dk'], this_ssaga['as2_ao19dk'] ) < age_onset:
            extern += 1
        if this_ssaga['as18b'] == 5 and min( this_ssaga['as1_ao18'], this_ssaga['as2_ao18'], this_ssaga['as1_ao18dk'], this_ssaga['as2_ao18dk'] ) < age_onset:
            extern += 1
        if this_ssaga['as20'] > 1 and min( this_ssaga['as1_ao20'], this_ssaga['as2_ao20'], this_ssaga['as1_ao20dk'], this_ssaga['as2_ao20dk'] ) < age_onset:
            extern += 1
        return extern
    return numpy.nan
#
# Call "extern" computation for Youth SSAGA
#
def compute_extern_youth( row ):
    """Externalizing score from the Youth SSAGA (see compute_extern)."""
    return compute_extern( row, ssaga='youth' )
#
# Call "extern" computation for Parent SSAGA
#
def compute_extern_parent( row ):
    """Externalizing score from the Parent SSAGA (see compute_extern)."""
    return compute_extern( row, ssaga='parent' )
#
# Compute the "intern" variable
#
def compute_intern( row, ssaga ):
    """Count internalizing SSAGA criteria (0-6) met before alcohol onset.

    ssaga: 'youth' or 'parent' -- selects which instrument's columns to
    read.  Returns NaN when that SSAGA is incomplete/missing or has no
    age of first alcohol use ('al1ageons').
    """
    if (row['ssaga_%s_complete' % ssaga] > 0) and not (row['ssaga_%s_missing' % ssaga] > 0):
        # First extract the fields for this particular SSAGA into an easy-to-query dictionary
        this_ssaga = dict()
        for var in ssaga_variables:
            this_ssaga[var] = row['ssaga_%s_%s' % (ssaga,var)]
        age_onset = this_ssaga['al1ageons']
        # BUG FIX: "age_onset == numpy.nan" is always False (NaN never
        # compares equal, even to itself), so missing onset ages were
        # silently treated as valid.  pandas.isnull is a real check.
        if pandas.isnull( age_onset ):
            return numpy.nan
        intern = 0
        if this_ssaga['oc1']==5 and (this_ssaga['oc_ao8'] < age_onset):
            intern += 1
        if this_ssaga['oc9']==5 and (this_ssaga['oc_ao16'] < age_onset):
            intern += 1
        if (this_ssaga['pn1x']==5 or this_ssaga['pn2a']==5 or this_ssaga['pn2b']==5 or this_ssaga['pn5'] > 2) and ((this_ssaga['pn_ao8'] < age_onset) or (this_ssaga['pn_ao8dk']==1 and age_onset > 10) or (this_ssaga['pn_ao8dk']==2 and age_onset > 20)):
            intern += 1
        # dp3_age_check is shared by the three depression criteria below.
        dp3_age_check = (this_ssaga['dp3'] < age_onset) or (this_ssaga['dp3_1']==1 and age_onset > 10) or (this_ssaga['dp3_1']==2 and age_onset > 20)
        if (this_ssaga['dp4a']==5 or this_ssaga['dp4b']==5) and dp3_age_check:
            intern += 1
        if (this_ssaga['dp11']==5 or this_ssaga['dp12']==5) and dp3_age_check:
            intern += 1
        if (this_ssaga['dp15a']==5 or this_ssaga['dp15b']==5 or this_ssaga['dp15c']==5 or this_ssaga['dp15d']==5) and dp3_age_check:
            intern += 1
        return intern
    return numpy.nan
#
# Call "intern" computation for Youth SSAGA
#
def compute_intern_youth( row ):
    """Internalizing score from the Youth SSAGA (see compute_intern)."""
    return compute_intern( row, ssaga='youth' )
#
# Call "intern" computation for Parent SSAGA
#
def compute_intern_parent( row ):
    """Internalizing score from the Parent SSAGA (see compute_intern)."""
    return compute_intern( row, ssaga='parent' )
#
# Compute risk status for one row (i.e., one record)
#
def compute_status( row ):
    """Composite high-risk status for one record: 1 = high risk, 0 = not.

    A record is high risk if ANY of these holds: family history score
    above 1, early alcohol onset (<= 14) on either SSAGA, or an
    internalizing/externalizing score above 2 on either SSAGA.
    """
    family_history = (row['highrisk_gparhx'] > 1 or
                      row['highrisk_parhx'] > 1)
    early_onset_youth = (row['ssaga_youth_complete'] and
                         not (row['ssaga_youth_missing'] > 0) and
                         (row['ssaga_youth_al1ageons'] <= 14))
    early_onset_parent = (row['ssaga_parent_complete'] and
                          not (row['ssaga_parent_missing'] > 0) and
                          (row['ssaga_parent_al1ageons'] <= 14))
    internalizing = (row['highrisk_yss_intern'] > 2 or
                     row['highrisk_pss_intern'] > 2)
    externalizing = (row['highrisk_yss_extern'] > 2 or
                     row['highrisk_pss_extern'] > 2)
    if (family_history or early_onset_youth or early_onset_parent or
            internalizing or externalizing):
        return 1
    return 0
#
# Driver function - go through the steps of status determination
#
def compute_scores( data, demographics ):
    """Compute all high-risk output fields and return them (as strings).

    data: DataFrame of input fields, indexed so that key[0] is the
    subject id; demographics: provides 'dob' ('%Y-%m-%d') per subject.
    Mutates `data` in place and returns the highrisk_* columns.
    """
    outfield_list = [ 'highrisk_parhx', 'highrisk_gparhx',
                      'highrisk_yss_intern', 'highrisk_yss_extern', 'highrisk_yss_al1ageons',
                      'highrisk_pss_intern', 'highrisk_pss_extern', 'highrisk_pss_al1ageons',
                      'highrisk_status', 'highrisk_complete' ]
    for outfield in outfield_list:
        data[outfield] = 0
    # First, for each SSAGA (Youth and Parent) determine subject age and re-code age-related fields according to lookup table
    date_format_ymd = '%Y-%m-%d'
    for key, row in data.iterrows():
        try:
            dob = datetime.datetime.strptime( demographics['dob'][key[0]], date_format_ymd )
            for ssaga in ['youth','parent']:
                if (row['ssaga_%s_complete' % ssaga] > 0) and not (row['ssaga_%s_missing' % ssaga] > 0):
                    try:
                        age = (datetime.datetime.strptime( row['ssaga_%s_dotest' % ssaga], date_format_ymd ) - dob).days / 365.242
                    except Exception:
                        # BUG FIX: 'ssage' was an undefined name here
                        # (NameError); report the instrument actually
                        # being parsed.
                        error = dict(subject_id=key[0],
                                     ssaga=ssaga,
                                     ssaga_date=row['ssaga_%s_dotest' % ssaga],
                                     error='WARNING: Problem parsing.')
                        print(json.dumps(error, sort_keys=True))
                        age = numpy.nan
                    for column in ssaga_recode_as_age:
                        # BUG FIX: the field name was built from the
                        # stale loop variable 'var' instead of 'column',
                        # so only one (or no) field was ever recoded.
                        fieldname = 'ssaga_%s_%s' % (ssaga,column)
                        data[fieldname][key] = recode_field_as_age( data[fieldname][key], age )
        except Exception:
            error = dict(subject_id = key[0],
                         error = 'WARNING: Problem determining DOB for subject')
            # BUG FIX: this warning was built but never emitted.
            print(json.dumps(error, sort_keys=True))
            # Without a DOB the recoded ages cannot be computed.
            for column in ssaga_recode_as_age:
                for ssaga in ['youth','parent']:
                    fieldname = 'ssaga_%s_%s' % (ssaga,column)
                    data[fieldname][key] = numpy.nan
    # Second, compute "parhx" and "gparhx" from youth and/or parent report
    data['highrisk_parhx'] = data.apply( compute_parhx, axis=1 )
    data['highrisk_gparhx'] = data.apply( compute_gparhx, axis=1 )
    # Third, compute "internalizing" from Youth and/or Parent SSAGA
    data['highrisk_yss_intern'] = data.apply( compute_intern_youth, axis=1 )
    data['highrisk_pss_intern'] = data.apply( compute_intern_parent, axis=1 )
    # Fourth, compute "externalizing" from Youth and/or Parent SSAGA
    data['highrisk_yss_extern'] = data.apply( compute_extern_youth, axis=1 )
    data['highrisk_pss_extern'] = data.apply( compute_extern_parent, axis=1 )
    # Fifth, compute composite "risk status"
    data['highrisk_status'] = data.apply( compute_status, axis=1 )
    # Sixth, for good measure, copy the "Age of Onset" columns from the two SSAGA instruments
    data['highrisk_yss_al1ageons'] = data['ssaga_youth_al1ageons']
    data['highrisk_pss_al1ageons'] = data['ssaga_parent_al1ageons']
    # Finally, convert everything to strings (and nan to empty string) to avoid validation errors
    for outfield in outfield_list:
        data[outfield] = data[outfield].map( lambda x: str(int(x)) if str(x) != 'nan' else '' )
    data['highrisk_complete'] = '1'
    return data[ outfield_list ]
| |
# Source activate.sh (of QIIME) before running QIIME on EC2 because non-interactive shell (such as ssh) has $PATH set
# differently compared to interactive (logged in) shell.
import os
import commands
import sys
import re
import subprocess
import logging
from datetime import datetime
def input_file_help():
    """Print the expected format of the run/mapping input file."""
    # NOTE(review): Python 2 print statement -- the whole script is
    # Python 2 (it also uses `commands` and `raw_input`).
    print """
Help me please!!
The input file should be tab-delimited file with .txt extension. The first column should be
folder name of the MiSeq run. The second column should be the name of the mapping file of the run along
with its .txt extension. There should be no trailing white spaces or empty last lines.
Following is how a correct file should be:
140401_M01869_0071_000000000-A7YEF mapping_file_run1.txt
140407_M01869_0073_000000000-A7WVG mapping_file_run2.txt
"""
def input_check(infile):
    """ Checks if input file name is entered correctly """
    # Recursively re-prompts (via raw_input) until a valid, existing,
    # properly formatted tab-delimited file is supplied.  Returns a
    # (maplist, infile) tuple: the mapping-file names and the input
    # file's own name.
    if infile == "":
        print "Error: File name not provided!"
        mapfile = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
        return input_check(mapfile)
    elif infile.lower() == "help":
        input_file_help()
        mapfile = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
        return input_check(mapfile)
    else:
        # NOTE(review): uses the Python-2-only `commands` module to get
        # the current working directory.
        working_folder = commands.getstatusoutput('pwd')[1]
        filelist = os.listdir(working_folder)
        if infile not in filelist:
            print "Error: File doesn't exist!"
            mapfile = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
            return input_check(mapfile)
        else:
            maplist = []
            infl = open(infile, 'rU')
            for line in infl:
                # Each line must be exactly: <run folder>\t<mapping file>
                spline = line.strip().split("\t")
                if len(spline) != 2:
                    print "Error: File is not in proper format. There's missing data, no tab-seperation and/or extra empty line(s)."
                    mapfile = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
                    return input_check(mapfile)
                else:
                    maplist.append(spline[1])
            return maplist, infile # Returns list of mapping files along with name of input file
def mapping_check(maplist):
    """Checks if mapping file name is correct and runs validate_mapping_file.py script """
    # First pass: verify every mapping file exists (via `ls` exit code);
    # if any is missing, restart the whole input dialogue.
    for mapfile in maplist:
        with open(os.devnull, "w") as fnull:
            result = subprocess.call(["ls", mapfile], stdout = fnull, stderr = fnull)
            if result != 0: # Return code is 0 is ls command is successful
                print "Error: One or more of your mapfiles is not present in your current working directory"
                mapfile2 = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
                inp_check = input_check(mapfile2)
                return mapping_check(inp_check[0])
    # Second pass: run QIIME's validator on each mapping file and move
    # the corrected copy into the current working directory.
    for mapfile in maplist:
        filename = mapfile.strip().split(".txt")[0]
        os.system("validate_mapping_file.py -m %s -o corrected_%s" % (mapfile.strip(),filename))
        os.system("mv $PWD/corrected_%s/%s_corrected.txt ." % (filename,filename))
    # Names of the corrected copies, in the same order as maplist.
    corrected_files = [mapfile.strip().split(".txt")[0]+"_corrected.txt" for mapfile in maplist]
    return corrected_files
def check_value(expression,question,arg):
    """ Function to check if input parameters are correct """
    # Returns the value as a string if it parses as the requested
    # numeric type ('integer' or 'float'); returns "" (meaning "use the
    # default") when the user just hit enter; otherwise re-prompts
    # recursively until a valid value is given.
    # NOTE(review): if `arg` is neither 'integer' nor 'float' and no
    # ValueError is raised, this falls through and returns None.
    try:
        if arg == "integer":
            return str(int(expression))
        if arg == "float":
            return str(float(expression))
    except ValueError:
        # int("")/float("") raise ValueError, so the empty string lands
        # here and is passed through unchanged.
        if expression == "":
            return expression
        else:
            print "Invalid value. Please enter a number or just hit enter for default value."
            checker = raw_input(question)
            return check_value(checker,question,arg)
def log_output(statement):
    """ Logs and prints output messages """
    # basicConfig is a no-op after its first effective call, so invoking
    # it on every message is safe (if redundant).
    logging.basicConfig(filename='logging_module_output.txt',level=logging.INFO)
    logging.info(statement)
    print statement
def log_parse(outfile, inputfile):
    """Strip the ``INFO:root:`` prefix from a logging-module transcript.

    Reads *inputfile* line by line: lines carrying the logging prefix
    are written without it (bare-prefix lines are dropped), all other
    lines are written stripped.  Returns the (closed) output file
    object, matching the original interface.
    """
    output = open(outfile, "w")
    # 'r' instead of the removed-in-3.11 'rU' mode; universal newline
    # handling is the default in Python 3 text mode anyway.
    infile = open(inputfile, 'r')
    try:
        for line in infile:
            if line.startswith("INFO:root:"):
                linename = line.strip().split("INFO:root:")
                # Skip lines that were just the bare prefix.
                if linename[1] != '':
                    output.write(linename[1]+"\n")
            else:
                output.write(line.strip()+"\n")
    finally:
        # BUG FIX: the input handle was previously never closed.
        infile.close()
        output.close()
    return output
def filtFast(flashread, indexfile):
    """ Shoko's script as a function """
    # Keeps only the index (barcode) reads whose headers appear in the
    # FLASh header list *flashread*, writing them in that exact order to
    # "Index_filtered_ordered.fastq" in the current directory.  Returns
    # the (closed) output file object, matching the original interface.
    # BUG FIX: the header file handle was opened and never closed, and
    # the index file was scanned with a perpetual "while fileobj:" loop;
    # both files are now closed deterministically via `with`.
    with open(flashread, "r") as header_file:
        headers = header_file.read().strip().split("\n")
    # Map each index-read header to its following three record lines
    # (sequence, '+', quality).
    index_seqs = {}
    with open(indexfile, "r") as index_data:
        while True:
            headerline = index_data.readline().split("\n")[0]
            if headerline == '':
                break
            contents = ''
            for _ in range(3):
                contents += index_data.readline().split("\n")[0] + "\n"
            index_seqs[headerline] = contents
    # Emit the surviving records in FLASh-header order.
    outdata = ''
    for header in headers:
        outdata += header + "\n" + index_seqs[header]
    of = open("Index_filtered_ordered.fastq", "w")
    of.write(outdata)
    of.close()
    return of
def preproceSteps(seq_data,m_min,read_len,runmap_file):
    """ Unzipping, flashing, pre-processing """
    # Empty arguments fall back to defaults: flash min overlap 225,
    # read length 251, and the standard MiSeq data directory.
    if m_min == "":
        m_min += "225"
    if read_len == "":
        read_len += "251"
    if seq_data == "":
        seq_data += "/data/MiSeq_16S_data/MiSeqAnalysis"
    log_output("\nRead length for flash: %s" % read_len)
    log_output("Min. overlap for flash: %s" % m_min)
    # Run folder names come from the first column of the run/map file.
    folders = []
    infile = open(runmap_file, 'rU')
    for line in infile:
        spline = line.strip().split("\t")
        folders.append(spline[0])
    for seqs_id in folders:
        working_folder = commands.getstatusoutput('pwd')[1]
        seq_path = "%s/%s/Data/Intensities/BaseCalls/" % (seq_data,seqs_id)
        os.chdir(seq_path)
        log_output("\n#Step 1: Gunzipping sequence reads files in MiSeqAnalysis folder...")
        os.system("gunzip Undetermined_*")
        log_output("Gunzipping complete!")
        log_output("\n#Step 2: Assembling R1 and R2 using flash...")
        os.system("flash -r %s -f 300 -s 30 -m %s -d $PWD/Output_folder_%s/ -q Undetermined_S0_L001_R1_001.fastq Undetermined_S0_L001_R2_001.fastq" % (read_len, m_min,seqs_id))
        log_output("flash complete!")
        # Bring the flash output back to the working directory.
        os.system("mv -f Output_folder_%s/ %s" % (seqs_id,working_folder))
        os.chdir(working_folder)
        log_output("\n#Step 3: Removing barcode reads from index file that are not in assembled file...")
        os.system("sed -n '1~4'p $PWD/Output_folder_%s/out.extendedFrags.fastq >FLAShReads.txt" % seqs_id) # Select the headers of all sequences generated. -n flag is for quiet output. '1~4'p means starting from 1, select every 4 lines after it.
        log_output("Barcode removal complete!")
        log_output("\n#Step 4: Extracting those reads from index file and order them the same as flash reads")
        filtFast("FLAShReads.txt","%s/Undetermined_S0_L001_I1_001.fastq" % seq_path)
        log_output("Extraction complete!")
        os.chdir(seq_path)
        log_output("\n#Step 5: Gzipping back the sequence files in MiSeqAnalysis folder...")
        os.system("gzip Undetermined_S0_L001_*")
        os.chdir(working_folder)
        # Tag the filtered index file with its run id.
        os.system("mv Index_filtered_ordered.fastq Index_filtered_ordered_run_%s.fastq" % seqs_id)
        log_output("Gzip complete!")
    return
def check_ec_permission(ec_key):
    """ Check file permission of EC2 key """
    # ssh refuses private keys readable by others; ensure mode 400.
    # NOTE(review): parsing `ls -l` output by splitting on the literal
    # character "1" is fragile (it assumes a link count of 1 right after
    # the mode string) -- confirm on the deployment host.
    val = 'ls -l %s' % ec_key
    result = list(subprocess.Popen([val], shell=True, stdout=subprocess.PIPE).stdout)
    if result[0].split("1")[0].strip() == "-r--------":
        print "No need to change file permission."
        return
    else:
        os.system("chmod 400 %s" % ec_key)
        return
def scp_amazon(runmap_file,ec_key,dns,anl_fold):
    """ Copy relevant files and folders to EC2 instance """
    # runmap_file: run-id/mapping-file table; ec_key: ssh identity file;
    # dns: instance public DNS; anl_fold: remote analysis folder name.
    run_map_dict = {}
    infile = open(runmap_file, 'rU')
    for line in infile:
        spline = line.strip().split("\t")
        run_map_dict[spline[0]] = spline[1].strip().split(".txt")[0]+"_corrected"+".txt" #Run IDs as keys and mapping filenames as values
    # Create the remote analysis folder first.
    mkdir = "mkdir %s/" % anl_fold
    os.system("ssh -i %s ubuntu@%s %s" % (ec_key,dns,mkdir))
    for fold_id in run_map_dict:
        folder = "Output_folder_"+fold_id
        log_output("\n#Step 6: Copying relevant files to the Amazon EC2 instance of QIIME...")
        # Per run: flash output folder, filtered index file, and the
        # corrected mapping file.
        os.system("scp -r -i %s %s ubuntu@%s:/home/ubuntu/%s" % (ec_key,folder,dns,anl_fold))
        os.system("scp -i %s Index_filtered_ordered_run_%s.fastq ubuntu@%s:/home/ubuntu/%s" % (ec_key,fold_id,dns,anl_fold))
        mapfile = run_map_dict[fold_id]
        os.system("scp -i %s %s ubuntu@%s:/home/ubuntu/%s" % (ec_key,mapfile,dns,anl_fold))
    # Finally ship the run/map table itself.
    os.system("scp -i %s %s ubuntu@%s:/home/ubuntu/%s" % (ec_key,runmap_file,dns,anl_fold))
    log_output("All files copied to EC2 instace!\n")
    return
def split_library(ec_key,dns,anl_fold,runmap_file,phred,max_bad_run,min_rl_frac,n_chars,barcode,start_seq):
    """ Splits libraries """
    # Runs QIIME's split_libraries_fastq.py remotely on the EC2 instance
    # for every run in runmap_file.  Empty string arguments fall back to
    # the defaults below.
    if phred == "":
        phred += "30"
    if max_bad_run == "":
        max_bad_run += "3"
    if min_rl_frac == "":
        min_rl_frac += "0.75"
    if n_chars == "":
        n_chars += "0"
    if barcode == "":
        barcode += "12"
    if start_seq == "":
        start_seq += "0"
    log_output("Phred score: %s" % phred)
    log_output("Max number of consecutive low quality base calls allowed before truncating a read: %s" % max_bad_run)
    log_output("Min number of consecutive high quality base calls to include a \
read (per single end read) as a fraction of the input read length: %s" % min_rl_frac)
    log_output("Max number of N characters allowed in a sequence to retain it: %s" % n_chars)
    log_output("The type of barcode used: %s" % barcode)
    log_output("The start seq_ids as ascending integers beginning with start_seq_id: %s" % start_seq)
    # activate.sh must be sourced in every remote command because
    # non-interactive ssh shells do not load QIIME's PATH.
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && mkdir fna_files/'" % (ec_key,dns,anl_fold))
    run_map_dict = {}
    infile = open(runmap_file, 'rU')
    for line in infile:
        spline = line.strip().split("\t")
        run_map_dict[spline[0]] = spline[1].strip().split(".txt")[0]+"_corrected"+".txt" #Run IDs as keys and mapping filenames as values
    for fold_id in run_map_dict:
        folder = "Output_folder_"+fold_id
        mapfile = run_map_dict[fold_id]
        log_output("\n#Step 7: Splitting libraries using 'split_libraries_fastq.py'...")
        os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && split_libraries_fastq.py -i %s/out.extendedFrags.fastq -m %s \
-o split_lib_output_%s/ -q %s -r %s -p %s -n %s\
 --rev_comp_barcode -b Index_filtered_ordered_run_%s.fastq \
--barcode_type %s -s %s'" % (ec_key,dns,anl_fold,folder,mapfile,fold_id,phred,max_bad_run,min_rl_frac,n_chars,fold_id,barcode,start_seq))
        log_output("split_libraries_fastq.py complete!")
        # Collect each run's seqs.fna under a per-run name in fna_files/.
        os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && mv split_lib_output_%s/seqs.fna seqs_%s.fna'" % (ec_key,dns,anl_fold,fold_id,fold_id))
        os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && mv seqs_%s.fna fna_files/'" % (ec_key,dns,anl_fold,fold_id))
    return
def open_otus_till_biom(parallel,ref_db,ec_key,dns,anl_fold):
    """Run the remote QIIME stages from open-reference OTU picking through
    the final BIOM table summary (pipeline steps 8-14), all executed over
    ssh on the EC2 instance.

    Arguments:
    parallel -- number of parallel jobs; "" falls back to the default of 4
    ref_db   -- full path to the reference OTU database on the instance;
                "" falls back to the Greengenes 13_8 97% set
    ec_key   -- keypair file used to ssh into the EC2 instance
    dns      -- public DNS of the EC2 instance
    anl_fold -- analysis folder on the EC2 instance
    """
    # Apply defaults when the user just pressed enter at the prompts.
    if parallel == "":
        parallel += "4"
    if ref_db == "":
        ref_db += "/home/ubuntu/qiime_software/gg_otus-13_8-release/rep_set/97_otus.fasta"
    # Concatenate the per-run fna files into a single OTU-picking input.
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && cat fna_files/*.fna > fna_files/seqs_cat.fna'" % (ec_key,dns,anl_fold))
    log_output("\n#Step 8: Picking open-references OTUs using 'pick_open_reference_otus.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && pick_open_reference_otus.py -i fna_files/seqs_cat.fna -o open_otus_picked/ -aO %s -r %s'" % (ec_key,dns,anl_fold,parallel,ref_db))
    log_output("OTU picking caused errors, but we'll be able to proceed!")
    # Stage the PyNAST template alignment inside the analysis folder.
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && cp /home/ubuntu/qiime_software/core_set_aligned.fasta.imputed .'" % (ec_key,dns,anl_fold))
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && mv core_set_aligned.fasta.imputed core_set_aligned_imputed.fasta'" % (ec_key,dns,anl_fold))
    log_output("\n#Step 9.0: Aligning sequences to template using 'parallel_align_seqs_pynast.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && parallel_align_seqs_pynast.py -i open_otus_picked/rep_set.fna -o open_otus_picked/pynast_aligned_seqs \
-t $PWD/core_set_aligned_imputed.fasta --jobs_to_start %s'" % (ec_key,dns,anl_fold,parallel))
    log_output("parallel_align_seqs_pynast.py complete!")
    log_output("\n#Step 9.1: Making OTU table by filtering alignment to remove sequences that did not align using 'make_otu_table.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && make_otu_table.py -i open_otus_picked/final_otu_map_mc2.txt -o open_otus_picked/otu_table_mc2_no_pynast_failures_w_tax.biom \
-e open_otus_picked/pynast_aligned_seqs/rep_set_failures.fasta -t open_otus_picked/uclust_assigned_taxonomy/rep_set_tax_assignments.txt'" % (ec_key,dns,anl_fold))
    log_output("make_otu_table.py complete!")
    log_output("\n#Step 9.2: Identifying chimeric sequences using 'parallel_identify_chimeric_seqs.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && parallel_identify_chimeric_seqs.py -i open_otus_picked/pynast_aligned_seqs/rep_set_aligned.fasta -a core_set_aligned_imputed.fasta \
-m ChimeraSlayer -o chimeraslayer_chimeric_seqs.txt -O %s'" % (ec_key,dns,anl_fold,parallel))
    log_output("parallel_identify_chimeric_seqs.py complete!")
    log_output("\n#Step 10: Filtering chimeric sequences out of the alignment file using 'filter_fasta.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && filter_fasta.py -f open_otus_picked/pynast_aligned_seqs/rep_set_aligned.fasta -o non_chimeric_rep_set_aligned.fasta \
-s chimeraslayer_chimeric_seqs.txt -n'" % (ec_key,dns,anl_fold))
    log_output("filter_fasta.py complete!")
    log_output("\n#Step 11: Filtering non_chimeric_rep_set_aligned.fasta to remove gaps using 'filter_alignment.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && filter_alignment.py -i non_chimeric_rep_set_aligned.fasta -m /home/ubuntu/qiime_software/lanemask_in_1s_and_0s \
-o non_chimeric_pynast_filtered/'" % (ec_key,dns,anl_fold))
    log_output("filter_alignment.py complete!")
    log_output("\n#Step 12: Building new phylogenetic tree using 'make_phylogeny.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && make_phylogeny.py -i non_chimeric_pynast_filtered/non_chimeric_rep_set_aligned_pfiltered.fasta \
-o non_chimeric_rep_set_aligned_pfiltered.tre'" % (ec_key,dns,anl_fold))
    log_output("make_phylogeny.py complete!")
    log_output("\n#Step 13: Filtering chimeric OTUs from the OTU table using 'filter_otus_from_otu_table.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && filter_otus_from_otu_table.py -i open_otus_picked/otu_table_mc2_no_pynast_failures_w_tax.biom \
-o otu_table_mc2_no_pynast_failures_no_chimeras_w_tax.biom -e chimeraslayer_chimeric_seqs.txt'" % (ec_key,dns,anl_fold))
    log_output("filter_otus_from_otu_table.py complete!")
    log_output("\n#Step 14: Writing biom table summary using 'biom summarize-table'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && biom summarize-table -i otu_table_mc2_no_pynast_failures_no_chimeras_w_tax.biom \
-o otu_table_mc2_no_pynast_failures_no_chimeras_lowfilter_w_tax_biom_summary_mc2.txt'" % (ec_key,dns,anl_fold)) #0:00:01.602008
    log_output("biom summarize-table complete!")
    return
def summary_view(ec_key,dns,anl_fold,viewtable):
    """Show the final BIOM summary table in `less` on the remote host.

    'yes' opens the file; any other answer re-prompts recursively until
    the user agrees -- there is intentionally no way to decline.

    Arguments:
    ec_key    -- keypair file used to ssh into the EC2 instance
    dns       -- public DNS of the EC2 instance
    anl_fold  -- analysis folder on the EC2 instance
    viewtable -- the user's answer (compared case-insensitively)
    """
    if viewtable.lower() == 'yes':
        os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && less otu_table_mc2_no_pynast_failures_no_chimeras_lowfilter_w_tax_biom_summary_mc2.txt'" % (ec_key,dns,anl_fold))
    elif viewtable.lower() == 'no':
        # Declining is rejected explicitly; re-ask and recurse.
        print "No is not an option!"
        table = raw_input("The summary table of the final OTU table is ready. Type 'yes' to view it. \
Once viewed, you can quit by simply typing q. Are you ready? ")
        return summary_view(ec_key,dns,anl_fold,table)
    else:
        # Unrecognized answer; re-ask and recurse.
        print "I don't understand."
        table = raw_input("The summary table of the final OTU table is ready. Type 'yes' to view it. \
Once viewed, you can quit by simply typing q. Are you ready? ")
        return summary_view(ec_key,dns,anl_fold,table)
def rarefaction_check(depth):
    """Validate the rarefaction depth, re-prompting until it is numeric.

    Returns the depth as a string of an integer. Note that float input is
    accepted and silently truncated toward zero (e.g. "100.7" -> "100").
    """
    try:
        return str(int(float(depth)))
    except ValueError:
        if depth == "":
            print "No number of sequences provided to subsample for rarefaction."
            dep = raw_input("1) What is the number of sequences to subsample per sample [-d flag]? (No default): ")
            return rarefaction_check(dep)
        else:
            print "Non-integer value given for number of sequences to subsample for rarefaction."
            dep = raw_input("1) What is the number of sequences to subsample per sample [-d flag]? (No default): ")
            return rarefaction_check(dep)
def summary_plots(ec_key,dns,anl_fold,depth,merge_metadata):
    """Create alpha, beta and taxa summary plots (pipeline steps 15-18)
    on the remote EC2 instance.

    Arguments:
    ec_key         -- keypair file used to ssh into the EC2 instance
    dns            -- public DNS of the EC2 instance
    anl_fold       -- analysis folder on the EC2 instance
    depth          -- number of sequences to subsample per sample (-d)
    merge_metadata -- merged mapping/metadata file covering all runs (-m)
    """
    log_output("\n#Step 15: Performing single rarefaction on OTU table using 'single_rarefaction.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && single_rarefaction.py -i otu_table_mc2_no_pynast_failures_no_chimeras_w_tax.biom -o single_rarefied_otu_table.biom -d %s'" % (ec_key,dns,anl_fold,depth))
    log_output("single_rarefaction.py complete!")
    log_output("\n#Step 16: Summarizing and plotting taxa using 'summarize_taxa_through_plots.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && summarize_taxa_through_plots.py -o taxa_summary -i single_rarefied_otu_table.biom -m %s'" % (ec_key,dns,anl_fold,merge_metadata))
    log_output("summarize_taxa_through_plots.py complete!")
    log_output("\n#Step 17: Calculating alpha-diversity using 'alpha_rarefaction.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && alpha_rarefaction.py -i single_rarefied_otu_table.biom -o alpha_rarefaction/ -t non_chimeric_rep_set_aligned_pfiltered.tre \
-m %s --retain_intermediate_files'" % (ec_key,dns,anl_fold,merge_metadata))
    log_output("alpha_rarefaction.py complete!")
    log_output("\n#Step 18: Calculating beta-diversity using 'beta_diversity_through_plots.py'...")
    os.system("ssh -i %s ubuntu@%s 'source $HOME/qiime_software/activate.sh; cd %s && beta_diversity_through_plots.py -i single_rarefied_otu_table.biom -o beta_diversity/ -t non_chimeric_rep_set_aligned_pfiltered.tre \
-m %s'" % (ec_key,dns,anl_fold,merge_metadata))
    log_output("beta_diversity_through_plots.py complete!")
    return
# Interactive entry point: collect every pipeline parameter up front,
# then run the remote QIIME stages in order. Ctrl+C aborts cleanly.
if __name__ == "__main__":
    print "\n\t\t\t\033[1mWelcome to the Microbiome Analysis through Workflow of QIIME, MAWQ program (pronounced 'mock') brought to you by the Lynch Lab!\033[0m"
    print "\tTo run the script with default parameters, just press enter to each question without entering a value. To \
exit the pipeline at any point in time, press Ctrl+C\n\n"
    try:
        # Input file and MiSeq data location.
        inputfile = raw_input("1) Please provide the full name of the input-file (Type help for input-file format): ")
        checked = input_check(inputfile)
        inputfile = checked[1]
        mapping_check(checked[0])
        seq_data = raw_input("\n1) What's the path to the MiSeq run folder? (Default: /data/MiSeq_16S_data/MiSeqAnalysis) ")
        # flash (read-merging) parameters.
        print "\nThe following questions are for flash program: \n"
        flash_q1 = "1) What's the minimum overlap length between reads [-m flag]? (Default: 225, if length of Read 2 > 250) "
        m_min = check_value(raw_input(flash_q1),flash_q1,"integer")
        flash_q2 = "2) What's the read length [-r flag]? (Default: 251) "
        read_len = check_value(raw_input(flash_q2),flash_q2,"integer")
        # split_libraries_fastq.py parameters.
        print "\nThe following questions are for split_libraries_fastq.py script: \n"
        split_q1 = "1) What's the maximum unacceptable Phred quality score [-q flag]? (Default: 30) "
        phred = check_value(raw_input(split_q1),split_q1,"integer")
        split_q2 = "2) What's the max number of consecutive low quality base calls allowed before truncating a read [-r flag]? (Default: 3) "
        max_bad_run = check_value(raw_input(split_q2),split_q2,"integer")
        split_q3 = "3) What's the min number of consecutive high quality base calls to include a read (per single end read) as a fraction \
of the input read length [-p flag]? (Default: 0.75) "
        min_rl_frac = check_value(raw_input(split_q3),split_q3,"float")
        split_q4 = "4) What's the max number of N characters allowed in a sequence to retain it [-n flag]? (Default: 0) "
        n_chars = check_value(raw_input(split_q4),split_q4,"integer")
        split_q5 = "5) What's the type of barcode used [--barcode_type flag]? (Default: 12) "
        barcode = check_value(raw_input(split_q5),split_q5,"integer")
        split_q6 = "6) What's the start seq_ids as ascending integers beginning with start_seq_id [-s flag]? (Default: 0) "
        start_seq = check_value(raw_input(split_q6),split_q6,"integer")
        # pick_open_reference_otus.py parameters.
        print "\nThe following questions are for pick_open_reference_otus.py script: \n"
        otupick_q1 = "1) How many jobs do you wish to run in parallel? (Default: 4) "
        parallel = check_value(raw_input(otupick_q1),otupick_q1,"integer")
        ref_db = raw_input("2) What's the full path to the reference database? \
(Default: /home/ubuntu/qiime_software/gg_otus-13_8-release/rep_set/97_otus.fasta) ")
        # Amazon EC2 connection details.
        print "\nThe following questions are regarding your Amazon EC2 instance\n"
        ec_key = raw_input("1) Please enter the keypair for your EC2 instance: ") #qiime_wrapper_data_storage.pem
        check_ec_permission(ec_key)
        dns = raw_input("2) Please enter the public DNS for your EC2 instance: ") #ec2-54-237-163-108.compute-1.amazonaws.com
        anl_fold = raw_input("3) What would you like to call your analysis folder on the EC2 instance? ")
        # Run the pipeline stages; wall-clock timing starts here.
        startTime = datetime.now()
        preproceSteps(seq_data,m_min,read_len,inputfile)
        scp_amazon(inputfile,ec_key,dns,anl_fold)
        split_library(ec_key,dns,anl_fold,inputfile,phred,max_bad_run,min_rl_frac,n_chars,barcode,start_seq)
        open_otus_till_biom(parallel,ref_db,ec_key,dns,anl_fold)
        viewtable = raw_input("The summary table of the final OTU table is ready. Type 'yes' to view it. \
Are you ready? ")
        summary_view(ec_key,dns,anl_fold,viewtable)
        print "\nThe following question is for 'single_rarefaction.py' script: \n"
        depth = raw_input("1) What is the number of sequences to subsample per sample [-d flag]? (No default): ")
        depth = rarefaction_check(depth)
        print "\nThe following question is for 'summarize_taxa_through_plots.py', 'alpha_rarefaction.py', \
and 'beta_diversity_through_plots.py' script: \n"
        merge_metadata = raw_input("1) What is the name of the final mapping data file for all runs [-m flag]? (No default): ")
        merge_metadata_checked = mapping_check([merge_metadata])
        os.system("scp -i %s %s ubuntu@%s:/home/ubuntu/%s" % (ec_key,merge_metadata_checked[0],dns,anl_fold))
        summary_plots(ec_key,dns,anl_fold,depth,merge_metadata_checked[0])
        # Merge the two log files and remove the intermediate one.
        log_parse("wrapper_log_file.txt","logging_module_output.txt")
        os.system('rm logging_module_output.txt')
        print "\n"+"Task Completed! Time it took to complete the task: "+ str(datetime.now()-startTime)
    except KeyboardInterrupt:
        print "\n\nThanks for using (or attempting to use) the pipeline. Good-bye!\n"
| |
import sys
import configparser
import requests
import dateutil.parser
import argparse
import csv
import os
def get_configs(conf_file):
    """Read Octopus connection settings from *conf_file*.

    Returns a dict with the keys ``server``, ``api_key`` and ``dir_tmp``,
    all taken from the ``[Octopus]`` section of the configuration file.
    """
    parser = configparser.ConfigParser()
    parser.read(conf_file)
    return {option: parser.get('Octopus', option)
            for option in ('server', 'api_key', 'dir_tmp')}
class UrlFactory:
    """Builds Octopus Deploy REST API URLs for a given server root."""

    def __init__(self, server):
        self.server = server

    def url_api(self):
        """Root of the REST API on the configured server."""
        return '%s/api' % self.server

    def url_environment(self, env_id='all'):
        """URL for one environment, or the full listing when env_id == 'all'."""
        base = self.url_api() + '/environments/'
        return base + ('all' if env_id == 'all' else env_id)

    def url_project(self, proj_id='all'):
        """URL for one project, or the full listing when proj_id == 'all'."""
        base = self.url_api() + '/projects/'
        return base + ('all' if proj_id == 'all' else proj_id)

    def url_deployments(self):
        """URL of the (paged) deployments collection."""
        return self.url_api() + '/deployments'

    def url_release(self, rel_id='all'):
        """URL for one release; the paged collection when rel_id == 'all'."""
        if rel_id == 'all':
            return self.url_api() + '/releases'
        return self.url_api() + '/releases/' + rel_id

    def url_next(self, crawl, json):
        """Absolute URL of the next page, None when there is none, or
        False when crawling is disabled."""
        if not crawl:
            return False
        links = json['Links']
        if links and 'Page.Next' in links:
            return self.server + links['Page.Next']
        return None

    def url_machines(self):
        """URL of the full machine listing."""
        return self.url_api() + '/machines/all'
class OctopyIO:
    """Persists dictionaries and row-lists as CSV files in a cache dir."""

    def __init__(self, cache_dir):
        self.cache_dir = cache_dir

    def _full_path(self, file_name):
        """Location of a cache file inside the cache directory."""
        return '%s/%s' % (self.cache_dir, file_name)

    def _ensure_dir(self):
        """Create the cache directory lazily, on first write."""
        if not os.path.isdir(self.cache_dir):
            os.makedirs(self.cache_dir)

    def save_dict(self, file_name, dictionary):
        """Write the dict as CSV rows of key,value pairs."""
        self._ensure_dir()
        with open(self._full_path(file_name), 'w') as out:
            writer = csv.writer(out, delimiter=',', quotechar='|', lineterminator='\n')
            writer.writerows([key, value] for key, value in dictionary.items())

    def read_dict(self, file_name):
        """Load a dict stored with save_dict; {} when the file is absent."""
        path = self._full_path(file_name)
        if not os.path.exists(path):
            return {}
        with open(path, 'r') as src:
            rows = csv.reader(src, delimiter=',', quotechar='|', lineterminator='\n')
            return {row[0]: row[1] for row in rows}

    def save_list(self, file_name, list, keys):
        """Write dict-rows as CSV using *keys* as the column order."""
        self._ensure_dir()
        with open(self._full_path(file_name), 'w') as out:
            writer = csv.DictWriter(out, keys, delimiter=',', quotechar='|', lineterminator='\n')
            writer.writerows(list)

    def read_list(self, file_name, keys):
        """Load rows stored with save_list; [] when the file is absent."""
        path = self._full_path(file_name)
        if not os.path.exists(path):
            return []
        with open(path, 'r') as src:
            reader = csv.DictReader(src, keys, delimiter=',', quotechar='|', lineterminator='\n')
            return [row for row in reader]
class Octopy:
    """Thin client for the Octopus Deploy REST API.

    Every lookup is mirrored to a CSV cache file (via OctopyIO) so later
    runs can answer from cache, and incremental crawls can stop as soon
    as already-known items are encountered again.
    """
    def __init__(self, config):
        # In-memory {id: name}-style caches plus the deployment row list.
        self.environments = {}
        self.projects = {}
        self.releases = {}
        self.machines = {}
        self.deployments = []
        self.config = config
        self.url_factory = UrlFactory(self.config['server'])
        self.io = OctopyIO(self.config['dir_tmp'])
        # keys for csv
        self.keys_deployments = ['Id', 'Date', 'Time', 'Environment', 'Project', 'Release', 'SpecificMachines']
        # cache file names
        self.file_environments = 'environments.csv'
        self.file_projects = 'projects.csv'
        self.file_releases = 'releases.csv'
        self.file_deployments = 'deployments.csv'
        self.file_machines = 'machines.csv'

    def get_environments(self, cache=False):
        """Return {environment id: name}; read from the cache file when
        *cache* is true, otherwise fetched from the API and re-cached."""
        if cache:
            return self.io.read_dict(self.file_environments)
        self.environments = Octopy.__extract_objects(self.__scrape(self.url_factory.url_environment()), 'Id', 'Name')
        self.io.save_dict(self.file_environments, self.environments)
        return self.environments

    def get_projects(self, cache=False):
        """Return {project id: name}; same cache semantics as environments."""
        if cache:
            return self.io.read_dict(self.file_projects)
        self.projects = Octopy.__extract_objects(self.__scrape(self.url_factory.url_project()), 'Id', 'Name')
        self.io.save_dict(self.file_projects, self.projects)
        return self.projects

    def get_machines(self, cache=False):
        """Return {machine id: name}; same cache semantics as environments."""
        if cache:
            return self.io.read_dict(self.file_machines)
        self.machines = Octopy.__extract_objects(self.__scrape(self.url_factory.url_machines()), 'Id', 'Name')
        self.io.save_dict(self.file_machines, self.machines)
        return self.machines

    def get_releases(self, cache=False, crawl=False):
        """Return {release id: version}.

        Always seeded from the cache file. When *cache* is false, new
        releases from the API are merged in; with *crawl* set, paging
        continues until a page contributes nothing new.
        """
        self.releases = self.io.read_dict(self.file_releases)
        if cache:
            return self.releases
        url = self.url_factory.url_release()
        while url:
            response = self.__scrape(url)
            releases = Octopy.__extract_objects(response['Items'], 'Id', 'Version')
            diff = set(releases.keys()) - set(self.releases.keys())
            if len(diff) > 0:
                for d in list(diff):
                    self.releases.update({d: releases[d]})
            # Abort crawling when no updates found
            url = False if len(diff) == 0 and crawl else self.url_factory.url_next(crawl, response)
        self.io.save_dict(self.file_releases, self.releases)
        return self.releases

    def get_deployments(self, cache=False, crawl=False):
        """Return deployment rows (dicts keyed by self.keys_deployments).

        Seeds from the cache, then pages through /api/deployments
        resolving ids to names; stops as soon as an already-cached
        deployment id comes back from the API.
        """
        self.deployments = self.io.read_list(self.file_deployments, self.keys_deployments)
        if cache:
            return self.deployments
        # Refresh the lookup tables used to resolve ids to names below.
        self.get_environments(cache=False)
        self.get_projects(cache=False)
        self.get_machines(cache=False)
        self.get_releases(False, crawl)
        abort = False
        ids = {d['Id'] for d in self.deployments}
        url = self.url_factory.url_deployments()
        while url:
            response = self.__scrape(url)
            for dep in response['Items']:
                if Octopy.__get_numeric_deployment_id(dep['Id']) in ids:
                    # Stop processing, deployment is already saved
                    abort = True
                    break
                if not crawl:
                    # Without a full release crawl, this deployment's
                    # release may be unknown; fetch it individually.
                    if dep['ReleaseId'] not in self.releases:
                        release = self.__scrape(self.url_factory.url_release(dep['ReleaseId']))
                        self.releases[release['Id']] = release['Version']
                dt = dateutil.parser.parse(dep['Created'])
                self.deployments.append({
                    'Id': Octopy.__get_numeric_deployment_id(dep['Id']),
                    'Date': dt.date(),
                    'Time': dt.time().strftime('%H:%M'),
                    'Environment': self.environments[dep['EnvironmentId']],
                    'Project': self.projects[dep['ProjectId']],
                    'Release': self.releases[dep['ReleaseId']],
                    'SpecificMachines': self.__extract_machines(dep['SpecificMachineIds'])
                })
            url = False if abort else self.url_factory.url_next(crawl, response)
        self.io.save_dict(self.file_projects, self.projects)
        self.io.save_dict(self.file_releases, self.releases)
        self.io.save_list(self.file_deployments, self.deployments, self.keys_deployments)
        return self.deployments

    def __extract_machines(self, machine_ids):
        """Resolve machine ids to a comma-joined name string; ids missing
        from the machine table are rendered as 'DEL-<id>'."""
        if machine_ids:
            for m_id in machine_ids:
                if m_id not in self.machines:
                    # Assume machine is deleted if it doesn't exist in 'machines' array.
                    self.machines[m_id] = 'DEL-' + m_id
            return ','.join([self.machines[x] for x in machine_ids])
        else:
            return ''

    def __scrape(self, url):
        """GET *url* with the configured API key; returns parsed JSON."""
        if __debug__:
            print('GET:', url)
        return requests.get(url, headers={'X-Octopus-ApiKey': self.config['api_key']}).json()

    @staticmethod
    def __extract_objects(json, o_id, o_value):
        """Build {obj[o_id]: obj[o_value]} from a JSON array of objects."""
        result = {}
        for obj in json:
            result[obj[o_id]] = obj[o_value]
        return result

    @staticmethod
    def __get_numeric_deployment_id(an_deployment_id):
        # Assumes ids look like 'Deployments-123' (12-character prefix),
        # returning the numeric tail -- confirm against the API id format.
        return an_deployment_id[12:]
def main():
    """Parse the command line, talk to Octopus and print CSV-ish output."""
    config = get_configs('octopy.cfg')
    if not (config['server'] and config['api_key']):
        print('Please, specify Octopus parameters in configuration file!')
        sys.exit(1)
    parser = argparse.ArgumentParser(
        description='Octopy is a small application that prints out information from Octopus in a convenient format.')
    parser.add_argument('--cmd', dest='command', help="Octopy command (try `env`, `proj`, `rel`, `dep` and `mac`).")
    parser.add_argument('--cache', dest='cache', action='store_true', help="Read data from cache if available.")
    parser.add_argument('--headers', dest='headers', action='store_true', help='Display headers in output.')
    parser.add_argument('--crawl', dest='crawl', action='store_true',
                        help='By default only 30 items per page are returned by API. This parameter enables link crawl. '
                             'All resources from the `Link` collection will be crawled by Octopy and data will be saved to cache. '
                             'This parameter has no effect on `env` and `proj` commands.')
    args = parser.parse_args()
    octopy = Octopy(config)

    def show_mapping(mapping, header):
        # Shared printer for all {id: value} commands.
        if args.headers:
            print(header)
        for key in mapping.keys():
            print('%s,%s' % (key, mapping[key]))

    command = args.command
    if command == 'env':  # environments
        show_mapping(octopy.get_environments(args.cache), 'Id,Name')
    elif command == 'proj':  # projects
        show_mapping(octopy.get_projects(args.cache), 'Id,Name')
    elif command == 'rel':  # releases
        show_mapping(octopy.get_releases(args.cache, args.crawl), 'Id,Version')
    elif command == 'mac':  # machines
        show_mapping(octopy.get_machines(args.cache), 'Id,Name')
    elif command == 'dep':  # deployments
        deployments = octopy.get_deployments(args.cache, args.crawl)
        if args.headers:
            print('Date,Time,Environment,Project,Release,SpecificMachines')
        for dep in deployments:
            print('%s,%s,%s,%s,%s,%s' %
                  (dep['Date'], dep['Time'], dep['Environment'], dep['Project'], dep['Release'], dep['SpecificMachines']))
    else:
        print("Unknown command '%s'" % command)
        parser.print_help()
# Script entry point: dispatch the requested Octopy command.
if __name__ == '__main__':
    main()
| |
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" In-Memory Disk File Interface for Swift Object Server"""
import cStringIO
import time
import hashlib
from contextlib import contextmanager
from eventlet import Timeout
from swift.common.utils import Timestamp
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
DiskFileCollision, DiskFileDeleted, DiskFileNotOpen
from swift.common.swob import multi_range_iterator
class InMemoryFileSystem(object):
    """
    A very simplistic in-memory file system scheme.

    Objects live in a single dictionary keyed by object name; each value
    is a (file-contents buffer, metadata dictionary) pair.
    """
    def __init__(self):
        self._filesystem = {}

    def get_object(self, name):
        """Return the (data, metadata) pair for *name*, or (None, None)
        when no such object exists."""
        entry = self._filesystem.get(name)
        if entry is None:
            return None, None
        return entry

    def put_object(self, name, data, metadata):
        """Store (data, metadata) under *name*, replacing any prior entry."""
        self._filesystem[name] = (data, metadata)

    def del_object(self, name):
        """Remove *name*; raises KeyError when it does not exist."""
        del self._filesystem[name]

    def get_diskfile(self, account, container, obj, **kwargs):
        """Factory for a DiskFile bound to this in-memory file system."""
        return DiskFile(self, account, container, obj)
class DiskFileWriter(object):
    """
    .. note::
        Sample alternative pluggable on-disk backend implementation.

    Write context used while servicing PUT REST API requests; this is the
    object handed out by DiskFile's create() context manager.

    :param fs: internal file system object to use
    :param name: standard object name
    :param fp: `StringIO` in-memory representation object
    """
    def __init__(self, fs, name, fp):
        self._filesystem = fs
        self._name = name
        self._fp = fp
        self._upload_size = 0

    def write(self, chunk):
        """
        Append a chunk of data to the in-memory buffer.

        :param chunk: the chunk of data to write as a string object
        :returns: the total number of bytes written so far
        """
        written = len(chunk)
        self._fp.write(chunk)
        self._upload_size = self._upload_size + written
        return self._upload_size

    def put(self, metadata):
        """
        Publish the buffer and its metadata in the in-memory file system
        under this writer's object name.

        :param metadata: dictionary of metadata to be written
        """
        metadata['name'] = self._name
        self._filesystem.put_object(self._name, self._fp, metadata)
class DiskFileReader(object):
    """
    .. note::
        Sample alternative pluggable on-disk backend implementation.

    Encapsulation of the read context for servicing GET REST API
    requests. Serves as the context manager object for DiskFile's reader()
    method.

    :param name: object name
    :param fp: open file object pointer reference
    :param obj_size: on-disk size of object in bytes
    :param etag: MD5 hash of object from metadata
    """
    def __init__(self, name, fp, obj_size, etag):
        self._name = name
        self._fp = fp
        self._obj_size = obj_size
        self._etag = etag
        # Iteration bookkeeping: a size/etag cross-check on close() is only
        # meaningful after a complete, from-the-start read.
        self._iter_etag = None
        self._bytes_read = 0
        self._started_at_0 = False
        self._read_to_eof = False
        self._suppress_file_closing = False
        # Quarantine reason; empty string means the object looked healthy.
        self.was_quarantined = ''

    def __iter__(self):
        """Yield the object's content, closing the file when exhausted."""
        try:
            self._bytes_read = 0
            self._started_at_0 = False
            self._read_to_eof = False
            if self._fp.tell() == 0:
                # Only a read starting at offset 0 can be checksummed.
                self._started_at_0 = True
                self._iter_etag = hashlib.md5()
            while True:
                chunk = self._fp.read()
                if chunk:
                    if self._iter_etag:
                        self._iter_etag.update(chunk)
                    self._bytes_read += len(chunk)
                    yield chunk
                else:
                    self._read_to_eof = True
                    break
        finally:
            if not self._suppress_file_closing:
                self.close()

    def app_iter_range(self, start, stop):
        """Yield content for the byte range [start, stop)."""
        if start or start == 0:
            self._fp.seek(start)
        if stop is not None:
            length = stop - start
        else:
            length = None
        try:
            for chunk in self:
                if length is not None:
                    length -= len(chunk)
                    if length < 0:
                        # Chop off the extra:
                        yield chunk[:length]
                        break
                yield chunk
        finally:
            if not self._suppress_file_closing:
                self.close()

    def app_iter_ranges(self, ranges, content_type, boundary, size):
        """Yield a multi-range MIME document for the given byte ranges."""
        if not ranges:
            yield ''
        else:
            try:
                # Keep the file open across the individual range reads and
                # close exactly once when the whole document is done.
                self._suppress_file_closing = True
                for chunk in multi_range_iterator(
                        ranges, content_type, boundary, size,
                        self.app_iter_range):
                    yield chunk
            finally:
                self._suppress_file_closing = False
                try:
                    self.close()
                except DiskFileQuarantined:
                    pass

    def _quarantine(self, msg):
        # This in-memory backend only records the reason; nothing is moved.
        self.was_quarantined = msg

    def _handle_close_quarantine(self):
        """Cross-check the fully-read object against its metadata and
        quarantine it on a size or ETag mismatch."""
        if self._bytes_read != self._obj_size:
            # Bug fix: this previously read self.bytes_read (no such
            # attribute), raising AttributeError which close() silently
            # swallowed -- size mismatches were never recorded.
            self._quarantine(
                "Bytes read: %s, does not match metadata: %s" % (
                    self._bytes_read, self._obj_size))
        elif self._iter_etag and \
                self._etag != self._iter_etag.hexdigest():
            self._quarantine(
                "ETag %s and file's md5 %s do not match" % (
                    self._etag, self._iter_etag.hexdigest()))

    def close(self):
        """
        Close the file. Will handle quarantining file if necessary.
        """
        if self._fp:
            try:
                if self._started_at_0 and self._read_to_eof:
                    self._handle_close_quarantine()
            except (Exception, Timeout):
                pass
            finally:
                self._fp = None
class DiskFile(object):
    """
    .. note::
        Sample alternative pluggable on-disk backend implementation. This
        example duck-types the reference implementation DiskFile class.

    Manage object files in-memory.

    :param fs: the InMemoryFileSystem instance holding the objects
    :param account: account name for the object
    :param container: container name for the object
    :param obj: object name for the object
    """
    def __init__(self, fs, account, container, obj):
        self._name = '/' + '/'.join((account, container, obj))
        self._metadata = None
        self._fp = None
        self._filesystem = fs

    def open(self):
        """
        Open the file and read the metadata.

        This method must populate the _metadata attribute.

        :raises DiskFileCollision: on name mis-match with metadata
        :raises DiskFileDeleted: if it does not exist, or a tombstone is
                                 present
        :raises DiskFileQuarantined: if while reading metadata of the file
                                     some data did pass cross checks
        """
        fp, self._metadata = self._filesystem.get_object(self._name)
        if fp is None:
            raise DiskFileDeleted()
        self._fp = self._verify_data_file(fp)
        self._metadata = self._metadata or {}
        return self

    def __enter__(self):
        # Context-manager use is only valid after a successful open().
        if self._metadata is None:
            raise DiskFileNotOpen()
        return self

    def __exit__(self, t, v, tb):
        if self._fp is not None:
            self._fp = None

    def _quarantine(self, name, msg):
        """
        Drop a corrupted object from the in-memory store and build the
        exception for the caller to raise.

        Bug fix: this helper was missing entirely, so every
        ``raise self._quarantine(...)`` below died with AttributeError
        instead of raising DiskFileQuarantined.

        :param name: name of the object to drop
        :param msg: reason for quarantining the object
        :returns: a DiskFileQuarantined exception object
        """
        self._filesystem.del_object(name)
        return DiskFileQuarantined(msg)

    def _verify_data_file(self, fp):
        """
        Verify the metadata's name value matches what we think the object is
        named.

        :raises DiskFileCollision: if the metadata stored name does not match
                                   the referenced name of the file
        :raises DiskFileNotExist: if the object has expired
        :raises DiskFileQuarantined: if data inconsistencies were detected
                                     between the metadata and the file-system
                                     metadata
        """
        try:
            mname = self._metadata['name']
        except KeyError:
            raise self._quarantine(self._name, "missing name metadata")
        else:
            if mname != self._name:
                raise DiskFileCollision('Client path does not match path '
                                        'stored in object metadata')
        try:
            x_delete_at = int(self._metadata['X-Delete-At'])
        except KeyError:
            # No expiry set; nothing to check.
            pass
        except ValueError:
            # Quarantine, the x-delete-at key is present but not an
            # integer.
            raise self._quarantine(
                self._name, "bad metadata x-delete-at value %s" % (
                    self._metadata['X-Delete-At']))
        else:
            if x_delete_at <= time.time():
                raise DiskFileNotExist('Expired')
        try:
            metadata_size = int(self._metadata['Content-Length'])
        except KeyError:
            raise self._quarantine(
                self._name, "missing content-length in metadata")
        except ValueError:
            # Quarantine, the content-length key is present but not an
            # integer.
            raise self._quarantine(
                self._name, "bad metadata content-length value %s" % (
                    self._metadata['Content-Length']))
        try:
            # Measure the buffer by seeking to its end, then rewind so the
            # caller starts reading at offset zero.
            fp.seek(0, 2)
            obj_size = fp.tell()
            fp.seek(0, 0)
        except OSError as err:
            # Quarantine, we can't successfully stat the file.
            raise self._quarantine(self._name, "not stat-able: %s" % err)
        if obj_size != metadata_size:
            raise self._quarantine(
                self._name, "metadata content-length %s does"
                " not match actual object size %s" % (
                    metadata_size, obj_size))
        return fp

    def get_metadata(self):
        """
        Provide the metadata for an object as a dictionary.

        :returns: object's metadata dictionary
        :raises DiskFileNotOpen: if the object has not been opened
        """
        if self._metadata is None:
            raise DiskFileNotOpen()
        return self._metadata

    def read_metadata(self):
        """
        Return the metadata for an object, opening and closing it.

        :returns: metadata dictionary for an object
        """
        with self.open():
            return self.get_metadata()

    def reader(self, keep_cache=False):
        """
        Return a swift.common.swob.Response class compatible "app_iter"
        object. The responsibility of closing the open file is passed to the
        DiskFileReader object.

        :param keep_cache: ignored by this in-memory implementation
        """
        dr = DiskFileReader(self._name, self._fp,
                            int(self._metadata['Content-Length']),
                            self._metadata['ETag'])
        # At this point the reader object is now responsible for
        # the file pointer.
        self._fp = None
        return dr

    @contextmanager
    def create(self, size=None):
        """
        Context manager to create a file. We create a temporary file first, and
        then return a DiskFileWriter object to encapsulate the state.

        :param size: optional initial size of file to explicitly allocate on
                     disk
        :raises DiskFileNoSpace: if a size is specified and allocation fails
        """
        fp = cStringIO.StringIO()
        try:
            yield DiskFileWriter(self._filesystem, self._name, fp)
        finally:
            del fp

    def write_metadata(self, metadata):
        """
        Write a block of metadata to an object, keeping its data intact.

        Bug fix: the previous code called ``self._filesystem.get(...)`` and
        used item assignment, but InMemoryFileSystem exposes neither; use
        its get_object/put_object API instead.
        """
        cur_fp, cur_md = self._filesystem.get_object(self._name)
        if cur_fp is not None:
            self._filesystem.put_object(self._name, cur_fp, metadata)

    def delete(self, timestamp):
        """
        Perform a delete for the given object in the given container under the
        given account.

        Any object whose X-Timestamp is older than *timestamp* is removed.

        :param timestamp: timestamp to compare with each file
        """
        fp, md = self._filesystem.get_object(self._name)
        # NOTE(review): assumes the object exists and its metadata carries
        # X-Timestamp; md is None for a missing object and would raise
        # TypeError here -- confirm callers always delete existing objects.
        if md['X-Timestamp'] < Timestamp(timestamp):
            self._filesystem.del_object(self._name)
| |
# TODO:
# - Front matter documentation.
# - Actions should be loaded from a python file provided via the command line.
# - Alert email from address should be configurable from the command line.
# - It should be possible to disable the alert emails.
# - The logging level should be configurable from the command line.
import BaseHTTPServer
import os
import sys
import time
import urlparse
import json
import argparse
import sh
import logging
import smtplib
from abc import ABCMeta, abstractmethod
from email.mime.text import MIMEText
# Absolute path of this script; handed to each Action so actions can locate
# resources relative to the handler's own directory.
scriptpath = os.path.realpath(__file__)
# Initialize python's logging facilities.  filename=None sends records to
# stderr rather than a log file.
logging.basicConfig(
    filename=None,
    # MOD: Change to alter logging verbosity. Options are:
    # -logging.NOTSET
    # -logging.DEBUG
    # -logging.INFO
    # -logging.WARNING
    # -logging.ERROR
    # -logging.CRITICAL
    level=logging.DEBUG,
    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
    datefmt='%m-%d %H:%M')
# Configure the command line interface: a positional bind address and port.
parser = argparse.ArgumentParser(
    '''A webserver that will map github webhook post requests to python code. The
mapping can be modified by modifying this script.''')
parser.add_argument("address",
                    help="IP or DNS name this webserver will bind to.")
parser.add_argument("port",help="Port number to bind to.",type=int)
args = parser.parse_args()
# The base class that all actions should extend.
class Action():
    '''An abstract class that represents an action to be taken on a webhook post
    that matches certain criteria.

    Subclasses must implement match(), act() and info().
    '''
    # BUG FIX: without a metaclass declaration the @abstractmethod
    # decorators below are inert -- nothing stops a subclass from omitting
    # an override.  Declaring ABCMeta (Python 2 syntax, matching the rest
    # of this script) makes such a subclass fail to instantiate.
    __metaclass__ = ABCMeta

    def __init__(self, uid, sp, hdrs, json):
        '''Initialize an action.

        Arguments:
        self -- Reference to "this" object.
        uid  -- Some unique identifier for a webhook post.
        sp   -- The path to the calling script.
        hdrs -- A dictionary mapping HTTP headers to values.
        json -- The payload as a JSON object. Key-value pairs are accessible
                via the dictionary interface.
        '''
        self.uid = uid
        self.scriptpath = sp
        self.headers = hdrs
        self.json = json

    def log(self, lvl, msg):
        '''Print a log message with information specific to an action.

        Arguments:
        self -- Reference to "this" object.
        lvl  -- The logging level of this message.
        msg  -- The message to be logged.
        '''
        logging.log(lvl,
                    "[UID=%s,PID=%s,Action=%s] %s"
                    % (self.uid, os.getpid(), self.__class__.__name__, msg))

    @abstractmethod
    def info(self):
        '''Information to place in an alert email sent to repository users.'''
        pass

    @abstractmethod
    def match(self):
        '''Return true if this action should be performed. Return false
        otherwise.

        Arguments:
        self -- Reference to "this" object.
        '''
        pass

    @abstractmethod
    def act(self):
        '''Perform this action's work.  Only called after match() has
        returned true.  (The previous docstring here was a copy-paste of
        match()'s.)

        Arguments:
        self -- Reference to "this" object.
        '''
        pass
# Registry of Action subclasses; routeToAction instantiates each entry for
# every incoming delivery and runs those whose match() returns true.
actionlist = []
class CloneAction(Action):
    ''' This class demonstrates how to write an action.'''

    def match(self):
        ''' Match push github events.'''
        self.alert = ""
        self.log(logging.DEBUG,
                 "Checking if delivery %s from repo %s should be acted upon"
                 % (self.uid, self.json['repository']['url']))
        is_push = self.headers['X-GitHub-Event'] == 'push'
        if is_push:
            self.alert += "Push action received, this action should run.\n"
        else:
            # BUG FIX: the old message claimed a push was received even
            # when the event was something else.
            self.alert += ("Non-push event (%s) received, this action "
                           "should not run.\n"
                           % self.headers['X-GitHub-Event'])
        return is_push

    def act(self):
        '''Clone the repository if it has not already been cloned.'''
        repo_name = self.json['repository']['name']
        repo_url = ("git@github.umn.edu:" +
                    self.json['repository']['organization'] +
                    "/" +
                    self.json['repository']['name'])
        git = sh.git.bake()
        self.log(logging.DEBUG,
                 "checking for repo %s in %s" % (repo_name, sh.pwd()))
        if not os.path.exists(repo_name):
            self.log(logging.DEBUG, "Cloning new repository")
            try:
                git.clone(repo_url)
                # Trailing newline added so later alert lines do not run on.
                self.alert += "Cloned repository at %s\n" % repo_url
                self.log(logging.DEBUG, "Git clone succeeded.")
            except Exception:
                # BUG FIX: a bare "except:" also swallowed SystemExit and
                # KeyboardInterrupt; catch only genuine errors.
                msg = ("Attempt to clone repository at %s: %s"
                       % (repo_url, sys.exc_info()[0]))
                self.log(logging.ERROR, msg)
                self.alert += msg
        else:
            msg = ("Directory already exists, ignoring clone request of "
                   "repository %s.\n" % repo_url)
            self.alert += msg
            self.log(logging.WARN, msg)

    def info(self):
        '''Report pertinent information back to the user.'''
        return self.alert
# Register CloneAction so routeToAction considers it for every delivery.
actionlist.append(CloneAction)
class HookHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    '''Dispatch GitHub webhook HTTP requests to the registered actions.'''
    server_version = "Handler/0.1"

    # NB: Some events do not have an appropriate email address readily
    # available.
    def getEmailAddy(self, hdrs, json):
        '''Return the commit author's email for push events, else None.'''
        if hdrs['X-GitHub-Event'] == 'push':
            return json['head_commit']['author']['email']
        else:
            return None

    def routeToAction(self, hdrs, json):
        '''Instantiate each registered action for this delivery, run those
        that match, and mail a summary to the commit author when one is
        known.  (The old "global action"/"global scriptpath" statements
        were no-ops for these read-only uses and have been dropped.)'''
        content = ("Handling %s event from repository %s.\n\n"
                   % (hdrs['X-GitHub-Event'], json['repository']['url']))
        for ac in actionlist:
            a = ac(hdrs['X-GitHub-Delivery'], scriptpath, hdrs, json)
            if a.match():
                a.act()
                content += "Messages related to %s.\n" % a.__class__.__name__
                content += "=================================================\n"
                content += a.info()
                content += "\n=================================================\n"
        addy = self.getEmailAddy(hdrs, json)
        if addy is not None:
            email = MIMEText(content)
            email['Subject'] = ("GitHub Webhook Handler: %s from %s received.\n"
                                % (hdrs['X-GitHub-Event'], json['repository']['url']))
            # XXX: Should be configurable from the command line.
            email['From'] = "csci2041@cs.umn.edu"
            email['To'] = addy
            s = smtplib.SMTP('localhost')
            try:
                s.sendmail("csci2041@cs.umn.edu", addy, email.as_string())
            finally:
                # BUG FIX: the SMTP connection was never closed.
                s.quit()

    def do_GET(self):
        '''Log and acknowledge GET requests; no action is ever taken.'''
        logging.info("Received get request with the following headers: %s" % self.headers)
        content_len = int(self.headers.getheader('content-length', 0))
        get_body = self.rfile.read(content_len)
        logging.info("Received get request with the following body: %s" % get_body)
        logging.warning("Ignoring get request.")
        self.send_response(200)

    def do_POST(self):
        '''Validate the sender IP, parse the JSON payload, and fork a child
        process to run the matching actions.'''
        # Check that the IP is within the GH ranges.
        # BUG FIX: ('134.84.231') is a plain string, so the generator
        # iterated its individual characters and startswith() accepted
        # almost any IP; the trailing comma makes it a one-element tuple.
        if not any(self.client_address[0].startswith(IP)
                   for IP in ('134.84.231',)):
            logging.warning("Received post request from invalid IP!")
            self.send_error(403)
            # BUG FIX: without this return the request was fully processed
            # despite the 403.
            return
        logging.info("Received post request with the following headers: %s" % self.headers)
        content_len = int(self.headers.getheader('content-length', 0))
        payload = json.loads(self.rfile.read(content_len))
        logging.info("Received post request with the following JSON body: %s" % payload)
        logging.info("Forking to route and execute actions")
        pid = os.fork()
        if pid == 0:
            # Child: run the actions, then exit immediately so the parent's
            # server state (buffers, atexit handlers) is untouched.
            self.routeToAction(self.headers, payload)
            os._exit(0)
        else:
            logging.info("Routing being handled by process %s" % pid)
            self.send_response(200)
if __name__ == '__main__':
    # Bind the webhook handler to the address/port given on the command line
    # and serve until interrupted.
    server_class = BaseHTTPServer.HTTPServer
    httpd = server_class((args.address, int(args.port)), HookHandler)
    logging.info("Server started at %s:%s"% (args.address, int(args.port)))
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the expected shutdown path; fall through to clean close.
        pass
    httpd.server_close()
    logging.info("Server stopped at %s:%s"% (args.address, int(args.port)))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.